hip_filename
stringlengths 5
84
| hip_content
stringlengths 79
9.69M
| cuda_filename
stringlengths 4
83
| cuda_content
stringlengths 19
9.69M
|
---|---|---|---|
42ea41a7b68710724ef05a2338bd034c550efddc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include "Boys_gpu.h"
#define PI 3.1415926535897932
#define P25 17.4934183276248620
#define NTHREAD 64
texture<int2,1,hipReadModeElementType> tex_P;
texture<int2,1,hipReadModeElementType> tex_Zta;
texture<int2,1,hipReadModeElementType> tex_pp;
texture<float,1,hipReadModeElementType> tex_K2_p;
texture<int2,1,hipReadModeElementType> tex_PA;
texture<int2,1,hipReadModeElementType> tex_PB;
texture<unsigned int,1,hipReadModeElementType> tex_id_bra;
texture<int2,1,hipReadModeElementType> tex_Q;
texture<int2,1,hipReadModeElementType> tex_Eta;
texture<int2,1,hipReadModeElementType> tex_pq;
texture<float,1,hipReadModeElementType> tex_K2_q;
texture<int2,1,hipReadModeElementType> tex_QC;
texture<int2,1,hipReadModeElementType> tex_QD;
texture<unsigned int,1,hipReadModeElementType> tex_id_ket;
void MD_texture_binding_bra_pp(double * P_d,double * PA_d,double * PB_d,\
double * alphaP_d,double * pp_d,float * K2_p_d,unsigned int * id_bra_d,\
unsigned int primit_len){
hipBindTexture(0, tex_P, P_d, sizeof(double)*primit_len*3);
hipBindTexture(0, tex_Zta, alphaP_d, sizeof(double)*primit_len);
hipBindTexture(0, tex_pp, pp_d, sizeof(double)*primit_len);
hipBindTexture(0, tex_K2_p, K2_p_d, sizeof(float)*primit_len);
hipBindTexture(0, tex_PA, PA_d, sizeof(double)*primit_len*3);
hipBindTexture(0, tex_PB, PB_d, sizeof(double)*primit_len*3);
hipBindTexture(0, tex_id_bra, id_bra_d, sizeof(unsigned int)*primit_len);
}
void MD_texture_unbind_bra_pp(){
hipUnbindTexture(tex_P);
hipUnbindTexture(tex_Zta);
hipUnbindTexture(tex_pp);
hipUnbindTexture(tex_K2_p);
hipUnbindTexture(tex_PA);
hipUnbindTexture(tex_PB);
hipUnbindTexture(tex_id_bra);
}
void MD_texture_binding_ket_pp(double * Q_d,double * QC_d,double * QD_d,\
double * alphaQ_d,double * pq_d,float * K2_q_d,unsigned int * id_ket_d,\
unsigned int primit_len){
hipBindTexture(0, tex_Q, Q_d, sizeof(double)*primit_len*3);
hipBindTexture(0, tex_Eta, alphaQ_d, sizeof(double)*primit_len);
hipBindTexture(0, tex_pq, pq_d, sizeof(double)*primit_len);
hipBindTexture(0, tex_K2_q, K2_q_d, sizeof(float)*primit_len);
hipBindTexture(0, tex_QC, QC_d, sizeof(double)*primit_len*3);
hipBindTexture(0, tex_QD, QD_d, sizeof(double)*primit_len*3);
hipBindTexture(0, tex_id_ket, id_ket_d, sizeof(unsigned int)*primit_len);
}
void MD_texture_unbind_ket_pp(){
hipUnbindTexture(tex_Q);
hipUnbindTexture(tex_Eta);
hipUnbindTexture(tex_pq);
hipUnbindTexture(tex_K2_q);
hipUnbindTexture(tex_QC);
hipUnbindTexture(tex_QD);
hipUnbindTexture(tex_id_ket);
}
__global__ void MD_Kp_sdpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*3];
for(int i=0;i<3;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_001[3];
temp_int2=tex1Dfetch(tex_PB,jj*3+0);
Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+1);
Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+2);
Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
double P_002000000=Pd_002[0];
double P_102000000=Pd_102[0];
double P_202000000=Pd_202[0];
double P_001001000=Pd_001[0]*Pd_001[1];
double P_001101000=Pd_001[0]*Pd_101[1];
double P_101001000=Pd_101[0]*Pd_001[1];
double P_101101000=Pd_101[0]*Pd_101[1];
double P_000002000=Pd_002[1];
double P_000102000=Pd_102[1];
double P_000202000=Pd_202[1];
double P_001000001=Pd_001[0]*Pd_001[2];
double P_001000101=Pd_001[0]*Pd_101[2];
double P_101000001=Pd_101[0]*Pd_001[2];
double P_101000101=Pd_101[0]*Pd_101[2];
double P_000001001=Pd_001[1]*Pd_001[2];
double P_000001101=Pd_001[1]*Pd_101[2];
double P_000101001=Pd_101[1]*Pd_001[2];
double P_000101101=Pd_101[1]*Pd_101[2];
double P_000000002=Pd_002[2];
double P_000000102=Pd_102[2];
double P_000000202=Pd_202[2];
double PR_002000000000=P_002000000*R_000[0]+-1*P_102000000*R_100[0]+P_202000000*R_200[0];
double PR_001001000000=P_001001000*R_000[0]+-1*P_001101000*R_010[0]+-1*P_101001000*R_100[0]+P_101101000*R_110[0];
double PR_000002000000=P_000002000*R_000[0]+-1*P_000102000*R_010[0]+P_000202000*R_020[0];
double PR_001000001000=P_001000001*R_000[0]+-1*P_001000101*R_001[0]+-1*P_101000001*R_100[0]+P_101000101*R_101[0];
double PR_000001001000=P_000001001*R_000[0]+-1*P_000001101*R_001[0]+-1*P_000101001*R_010[0]+P_000101101*R_011[0];
double PR_000000002000=P_000000002*R_000[0]+-1*P_000000102*R_001[0]+P_000000202*R_002[0];
double PR_002000000001=P_002000000*R_001[0]+-1*P_102000000*R_101[0]+P_202000000*R_201[0];
double PR_001001000001=P_001001000*R_001[0]+-1*P_001101000*R_011[0]+-1*P_101001000*R_101[0]+P_101101000*R_111[0];
double PR_000002000001=P_000002000*R_001[0]+-1*P_000102000*R_011[0]+P_000202000*R_021[0];
double PR_001000001001=P_001000001*R_001[0]+-1*P_001000101*R_002[0]+-1*P_101000001*R_101[0]+P_101000101*R_102[0];
double PR_000001001001=P_000001001*R_001[0]+-1*P_000001101*R_002[0]+-1*P_000101001*R_011[0]+P_000101101*R_012[0];
double PR_000000002001=P_000000002*R_001[0]+-1*P_000000102*R_002[0]+P_000000202*R_003[0];
double PR_002000000010=P_002000000*R_010[0]+-1*P_102000000*R_110[0]+P_202000000*R_210[0];
double PR_001001000010=P_001001000*R_010[0]+-1*P_001101000*R_020[0]+-1*P_101001000*R_110[0]+P_101101000*R_120[0];
double PR_000002000010=P_000002000*R_010[0]+-1*P_000102000*R_020[0]+P_000202000*R_030[0];
double PR_001000001010=P_001000001*R_010[0]+-1*P_001000101*R_011[0]+-1*P_101000001*R_110[0]+P_101000101*R_111[0];
double PR_000001001010=P_000001001*R_010[0]+-1*P_000001101*R_011[0]+-1*P_000101001*R_020[0]+P_000101101*R_021[0];
double PR_000000002010=P_000000002*R_010[0]+-1*P_000000102*R_011[0]+P_000000202*R_012[0];
double PR_002000000100=P_002000000*R_100[0]+-1*P_102000000*R_200[0]+P_202000000*R_300[0];
double PR_001001000100=P_001001000*R_100[0]+-1*P_001101000*R_110[0]+-1*P_101001000*R_200[0]+P_101101000*R_210[0];
double PR_000002000100=P_000002000*R_100[0]+-1*P_000102000*R_110[0]+P_000202000*R_120[0];
double PR_001000001100=P_001000001*R_100[0]+-1*P_001000101*R_101[0]+-1*P_101000001*R_200[0]+P_101000101*R_201[0];
double PR_000001001100=P_000001001*R_100[0]+-1*P_000001101*R_101[0]+-1*P_000101001*R_110[0]+P_000101101*R_111[0];
double PR_000000002100=P_000000002*R_100[0]+-1*P_000000102*R_101[0]+P_000000202*R_102[0];
double PR_002000000002=P_002000000*R_002[0]+-1*P_102000000*R_102[0]+P_202000000*R_202[0];
double PR_001001000002=P_001001000*R_002[0]+-1*P_001101000*R_012[0]+-1*P_101001000*R_102[0]+P_101101000*R_112[0];
double PR_000002000002=P_000002000*R_002[0]+-1*P_000102000*R_012[0]+P_000202000*R_022[0];
double PR_001000001002=P_001000001*R_002[0]+-1*P_001000101*R_003[0]+-1*P_101000001*R_102[0]+P_101000101*R_103[0];
double PR_000001001002=P_000001001*R_002[0]+-1*P_000001101*R_003[0]+-1*P_000101001*R_012[0]+P_000101101*R_013[0];
double PR_000000002002=P_000000002*R_002[0]+-1*P_000000102*R_003[0]+P_000000202*R_004[0];
double PR_002000000011=P_002000000*R_011[0]+-1*P_102000000*R_111[0]+P_202000000*R_211[0];
double PR_001001000011=P_001001000*R_011[0]+-1*P_001101000*R_021[0]+-1*P_101001000*R_111[0]+P_101101000*R_121[0];
double PR_000002000011=P_000002000*R_011[0]+-1*P_000102000*R_021[0]+P_000202000*R_031[0];
double PR_001000001011=P_001000001*R_011[0]+-1*P_001000101*R_012[0]+-1*P_101000001*R_111[0]+P_101000101*R_112[0];
double PR_000001001011=P_000001001*R_011[0]+-1*P_000001101*R_012[0]+-1*P_000101001*R_021[0]+P_000101101*R_022[0];
double PR_000000002011=P_000000002*R_011[0]+-1*P_000000102*R_012[0]+P_000000202*R_013[0];
double PR_002000000020=P_002000000*R_020[0]+-1*P_102000000*R_120[0]+P_202000000*R_220[0];
double PR_001001000020=P_001001000*R_020[0]+-1*P_001101000*R_030[0]+-1*P_101001000*R_120[0]+P_101101000*R_130[0];
double PR_000002000020=P_000002000*R_020[0]+-1*P_000102000*R_030[0]+P_000202000*R_040[0];
double PR_001000001020=P_001000001*R_020[0]+-1*P_001000101*R_021[0]+-1*P_101000001*R_120[0]+P_101000101*R_121[0];
double PR_000001001020=P_000001001*R_020[0]+-1*P_000001101*R_021[0]+-1*P_000101001*R_030[0]+P_000101101*R_031[0];
double PR_000000002020=P_000000002*R_020[0]+-1*P_000000102*R_021[0]+P_000000202*R_022[0];
double PR_002000000101=P_002000000*R_101[0]+-1*P_102000000*R_201[0]+P_202000000*R_301[0];
double PR_001001000101=P_001001000*R_101[0]+-1*P_001101000*R_111[0]+-1*P_101001000*R_201[0]+P_101101000*R_211[0];
double PR_000002000101=P_000002000*R_101[0]+-1*P_000102000*R_111[0]+P_000202000*R_121[0];
double PR_001000001101=P_001000001*R_101[0]+-1*P_001000101*R_102[0]+-1*P_101000001*R_201[0]+P_101000101*R_202[0];
double PR_000001001101=P_000001001*R_101[0]+-1*P_000001101*R_102[0]+-1*P_000101001*R_111[0]+P_000101101*R_112[0];
double PR_000000002101=P_000000002*R_101[0]+-1*P_000000102*R_102[0]+P_000000202*R_103[0];
double PR_002000000110=P_002000000*R_110[0]+-1*P_102000000*R_210[0]+P_202000000*R_310[0];
double PR_001001000110=P_001001000*R_110[0]+-1*P_001101000*R_120[0]+-1*P_101001000*R_210[0]+P_101101000*R_220[0];
double PR_000002000110=P_000002000*R_110[0]+-1*P_000102000*R_120[0]+P_000202000*R_130[0];
double PR_001000001110=P_001000001*R_110[0]+-1*P_001000101*R_111[0]+-1*P_101000001*R_210[0]+P_101000101*R_211[0];
double PR_000001001110=P_000001001*R_110[0]+-1*P_000001101*R_111[0]+-1*P_000101001*R_120[0]+P_000101101*R_121[0];
double PR_000000002110=P_000000002*R_110[0]+-1*P_000000102*R_111[0]+P_000000202*R_112[0];
double PR_002000000200=P_002000000*R_200[0]+-1*P_102000000*R_300[0]+P_202000000*R_400[0];
double PR_001001000200=P_001001000*R_200[0]+-1*P_001101000*R_210[0]+-1*P_101001000*R_300[0]+P_101101000*R_310[0];
double PR_000002000200=P_000002000*R_200[0]+-1*P_000102000*R_210[0]+P_000202000*R_220[0];
double PR_001000001200=P_001000001*R_200[0]+-1*P_001000101*R_201[0]+-1*P_101000001*R_300[0]+P_101000101*R_301[0];
double PR_000001001200=P_000001001*R_200[0]+-1*P_000001101*R_201[0]+-1*P_000101001*R_210[0]+P_000101101*R_211[0];
double PR_000000002200=P_000000002*R_200[0]+-1*P_000000102*R_201[0]+P_000000202*R_202[0];
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
ans_temp[ans_id*3+0]+=Pmtrx[0]*(Q_011000000*PR_002000000000+Q_111000000*PR_002000000100+Q_211000000*PR_002000000200);
ans_temp[ans_id*3+0]+=Pmtrx[1]*(Q_010001000*PR_002000000000+Q_010101000*PR_002000000010+Q_110001000*PR_002000000100+Q_110101000*PR_002000000110);
ans_temp[ans_id*3+0]+=Pmtrx[2]*(Q_010000001*PR_002000000000+Q_010000101*PR_002000000001+Q_110000001*PR_002000000100+Q_110000101*PR_002000000101);
ans_temp[ans_id*3+1]+=Pmtrx[0]*(Q_001010000*PR_002000000000+Q_001110000*PR_002000000010+Q_101010000*PR_002000000100+Q_101110000*PR_002000000110);
ans_temp[ans_id*3+1]+=Pmtrx[1]*(Q_000011000*PR_002000000000+Q_000111000*PR_002000000010+Q_000211000*PR_002000000020);
ans_temp[ans_id*3+1]+=Pmtrx[2]*(Q_000010001*PR_002000000000+Q_000010101*PR_002000000001+Q_000110001*PR_002000000010+Q_000110101*PR_002000000011);
ans_temp[ans_id*3+2]+=Pmtrx[0]*(Q_001000010*PR_002000000000+Q_001000110*PR_002000000001+Q_101000010*PR_002000000100+Q_101000110*PR_002000000101);
ans_temp[ans_id*3+2]+=Pmtrx[1]*(Q_000001010*PR_002000000000+Q_000001110*PR_002000000001+Q_000101010*PR_002000000010+Q_000101110*PR_002000000011);
ans_temp[ans_id*3+2]+=Pmtrx[2]*(Q_000000011*PR_002000000000+Q_000000111*PR_002000000001+Q_000000211*PR_002000000002);
ans_temp[ans_id*3+0]+=Pmtrx[3]*(Q_011000000*PR_001001000000+Q_111000000*PR_001001000100+Q_211000000*PR_001001000200);
ans_temp[ans_id*3+0]+=Pmtrx[4]*(Q_010001000*PR_001001000000+Q_010101000*PR_001001000010+Q_110001000*PR_001001000100+Q_110101000*PR_001001000110);
ans_temp[ans_id*3+0]+=Pmtrx[5]*(Q_010000001*PR_001001000000+Q_010000101*PR_001001000001+Q_110000001*PR_001001000100+Q_110000101*PR_001001000101);
ans_temp[ans_id*3+1]+=Pmtrx[3]*(Q_001010000*PR_001001000000+Q_001110000*PR_001001000010+Q_101010000*PR_001001000100+Q_101110000*PR_001001000110);
ans_temp[ans_id*3+1]+=Pmtrx[4]*(Q_000011000*PR_001001000000+Q_000111000*PR_001001000010+Q_000211000*PR_001001000020);
ans_temp[ans_id*3+1]+=Pmtrx[5]*(Q_000010001*PR_001001000000+Q_000010101*PR_001001000001+Q_000110001*PR_001001000010+Q_000110101*PR_001001000011);
ans_temp[ans_id*3+2]+=Pmtrx[3]*(Q_001000010*PR_001001000000+Q_001000110*PR_001001000001+Q_101000010*PR_001001000100+Q_101000110*PR_001001000101);
ans_temp[ans_id*3+2]+=Pmtrx[4]*(Q_000001010*PR_001001000000+Q_000001110*PR_001001000001+Q_000101010*PR_001001000010+Q_000101110*PR_001001000011);
ans_temp[ans_id*3+2]+=Pmtrx[5]*(Q_000000011*PR_001001000000+Q_000000111*PR_001001000001+Q_000000211*PR_001001000002);
ans_temp[ans_id*3+0]+=Pmtrx[6]*(Q_011000000*PR_000002000000+Q_111000000*PR_000002000100+Q_211000000*PR_000002000200);
ans_temp[ans_id*3+0]+=Pmtrx[7]*(Q_010001000*PR_000002000000+Q_010101000*PR_000002000010+Q_110001000*PR_000002000100+Q_110101000*PR_000002000110);
ans_temp[ans_id*3+0]+=Pmtrx[8]*(Q_010000001*PR_000002000000+Q_010000101*PR_000002000001+Q_110000001*PR_000002000100+Q_110000101*PR_000002000101);
ans_temp[ans_id*3+1]+=Pmtrx[6]*(Q_001010000*PR_000002000000+Q_001110000*PR_000002000010+Q_101010000*PR_000002000100+Q_101110000*PR_000002000110);
ans_temp[ans_id*3+1]+=Pmtrx[7]*(Q_000011000*PR_000002000000+Q_000111000*PR_000002000010+Q_000211000*PR_000002000020);
ans_temp[ans_id*3+1]+=Pmtrx[8]*(Q_000010001*PR_000002000000+Q_000010101*PR_000002000001+Q_000110001*PR_000002000010+Q_000110101*PR_000002000011);
ans_temp[ans_id*3+2]+=Pmtrx[6]*(Q_001000010*PR_000002000000+Q_001000110*PR_000002000001+Q_101000010*PR_000002000100+Q_101000110*PR_000002000101);
ans_temp[ans_id*3+2]+=Pmtrx[7]*(Q_000001010*PR_000002000000+Q_000001110*PR_000002000001+Q_000101010*PR_000002000010+Q_000101110*PR_000002000011);
ans_temp[ans_id*3+2]+=Pmtrx[8]*(Q_000000011*PR_000002000000+Q_000000111*PR_000002000001+Q_000000211*PR_000002000002);
ans_temp[ans_id*3+0]+=Pmtrx[9]*(Q_011000000*PR_001000001000+Q_111000000*PR_001000001100+Q_211000000*PR_001000001200);
ans_temp[ans_id*3+0]+=Pmtrx[10]*(Q_010001000*PR_001000001000+Q_010101000*PR_001000001010+Q_110001000*PR_001000001100+Q_110101000*PR_001000001110);
ans_temp[ans_id*3+0]+=Pmtrx[11]*(Q_010000001*PR_001000001000+Q_010000101*PR_001000001001+Q_110000001*PR_001000001100+Q_110000101*PR_001000001101);
ans_temp[ans_id*3+1]+=Pmtrx[9]*(Q_001010000*PR_001000001000+Q_001110000*PR_001000001010+Q_101010000*PR_001000001100+Q_101110000*PR_001000001110);
ans_temp[ans_id*3+1]+=Pmtrx[10]*(Q_000011000*PR_001000001000+Q_000111000*PR_001000001010+Q_000211000*PR_001000001020);
ans_temp[ans_id*3+1]+=Pmtrx[11]*(Q_000010001*PR_001000001000+Q_000010101*PR_001000001001+Q_000110001*PR_001000001010+Q_000110101*PR_001000001011);
ans_temp[ans_id*3+2]+=Pmtrx[9]*(Q_001000010*PR_001000001000+Q_001000110*PR_001000001001+Q_101000010*PR_001000001100+Q_101000110*PR_001000001101);
ans_temp[ans_id*3+2]+=Pmtrx[10]*(Q_000001010*PR_001000001000+Q_000001110*PR_001000001001+Q_000101010*PR_001000001010+Q_000101110*PR_001000001011);
ans_temp[ans_id*3+2]+=Pmtrx[11]*(Q_000000011*PR_001000001000+Q_000000111*PR_001000001001+Q_000000211*PR_001000001002);
ans_temp[ans_id*3+0]+=Pmtrx[12]*(Q_011000000*PR_000001001000+Q_111000000*PR_000001001100+Q_211000000*PR_000001001200);
ans_temp[ans_id*3+0]+=Pmtrx[13]*(Q_010001000*PR_000001001000+Q_010101000*PR_000001001010+Q_110001000*PR_000001001100+Q_110101000*PR_000001001110);
ans_temp[ans_id*3+0]+=Pmtrx[14]*(Q_010000001*PR_000001001000+Q_010000101*PR_000001001001+Q_110000001*PR_000001001100+Q_110000101*PR_000001001101);
ans_temp[ans_id*3+1]+=Pmtrx[12]*(Q_001010000*PR_000001001000+Q_001110000*PR_000001001010+Q_101010000*PR_000001001100+Q_101110000*PR_000001001110);
ans_temp[ans_id*3+1]+=Pmtrx[13]*(Q_000011000*PR_000001001000+Q_000111000*PR_000001001010+Q_000211000*PR_000001001020);
ans_temp[ans_id*3+1]+=Pmtrx[14]*(Q_000010001*PR_000001001000+Q_000010101*PR_000001001001+Q_000110001*PR_000001001010+Q_000110101*PR_000001001011);
ans_temp[ans_id*3+2]+=Pmtrx[12]*(Q_001000010*PR_000001001000+Q_001000110*PR_000001001001+Q_101000010*PR_000001001100+Q_101000110*PR_000001001101);
ans_temp[ans_id*3+2]+=Pmtrx[13]*(Q_000001010*PR_000001001000+Q_000001110*PR_000001001001+Q_000101010*PR_000001001010+Q_000101110*PR_000001001011);
ans_temp[ans_id*3+2]+=Pmtrx[14]*(Q_000000011*PR_000001001000+Q_000000111*PR_000001001001+Q_000000211*PR_000001001002);
ans_temp[ans_id*3+0]+=Pmtrx[15]*(Q_011000000*PR_000000002000+Q_111000000*PR_000000002100+Q_211000000*PR_000000002200);
ans_temp[ans_id*3+0]+=Pmtrx[16]*(Q_010001000*PR_000000002000+Q_010101000*PR_000000002010+Q_110001000*PR_000000002100+Q_110101000*PR_000000002110);
ans_temp[ans_id*3+0]+=Pmtrx[17]*(Q_010000001*PR_000000002000+Q_010000101*PR_000000002001+Q_110000001*PR_000000002100+Q_110000101*PR_000000002101);
ans_temp[ans_id*3+1]+=Pmtrx[15]*(Q_001010000*PR_000000002000+Q_001110000*PR_000000002010+Q_101010000*PR_000000002100+Q_101110000*PR_000000002110);
ans_temp[ans_id*3+1]+=Pmtrx[16]*(Q_000011000*PR_000000002000+Q_000111000*PR_000000002010+Q_000211000*PR_000000002020);
ans_temp[ans_id*3+1]+=Pmtrx[17]*(Q_000010001*PR_000000002000+Q_000010101*PR_000000002001+Q_000110001*PR_000000002010+Q_000110101*PR_000000002011);
ans_temp[ans_id*3+2]+=Pmtrx[15]*(Q_001000010*PR_000000002000+Q_001000110*PR_000000002001+Q_101000010*PR_000000002100+Q_101000110*PR_000000002101);
ans_temp[ans_id*3+2]+=Pmtrx[16]*(Q_000001010*PR_000000002000+Q_000001110*PR_000000002001+Q_000101010*PR_000000002010+Q_000101110*PR_000000002011);
ans_temp[ans_id*3+2]+=Pmtrx[17]*(Q_000000011*PR_000000002000+Q_000000111*PR_000000002001+Q_000000211*PR_000000002002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<3;ians++){
ans_temp[tId_x*3+ians]+=ans_temp[(tId_x+num_thread)*3+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<3;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*3+ians]=ans_temp[(tId_x)*3+ians];
}
}
}
}
}
__global__ void MD_Kq_sdpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*3];
for(int i=0;i<3;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
double P_002000000=Pd_002[0];
double P_102000000=Pd_102[0];
double P_202000000=Pd_202[0];
double P_001001000=Pd_001[0]*Pd_001[1];
double P_001101000=Pd_001[0]*Pd_101[1];
double P_101001000=Pd_101[0]*Pd_001[1];
double P_101101000=Pd_101[0]*Pd_101[1];
double P_000002000=Pd_002[1];
double P_000102000=Pd_102[1];
double P_000202000=Pd_202[1];
double P_001000001=Pd_001[0]*Pd_001[2];
double P_001000101=Pd_001[0]*Pd_101[2];
double P_101000001=Pd_101[0]*Pd_001[2];
double P_101000101=Pd_101[0]*Pd_101[2];
double P_000001001=Pd_001[1]*Pd_001[2];
double P_000001101=Pd_001[1]*Pd_101[2];
double P_000101001=Pd_101[1]*Pd_001[2];
double P_000101101=Pd_101[1]*Pd_101[2];
double P_000000002=Pd_002[2];
double P_000000102=Pd_102[2];
double P_000000202=Pd_202[2];
ans_temp[ans_id*3+0]+=Pmtrx[0]*(P_002000000*QR_011000000000+P_102000000*QR_011000000100+P_202000000*QR_011000000200);
ans_temp[ans_id*3+0]+=Pmtrx[1]*(P_002000000*QR_010001000000+P_102000000*QR_010001000100+P_202000000*QR_010001000200);
ans_temp[ans_id*3+0]+=Pmtrx[2]*(P_002000000*QR_010000001000+P_102000000*QR_010000001100+P_202000000*QR_010000001200);
ans_temp[ans_id*3+1]+=Pmtrx[0]*(P_002000000*QR_001010000000+P_102000000*QR_001010000100+P_202000000*QR_001010000200);
ans_temp[ans_id*3+1]+=Pmtrx[1]*(P_002000000*QR_000011000000+P_102000000*QR_000011000100+P_202000000*QR_000011000200);
ans_temp[ans_id*3+1]+=Pmtrx[2]*(P_002000000*QR_000010001000+P_102000000*QR_000010001100+P_202000000*QR_000010001200);
ans_temp[ans_id*3+2]+=Pmtrx[0]*(P_002000000*QR_001000010000+P_102000000*QR_001000010100+P_202000000*QR_001000010200);
ans_temp[ans_id*3+2]+=Pmtrx[1]*(P_002000000*QR_000001010000+P_102000000*QR_000001010100+P_202000000*QR_000001010200);
ans_temp[ans_id*3+2]+=Pmtrx[2]*(P_002000000*QR_000000011000+P_102000000*QR_000000011100+P_202000000*QR_000000011200);
ans_temp[ans_id*3+0]+=Pmtrx[3]*(P_001001000*QR_011000000000+P_001101000*QR_011000000010+P_101001000*QR_011000000100+P_101101000*QR_011000000110);
ans_temp[ans_id*3+0]+=Pmtrx[4]*(P_001001000*QR_010001000000+P_001101000*QR_010001000010+P_101001000*QR_010001000100+P_101101000*QR_010001000110);
ans_temp[ans_id*3+0]+=Pmtrx[5]*(P_001001000*QR_010000001000+P_001101000*QR_010000001010+P_101001000*QR_010000001100+P_101101000*QR_010000001110);
ans_temp[ans_id*3+1]+=Pmtrx[3]*(P_001001000*QR_001010000000+P_001101000*QR_001010000010+P_101001000*QR_001010000100+P_101101000*QR_001010000110);
ans_temp[ans_id*3+1]+=Pmtrx[4]*(P_001001000*QR_000011000000+P_001101000*QR_000011000010+P_101001000*QR_000011000100+P_101101000*QR_000011000110);
ans_temp[ans_id*3+1]+=Pmtrx[5]*(P_001001000*QR_000010001000+P_001101000*QR_000010001010+P_101001000*QR_000010001100+P_101101000*QR_000010001110);
ans_temp[ans_id*3+2]+=Pmtrx[3]*(P_001001000*QR_001000010000+P_001101000*QR_001000010010+P_101001000*QR_001000010100+P_101101000*QR_001000010110);
ans_temp[ans_id*3+2]+=Pmtrx[4]*(P_001001000*QR_000001010000+P_001101000*QR_000001010010+P_101001000*QR_000001010100+P_101101000*QR_000001010110);
ans_temp[ans_id*3+2]+=Pmtrx[5]*(P_001001000*QR_000000011000+P_001101000*QR_000000011010+P_101001000*QR_000000011100+P_101101000*QR_000000011110);
ans_temp[ans_id*3+0]+=Pmtrx[6]*(P_000002000*QR_011000000000+P_000102000*QR_011000000010+P_000202000*QR_011000000020);
ans_temp[ans_id*3+0]+=Pmtrx[7]*(P_000002000*QR_010001000000+P_000102000*QR_010001000010+P_000202000*QR_010001000020);
ans_temp[ans_id*3+0]+=Pmtrx[8]*(P_000002000*QR_010000001000+P_000102000*QR_010000001010+P_000202000*QR_010000001020);
ans_temp[ans_id*3+1]+=Pmtrx[6]*(P_000002000*QR_001010000000+P_000102000*QR_001010000010+P_000202000*QR_001010000020);
ans_temp[ans_id*3+1]+=Pmtrx[7]*(P_000002000*QR_000011000000+P_000102000*QR_000011000010+P_000202000*QR_000011000020);
ans_temp[ans_id*3+1]+=Pmtrx[8]*(P_000002000*QR_000010001000+P_000102000*QR_000010001010+P_000202000*QR_000010001020);
ans_temp[ans_id*3+2]+=Pmtrx[6]*(P_000002000*QR_001000010000+P_000102000*QR_001000010010+P_000202000*QR_001000010020);
ans_temp[ans_id*3+2]+=Pmtrx[7]*(P_000002000*QR_000001010000+P_000102000*QR_000001010010+P_000202000*QR_000001010020);
ans_temp[ans_id*3+2]+=Pmtrx[8]*(P_000002000*QR_000000011000+P_000102000*QR_000000011010+P_000202000*QR_000000011020);
ans_temp[ans_id*3+0]+=Pmtrx[9]*(P_001000001*QR_011000000000+P_001000101*QR_011000000001+P_101000001*QR_011000000100+P_101000101*QR_011000000101);
ans_temp[ans_id*3+0]+=Pmtrx[10]*(P_001000001*QR_010001000000+P_001000101*QR_010001000001+P_101000001*QR_010001000100+P_101000101*QR_010001000101);
ans_temp[ans_id*3+0]+=Pmtrx[11]*(P_001000001*QR_010000001000+P_001000101*QR_010000001001+P_101000001*QR_010000001100+P_101000101*QR_010000001101);
ans_temp[ans_id*3+1]+=Pmtrx[9]*(P_001000001*QR_001010000000+P_001000101*QR_001010000001+P_101000001*QR_001010000100+P_101000101*QR_001010000101);
ans_temp[ans_id*3+1]+=Pmtrx[10]*(P_001000001*QR_000011000000+P_001000101*QR_000011000001+P_101000001*QR_000011000100+P_101000101*QR_000011000101);
ans_temp[ans_id*3+1]+=Pmtrx[11]*(P_001000001*QR_000010001000+P_001000101*QR_000010001001+P_101000001*QR_000010001100+P_101000101*QR_000010001101);
ans_temp[ans_id*3+2]+=Pmtrx[9]*(P_001000001*QR_001000010000+P_001000101*QR_001000010001+P_101000001*QR_001000010100+P_101000101*QR_001000010101);
ans_temp[ans_id*3+2]+=Pmtrx[10]*(P_001000001*QR_000001010000+P_001000101*QR_000001010001+P_101000001*QR_000001010100+P_101000101*QR_000001010101);
ans_temp[ans_id*3+2]+=Pmtrx[11]*(P_001000001*QR_000000011000+P_001000101*QR_000000011001+P_101000001*QR_000000011100+P_101000101*QR_000000011101);
ans_temp[ans_id*3+0]+=Pmtrx[12]*(P_000001001*QR_011000000000+P_000001101*QR_011000000001+P_000101001*QR_011000000010+P_000101101*QR_011000000011);
ans_temp[ans_id*3+0]+=Pmtrx[13]*(P_000001001*QR_010001000000+P_000001101*QR_010001000001+P_000101001*QR_010001000010+P_000101101*QR_010001000011);
ans_temp[ans_id*3+0]+=Pmtrx[14]*(P_000001001*QR_010000001000+P_000001101*QR_010000001001+P_000101001*QR_010000001010+P_000101101*QR_010000001011);
ans_temp[ans_id*3+1]+=Pmtrx[12]*(P_000001001*QR_001010000000+P_000001101*QR_001010000001+P_000101001*QR_001010000010+P_000101101*QR_001010000011);
ans_temp[ans_id*3+1]+=Pmtrx[13]*(P_000001001*QR_000011000000+P_000001101*QR_000011000001+P_000101001*QR_000011000010+P_000101101*QR_000011000011);
ans_temp[ans_id*3+1]+=Pmtrx[14]*(P_000001001*QR_000010001000+P_000001101*QR_000010001001+P_000101001*QR_000010001010+P_000101101*QR_000010001011);
ans_temp[ans_id*3+2]+=Pmtrx[12]*(P_000001001*QR_001000010000+P_000001101*QR_001000010001+P_000101001*QR_001000010010+P_000101101*QR_001000010011);
ans_temp[ans_id*3+2]+=Pmtrx[13]*(P_000001001*QR_000001010000+P_000001101*QR_000001010001+P_000101001*QR_000001010010+P_000101101*QR_000001010011);
ans_temp[ans_id*3+2]+=Pmtrx[14]*(P_000001001*QR_000000011000+P_000001101*QR_000000011001+P_000101001*QR_000000011010+P_000101101*QR_000000011011);
ans_temp[ans_id*3+0]+=Pmtrx[15]*(P_000000002*QR_011000000000+P_000000102*QR_011000000001+P_000000202*QR_011000000002);
ans_temp[ans_id*3+0]+=Pmtrx[16]*(P_000000002*QR_010001000000+P_000000102*QR_010001000001+P_000000202*QR_010001000002);
ans_temp[ans_id*3+0]+=Pmtrx[17]*(P_000000002*QR_010000001000+P_000000102*QR_010000001001+P_000000202*QR_010000001002);
ans_temp[ans_id*3+1]+=Pmtrx[15]*(P_000000002*QR_001010000000+P_000000102*QR_001010000001+P_000000202*QR_001010000002);
ans_temp[ans_id*3+1]+=Pmtrx[16]*(P_000000002*QR_000011000000+P_000000102*QR_000011000001+P_000000202*QR_000011000002);
ans_temp[ans_id*3+1]+=Pmtrx[17]*(P_000000002*QR_000010001000+P_000000102*QR_000010001001+P_000000202*QR_000010001002);
ans_temp[ans_id*3+2]+=Pmtrx[15]*(P_000000002*QR_001000010000+P_000000102*QR_001000010001+P_000000202*QR_001000010002);
ans_temp[ans_id*3+2]+=Pmtrx[16]*(P_000000002*QR_000001010000+P_000000102*QR_000001010001+P_000000202*QR_000001010002);
ans_temp[ans_id*3+2]+=Pmtrx[17]*(P_000000002*QR_000000011000+P_000000102*QR_000000011001+P_000000202*QR_000000011002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<3;ians++){
ans_temp[tId_x*3+ians]+=ans_temp[(tId_x+num_thread)*3+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<3;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*3+ians]=ans_temp[(tId_x)*3+ians];
}
}
}
}
}
__global__ void MD_Kp_pppp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[9]={0.0};
__shared__ double ans_temp[NTHREAD*9];
for(int i=0;i<9;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
if(i_contrc_bra>j_contrc_ket){
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=0.0;
}
}
continue;
}
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<3;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_001[3];
temp_int2=tex1Dfetch(tex_PB,jj*3+0);
Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+1);
Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+2);
Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=2*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
double Pd_101[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
double P_011000000=Pd_011[0];
double P_111000000=Pd_111[0];
double P_211000000=Pd_211[0];
double P_010001000=Pd_010[0]*Pd_001[1];
double P_010101000=Pd_010[0]*Pd_101[1];
double P_110001000=Pd_110[0]*Pd_001[1];
double P_110101000=Pd_110[0]*Pd_101[1];
double P_010000001=Pd_010[0]*Pd_001[2];
double P_010000101=Pd_010[0]*Pd_101[2];
double P_110000001=Pd_110[0]*Pd_001[2];
double P_110000101=Pd_110[0]*Pd_101[2];
double P_001010000=Pd_001[0]*Pd_010[1];
double P_001110000=Pd_001[0]*Pd_110[1];
double P_101010000=Pd_101[0]*Pd_010[1];
double P_101110000=Pd_101[0]*Pd_110[1];
double P_000011000=Pd_011[1];
double P_000111000=Pd_111[1];
double P_000211000=Pd_211[1];
double P_000010001=Pd_010[1]*Pd_001[2];
double P_000010101=Pd_010[1]*Pd_101[2];
double P_000110001=Pd_110[1]*Pd_001[2];
double P_000110101=Pd_110[1]*Pd_101[2];
double P_001000010=Pd_001[0]*Pd_010[2];
double P_001000110=Pd_001[0]*Pd_110[2];
double P_101000010=Pd_101[0]*Pd_010[2];
double P_101000110=Pd_101[0]*Pd_110[2];
double P_000001010=Pd_001[1]*Pd_010[2];
double P_000001110=Pd_001[1]*Pd_110[2];
double P_000101010=Pd_101[1]*Pd_010[2];
double P_000101110=Pd_101[1]*Pd_110[2];
double P_000000011=Pd_011[2];
double P_000000111=Pd_111[2];
double P_000000211=Pd_211[2];
double PR_011000000000=P_011000000*R_000[0]+-1*P_111000000*R_100[0]+P_211000000*R_200[0];
double PR_010001000000=P_010001000*R_000[0]+-1*P_010101000*R_010[0]+-1*P_110001000*R_100[0]+P_110101000*R_110[0];
double PR_010000001000=P_010000001*R_000[0]+-1*P_010000101*R_001[0]+-1*P_110000001*R_100[0]+P_110000101*R_101[0];
double PR_001010000000=P_001010000*R_000[0]+-1*P_001110000*R_010[0]+-1*P_101010000*R_100[0]+P_101110000*R_110[0];
double PR_000011000000=P_000011000*R_000[0]+-1*P_000111000*R_010[0]+P_000211000*R_020[0];
double PR_000010001000=P_000010001*R_000[0]+-1*P_000010101*R_001[0]+-1*P_000110001*R_010[0]+P_000110101*R_011[0];
double PR_001000010000=P_001000010*R_000[0]+-1*P_001000110*R_001[0]+-1*P_101000010*R_100[0]+P_101000110*R_101[0];
double PR_000001010000=P_000001010*R_000[0]+-1*P_000001110*R_001[0]+-1*P_000101010*R_010[0]+P_000101110*R_011[0];
double PR_000000011000=P_000000011*R_000[0]+-1*P_000000111*R_001[0]+P_000000211*R_002[0];
double PR_011000000001=P_011000000*R_001[0]+-1*P_111000000*R_101[0]+P_211000000*R_201[0];
double PR_010001000001=P_010001000*R_001[0]+-1*P_010101000*R_011[0]+-1*P_110001000*R_101[0]+P_110101000*R_111[0];
double PR_010000001001=P_010000001*R_001[0]+-1*P_010000101*R_002[0]+-1*P_110000001*R_101[0]+P_110000101*R_102[0];
double PR_001010000001=P_001010000*R_001[0]+-1*P_001110000*R_011[0]+-1*P_101010000*R_101[0]+P_101110000*R_111[0];
double PR_000011000001=P_000011000*R_001[0]+-1*P_000111000*R_011[0]+P_000211000*R_021[0];
double PR_000010001001=P_000010001*R_001[0]+-1*P_000010101*R_002[0]+-1*P_000110001*R_011[0]+P_000110101*R_012[0];
double PR_001000010001=P_001000010*R_001[0]+-1*P_001000110*R_002[0]+-1*P_101000010*R_101[0]+P_101000110*R_102[0];
double PR_000001010001=P_000001010*R_001[0]+-1*P_000001110*R_002[0]+-1*P_000101010*R_011[0]+P_000101110*R_012[0];
double PR_000000011001=P_000000011*R_001[0]+-1*P_000000111*R_002[0]+P_000000211*R_003[0];
double PR_011000000010=P_011000000*R_010[0]+-1*P_111000000*R_110[0]+P_211000000*R_210[0];
double PR_010001000010=P_010001000*R_010[0]+-1*P_010101000*R_020[0]+-1*P_110001000*R_110[0]+P_110101000*R_120[0];
double PR_010000001010=P_010000001*R_010[0]+-1*P_010000101*R_011[0]+-1*P_110000001*R_110[0]+P_110000101*R_111[0];
double PR_001010000010=P_001010000*R_010[0]+-1*P_001110000*R_020[0]+-1*P_101010000*R_110[0]+P_101110000*R_120[0];
double PR_000011000010=P_000011000*R_010[0]+-1*P_000111000*R_020[0]+P_000211000*R_030[0];
double PR_000010001010=P_000010001*R_010[0]+-1*P_000010101*R_011[0]+-1*P_000110001*R_020[0]+P_000110101*R_021[0];
double PR_001000010010=P_001000010*R_010[0]+-1*P_001000110*R_011[0]+-1*P_101000010*R_110[0]+P_101000110*R_111[0];
double PR_000001010010=P_000001010*R_010[0]+-1*P_000001110*R_011[0]+-1*P_000101010*R_020[0]+P_000101110*R_021[0];
double PR_000000011010=P_000000011*R_010[0]+-1*P_000000111*R_011[0]+P_000000211*R_012[0];
double PR_011000000100=P_011000000*R_100[0]+-1*P_111000000*R_200[0]+P_211000000*R_300[0];
double PR_010001000100=P_010001000*R_100[0]+-1*P_010101000*R_110[0]+-1*P_110001000*R_200[0]+P_110101000*R_210[0];
double PR_010000001100=P_010000001*R_100[0]+-1*P_010000101*R_101[0]+-1*P_110000001*R_200[0]+P_110000101*R_201[0];
double PR_001010000100=P_001010000*R_100[0]+-1*P_001110000*R_110[0]+-1*P_101010000*R_200[0]+P_101110000*R_210[0];
double PR_000011000100=P_000011000*R_100[0]+-1*P_000111000*R_110[0]+P_000211000*R_120[0];
double PR_000010001100=P_000010001*R_100[0]+-1*P_000010101*R_101[0]+-1*P_000110001*R_110[0]+P_000110101*R_111[0];
double PR_001000010100=P_001000010*R_100[0]+-1*P_001000110*R_101[0]+-1*P_101000010*R_200[0]+P_101000110*R_201[0];
double PR_000001010100=P_000001010*R_100[0]+-1*P_000001110*R_101[0]+-1*P_000101010*R_110[0]+P_000101110*R_111[0];
double PR_000000011100=P_000000011*R_100[0]+-1*P_000000111*R_101[0]+P_000000211*R_102[0];
double PR_011000000002=P_011000000*R_002[0]+-1*P_111000000*R_102[0]+P_211000000*R_202[0];
double PR_010001000002=P_010001000*R_002[0]+-1*P_010101000*R_012[0]+-1*P_110001000*R_102[0]+P_110101000*R_112[0];
double PR_010000001002=P_010000001*R_002[0]+-1*P_010000101*R_003[0]+-1*P_110000001*R_102[0]+P_110000101*R_103[0];
double PR_001010000002=P_001010000*R_002[0]+-1*P_001110000*R_012[0]+-1*P_101010000*R_102[0]+P_101110000*R_112[0];
double PR_000011000002=P_000011000*R_002[0]+-1*P_000111000*R_012[0]+P_000211000*R_022[0];
double PR_000010001002=P_000010001*R_002[0]+-1*P_000010101*R_003[0]+-1*P_000110001*R_012[0]+P_000110101*R_013[0];
double PR_001000010002=P_001000010*R_002[0]+-1*P_001000110*R_003[0]+-1*P_101000010*R_102[0]+P_101000110*R_103[0];
double PR_000001010002=P_000001010*R_002[0]+-1*P_000001110*R_003[0]+-1*P_000101010*R_012[0]+P_000101110*R_013[0];
double PR_000000011002=P_000000011*R_002[0]+-1*P_000000111*R_003[0]+P_000000211*R_004[0];
double PR_011000000011=P_011000000*R_011[0]+-1*P_111000000*R_111[0]+P_211000000*R_211[0];
double PR_010001000011=P_010001000*R_011[0]+-1*P_010101000*R_021[0]+-1*P_110001000*R_111[0]+P_110101000*R_121[0];
double PR_010000001011=P_010000001*R_011[0]+-1*P_010000101*R_012[0]+-1*P_110000001*R_111[0]+P_110000101*R_112[0];
double PR_001010000011=P_001010000*R_011[0]+-1*P_001110000*R_021[0]+-1*P_101010000*R_111[0]+P_101110000*R_121[0];
double PR_000011000011=P_000011000*R_011[0]+-1*P_000111000*R_021[0]+P_000211000*R_031[0];
double PR_000010001011=P_000010001*R_011[0]+-1*P_000010101*R_012[0]+-1*P_000110001*R_021[0]+P_000110101*R_022[0];
double PR_001000010011=P_001000010*R_011[0]+-1*P_001000110*R_012[0]+-1*P_101000010*R_111[0]+P_101000110*R_112[0];
double PR_000001010011=P_000001010*R_011[0]+-1*P_000001110*R_012[0]+-1*P_000101010*R_021[0]+P_000101110*R_022[0];
double PR_000000011011=P_000000011*R_011[0]+-1*P_000000111*R_012[0]+P_000000211*R_013[0];
double PR_011000000020=P_011000000*R_020[0]+-1*P_111000000*R_120[0]+P_211000000*R_220[0];
double PR_010001000020=P_010001000*R_020[0]+-1*P_010101000*R_030[0]+-1*P_110001000*R_120[0]+P_110101000*R_130[0];
double PR_010000001020=P_010000001*R_020[0]+-1*P_010000101*R_021[0]+-1*P_110000001*R_120[0]+P_110000101*R_121[0];
double PR_001010000020=P_001010000*R_020[0]+-1*P_001110000*R_030[0]+-1*P_101010000*R_120[0]+P_101110000*R_130[0];
double PR_000011000020=P_000011000*R_020[0]+-1*P_000111000*R_030[0]+P_000211000*R_040[0];
double PR_000010001020=P_000010001*R_020[0]+-1*P_000010101*R_021[0]+-1*P_000110001*R_030[0]+P_000110101*R_031[0];
double PR_001000010020=P_001000010*R_020[0]+-1*P_001000110*R_021[0]+-1*P_101000010*R_120[0]+P_101000110*R_121[0];
double PR_000001010020=P_000001010*R_020[0]+-1*P_000001110*R_021[0]+-1*P_000101010*R_030[0]+P_000101110*R_031[0];
double PR_000000011020=P_000000011*R_020[0]+-1*P_000000111*R_021[0]+P_000000211*R_022[0];
double PR_011000000101=P_011000000*R_101[0]+-1*P_111000000*R_201[0]+P_211000000*R_301[0];
double PR_010001000101=P_010001000*R_101[0]+-1*P_010101000*R_111[0]+-1*P_110001000*R_201[0]+P_110101000*R_211[0];
double PR_010000001101=P_010000001*R_101[0]+-1*P_010000101*R_102[0]+-1*P_110000001*R_201[0]+P_110000101*R_202[0];
double PR_001010000101=P_001010000*R_101[0]+-1*P_001110000*R_111[0]+-1*P_101010000*R_201[0]+P_101110000*R_211[0];
double PR_000011000101=P_000011000*R_101[0]+-1*P_000111000*R_111[0]+P_000211000*R_121[0];
double PR_000010001101=P_000010001*R_101[0]+-1*P_000010101*R_102[0]+-1*P_000110001*R_111[0]+P_000110101*R_112[0];
double PR_001000010101=P_001000010*R_101[0]+-1*P_001000110*R_102[0]+-1*P_101000010*R_201[0]+P_101000110*R_202[0];
double PR_000001010101=P_000001010*R_101[0]+-1*P_000001110*R_102[0]+-1*P_000101010*R_111[0]+P_000101110*R_112[0];
double PR_000000011101=P_000000011*R_101[0]+-1*P_000000111*R_102[0]+P_000000211*R_103[0];
double PR_011000000110=P_011000000*R_110[0]+-1*P_111000000*R_210[0]+P_211000000*R_310[0];
double PR_010001000110=P_010001000*R_110[0]+-1*P_010101000*R_120[0]+-1*P_110001000*R_210[0]+P_110101000*R_220[0];
double PR_010000001110=P_010000001*R_110[0]+-1*P_010000101*R_111[0]+-1*P_110000001*R_210[0]+P_110000101*R_211[0];
double PR_001010000110=P_001010000*R_110[0]+-1*P_001110000*R_120[0]+-1*P_101010000*R_210[0]+P_101110000*R_220[0];
double PR_000011000110=P_000011000*R_110[0]+-1*P_000111000*R_120[0]+P_000211000*R_130[0];
double PR_000010001110=P_000010001*R_110[0]+-1*P_000010101*R_111[0]+-1*P_000110001*R_120[0]+P_000110101*R_121[0];
double PR_001000010110=P_001000010*R_110[0]+-1*P_001000110*R_111[0]+-1*P_101000010*R_210[0]+P_101000110*R_211[0];
double PR_000001010110=P_000001010*R_110[0]+-1*P_000001110*R_111[0]+-1*P_000101010*R_120[0]+P_000101110*R_121[0];
double PR_000000011110=P_000000011*R_110[0]+-1*P_000000111*R_111[0]+P_000000211*R_112[0];
double PR_011000000200=P_011000000*R_200[0]+-1*P_111000000*R_300[0]+P_211000000*R_400[0];
double PR_010001000200=P_010001000*R_200[0]+-1*P_010101000*R_210[0]+-1*P_110001000*R_300[0]+P_110101000*R_310[0];
double PR_010000001200=P_010000001*R_200[0]+-1*P_010000101*R_201[0]+-1*P_110000001*R_300[0]+P_110000101*R_301[0];
double PR_001010000200=P_001010000*R_200[0]+-1*P_001110000*R_210[0]+-1*P_101010000*R_300[0]+P_101110000*R_310[0];
double PR_000011000200=P_000011000*R_200[0]+-1*P_000111000*R_210[0]+P_000211000*R_220[0];
double PR_000010001200=P_000010001*R_200[0]+-1*P_000010101*R_201[0]+-1*P_000110001*R_210[0]+P_000110101*R_211[0];
double PR_001000010200=P_001000010*R_200[0]+-1*P_001000110*R_201[0]+-1*P_101000010*R_300[0]+P_101000110*R_301[0];
double PR_000001010200=P_000001010*R_200[0]+-1*P_000001110*R_201[0]+-1*P_000101010*R_210[0]+P_000101110*R_211[0];
double PR_000000011200=P_000000011*R_200[0]+-1*P_000000111*R_201[0]+P_000000211*R_202[0];
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
ans_temp[ans_id*9+0]+=Pmtrx[0]*(Q_011000000*PR_011000000000+Q_111000000*PR_011000000100+Q_211000000*PR_011000000200);
ans_temp[ans_id*9+0]+=Pmtrx[1]*(Q_010001000*PR_011000000000+Q_010101000*PR_011000000010+Q_110001000*PR_011000000100+Q_110101000*PR_011000000110);
ans_temp[ans_id*9+0]+=Pmtrx[2]*(Q_010000001*PR_011000000000+Q_010000101*PR_011000000001+Q_110000001*PR_011000000100+Q_110000101*PR_011000000101);
ans_temp[ans_id*9+1]+=Pmtrx[0]*(Q_001010000*PR_011000000000+Q_001110000*PR_011000000010+Q_101010000*PR_011000000100+Q_101110000*PR_011000000110);
ans_temp[ans_id*9+1]+=Pmtrx[1]*(Q_000011000*PR_011000000000+Q_000111000*PR_011000000010+Q_000211000*PR_011000000020);
ans_temp[ans_id*9+1]+=Pmtrx[2]*(Q_000010001*PR_011000000000+Q_000010101*PR_011000000001+Q_000110001*PR_011000000010+Q_000110101*PR_011000000011);
ans_temp[ans_id*9+2]+=Pmtrx[0]*(Q_001000010*PR_011000000000+Q_001000110*PR_011000000001+Q_101000010*PR_011000000100+Q_101000110*PR_011000000101);
ans_temp[ans_id*9+2]+=Pmtrx[1]*(Q_000001010*PR_011000000000+Q_000001110*PR_011000000001+Q_000101010*PR_011000000010+Q_000101110*PR_011000000011);
ans_temp[ans_id*9+2]+=Pmtrx[2]*(Q_000000011*PR_011000000000+Q_000000111*PR_011000000001+Q_000000211*PR_011000000002);
ans_temp[ans_id*9+0]+=Pmtrx[3]*(Q_011000000*PR_010001000000+Q_111000000*PR_010001000100+Q_211000000*PR_010001000200);
ans_temp[ans_id*9+0]+=Pmtrx[4]*(Q_010001000*PR_010001000000+Q_010101000*PR_010001000010+Q_110001000*PR_010001000100+Q_110101000*PR_010001000110);
ans_temp[ans_id*9+0]+=Pmtrx[5]*(Q_010000001*PR_010001000000+Q_010000101*PR_010001000001+Q_110000001*PR_010001000100+Q_110000101*PR_010001000101);
ans_temp[ans_id*9+1]+=Pmtrx[3]*(Q_001010000*PR_010001000000+Q_001110000*PR_010001000010+Q_101010000*PR_010001000100+Q_101110000*PR_010001000110);
ans_temp[ans_id*9+1]+=Pmtrx[4]*(Q_000011000*PR_010001000000+Q_000111000*PR_010001000010+Q_000211000*PR_010001000020);
ans_temp[ans_id*9+1]+=Pmtrx[5]*(Q_000010001*PR_010001000000+Q_000010101*PR_010001000001+Q_000110001*PR_010001000010+Q_000110101*PR_010001000011);
ans_temp[ans_id*9+2]+=Pmtrx[3]*(Q_001000010*PR_010001000000+Q_001000110*PR_010001000001+Q_101000010*PR_010001000100+Q_101000110*PR_010001000101);
ans_temp[ans_id*9+2]+=Pmtrx[4]*(Q_000001010*PR_010001000000+Q_000001110*PR_010001000001+Q_000101010*PR_010001000010+Q_000101110*PR_010001000011);
ans_temp[ans_id*9+2]+=Pmtrx[5]*(Q_000000011*PR_010001000000+Q_000000111*PR_010001000001+Q_000000211*PR_010001000002);
ans_temp[ans_id*9+0]+=Pmtrx[6]*(Q_011000000*PR_010000001000+Q_111000000*PR_010000001100+Q_211000000*PR_010000001200);
ans_temp[ans_id*9+0]+=Pmtrx[7]*(Q_010001000*PR_010000001000+Q_010101000*PR_010000001010+Q_110001000*PR_010000001100+Q_110101000*PR_010000001110);
ans_temp[ans_id*9+0]+=Pmtrx[8]*(Q_010000001*PR_010000001000+Q_010000101*PR_010000001001+Q_110000001*PR_010000001100+Q_110000101*PR_010000001101);
ans_temp[ans_id*9+1]+=Pmtrx[6]*(Q_001010000*PR_010000001000+Q_001110000*PR_010000001010+Q_101010000*PR_010000001100+Q_101110000*PR_010000001110);
ans_temp[ans_id*9+1]+=Pmtrx[7]*(Q_000011000*PR_010000001000+Q_000111000*PR_010000001010+Q_000211000*PR_010000001020);
ans_temp[ans_id*9+1]+=Pmtrx[8]*(Q_000010001*PR_010000001000+Q_000010101*PR_010000001001+Q_000110001*PR_010000001010+Q_000110101*PR_010000001011);
ans_temp[ans_id*9+2]+=Pmtrx[6]*(Q_001000010*PR_010000001000+Q_001000110*PR_010000001001+Q_101000010*PR_010000001100+Q_101000110*PR_010000001101);
ans_temp[ans_id*9+2]+=Pmtrx[7]*(Q_000001010*PR_010000001000+Q_000001110*PR_010000001001+Q_000101010*PR_010000001010+Q_000101110*PR_010000001011);
ans_temp[ans_id*9+2]+=Pmtrx[8]*(Q_000000011*PR_010000001000+Q_000000111*PR_010000001001+Q_000000211*PR_010000001002);
ans_temp[ans_id*9+3]+=Pmtrx[0]*(Q_011000000*PR_001010000000+Q_111000000*PR_001010000100+Q_211000000*PR_001010000200);
ans_temp[ans_id*9+3]+=Pmtrx[1]*(Q_010001000*PR_001010000000+Q_010101000*PR_001010000010+Q_110001000*PR_001010000100+Q_110101000*PR_001010000110);
ans_temp[ans_id*9+3]+=Pmtrx[2]*(Q_010000001*PR_001010000000+Q_010000101*PR_001010000001+Q_110000001*PR_001010000100+Q_110000101*PR_001010000101);
ans_temp[ans_id*9+4]+=Pmtrx[0]*(Q_001010000*PR_001010000000+Q_001110000*PR_001010000010+Q_101010000*PR_001010000100+Q_101110000*PR_001010000110);
ans_temp[ans_id*9+4]+=Pmtrx[1]*(Q_000011000*PR_001010000000+Q_000111000*PR_001010000010+Q_000211000*PR_001010000020);
ans_temp[ans_id*9+4]+=Pmtrx[2]*(Q_000010001*PR_001010000000+Q_000010101*PR_001010000001+Q_000110001*PR_001010000010+Q_000110101*PR_001010000011);
ans_temp[ans_id*9+5]+=Pmtrx[0]*(Q_001000010*PR_001010000000+Q_001000110*PR_001010000001+Q_101000010*PR_001010000100+Q_101000110*PR_001010000101);
ans_temp[ans_id*9+5]+=Pmtrx[1]*(Q_000001010*PR_001010000000+Q_000001110*PR_001010000001+Q_000101010*PR_001010000010+Q_000101110*PR_001010000011);
ans_temp[ans_id*9+5]+=Pmtrx[2]*(Q_000000011*PR_001010000000+Q_000000111*PR_001010000001+Q_000000211*PR_001010000002);
ans_temp[ans_id*9+3]+=Pmtrx[3]*(Q_011000000*PR_000011000000+Q_111000000*PR_000011000100+Q_211000000*PR_000011000200);
ans_temp[ans_id*9+3]+=Pmtrx[4]*(Q_010001000*PR_000011000000+Q_010101000*PR_000011000010+Q_110001000*PR_000011000100+Q_110101000*PR_000011000110);
ans_temp[ans_id*9+3]+=Pmtrx[5]*(Q_010000001*PR_000011000000+Q_010000101*PR_000011000001+Q_110000001*PR_000011000100+Q_110000101*PR_000011000101);
ans_temp[ans_id*9+4]+=Pmtrx[3]*(Q_001010000*PR_000011000000+Q_001110000*PR_000011000010+Q_101010000*PR_000011000100+Q_101110000*PR_000011000110);
ans_temp[ans_id*9+4]+=Pmtrx[4]*(Q_000011000*PR_000011000000+Q_000111000*PR_000011000010+Q_000211000*PR_000011000020);
ans_temp[ans_id*9+4]+=Pmtrx[5]*(Q_000010001*PR_000011000000+Q_000010101*PR_000011000001+Q_000110001*PR_000011000010+Q_000110101*PR_000011000011);
ans_temp[ans_id*9+5]+=Pmtrx[3]*(Q_001000010*PR_000011000000+Q_001000110*PR_000011000001+Q_101000010*PR_000011000100+Q_101000110*PR_000011000101);
ans_temp[ans_id*9+5]+=Pmtrx[4]*(Q_000001010*PR_000011000000+Q_000001110*PR_000011000001+Q_000101010*PR_000011000010+Q_000101110*PR_000011000011);
ans_temp[ans_id*9+5]+=Pmtrx[5]*(Q_000000011*PR_000011000000+Q_000000111*PR_000011000001+Q_000000211*PR_000011000002);
ans_temp[ans_id*9+3]+=Pmtrx[6]*(Q_011000000*PR_000010001000+Q_111000000*PR_000010001100+Q_211000000*PR_000010001200);
ans_temp[ans_id*9+3]+=Pmtrx[7]*(Q_010001000*PR_000010001000+Q_010101000*PR_000010001010+Q_110001000*PR_000010001100+Q_110101000*PR_000010001110);
ans_temp[ans_id*9+3]+=Pmtrx[8]*(Q_010000001*PR_000010001000+Q_010000101*PR_000010001001+Q_110000001*PR_000010001100+Q_110000101*PR_000010001101);
ans_temp[ans_id*9+4]+=Pmtrx[6]*(Q_001010000*PR_000010001000+Q_001110000*PR_000010001010+Q_101010000*PR_000010001100+Q_101110000*PR_000010001110);
ans_temp[ans_id*9+4]+=Pmtrx[7]*(Q_000011000*PR_000010001000+Q_000111000*PR_000010001010+Q_000211000*PR_000010001020);
ans_temp[ans_id*9+4]+=Pmtrx[8]*(Q_000010001*PR_000010001000+Q_000010101*PR_000010001001+Q_000110001*PR_000010001010+Q_000110101*PR_000010001011);
ans_temp[ans_id*9+5]+=Pmtrx[6]*(Q_001000010*PR_000010001000+Q_001000110*PR_000010001001+Q_101000010*PR_000010001100+Q_101000110*PR_000010001101);
ans_temp[ans_id*9+5]+=Pmtrx[7]*(Q_000001010*PR_000010001000+Q_000001110*PR_000010001001+Q_000101010*PR_000010001010+Q_000101110*PR_000010001011);
ans_temp[ans_id*9+5]+=Pmtrx[8]*(Q_000000011*PR_000010001000+Q_000000111*PR_000010001001+Q_000000211*PR_000010001002);
ans_temp[ans_id*9+6]+=Pmtrx[0]*(Q_011000000*PR_001000010000+Q_111000000*PR_001000010100+Q_211000000*PR_001000010200);
ans_temp[ans_id*9+6]+=Pmtrx[1]*(Q_010001000*PR_001000010000+Q_010101000*PR_001000010010+Q_110001000*PR_001000010100+Q_110101000*PR_001000010110);
ans_temp[ans_id*9+6]+=Pmtrx[2]*(Q_010000001*PR_001000010000+Q_010000101*PR_001000010001+Q_110000001*PR_001000010100+Q_110000101*PR_001000010101);
ans_temp[ans_id*9+7]+=Pmtrx[0]*(Q_001010000*PR_001000010000+Q_001110000*PR_001000010010+Q_101010000*PR_001000010100+Q_101110000*PR_001000010110);
ans_temp[ans_id*9+7]+=Pmtrx[1]*(Q_000011000*PR_001000010000+Q_000111000*PR_001000010010+Q_000211000*PR_001000010020);
ans_temp[ans_id*9+7]+=Pmtrx[2]*(Q_000010001*PR_001000010000+Q_000010101*PR_001000010001+Q_000110001*PR_001000010010+Q_000110101*PR_001000010011);
ans_temp[ans_id*9+8]+=Pmtrx[0]*(Q_001000010*PR_001000010000+Q_001000110*PR_001000010001+Q_101000010*PR_001000010100+Q_101000110*PR_001000010101);
ans_temp[ans_id*9+8]+=Pmtrx[1]*(Q_000001010*PR_001000010000+Q_000001110*PR_001000010001+Q_000101010*PR_001000010010+Q_000101110*PR_001000010011);
ans_temp[ans_id*9+8]+=Pmtrx[2]*(Q_000000011*PR_001000010000+Q_000000111*PR_001000010001+Q_000000211*PR_001000010002);
ans_temp[ans_id*9+6]+=Pmtrx[3]*(Q_011000000*PR_000001010000+Q_111000000*PR_000001010100+Q_211000000*PR_000001010200);
ans_temp[ans_id*9+6]+=Pmtrx[4]*(Q_010001000*PR_000001010000+Q_010101000*PR_000001010010+Q_110001000*PR_000001010100+Q_110101000*PR_000001010110);
ans_temp[ans_id*9+6]+=Pmtrx[5]*(Q_010000001*PR_000001010000+Q_010000101*PR_000001010001+Q_110000001*PR_000001010100+Q_110000101*PR_000001010101);
ans_temp[ans_id*9+7]+=Pmtrx[3]*(Q_001010000*PR_000001010000+Q_001110000*PR_000001010010+Q_101010000*PR_000001010100+Q_101110000*PR_000001010110);
ans_temp[ans_id*9+7]+=Pmtrx[4]*(Q_000011000*PR_000001010000+Q_000111000*PR_000001010010+Q_000211000*PR_000001010020);
ans_temp[ans_id*9+7]+=Pmtrx[5]*(Q_000010001*PR_000001010000+Q_000010101*PR_000001010001+Q_000110001*PR_000001010010+Q_000110101*PR_000001010011);
ans_temp[ans_id*9+8]+=Pmtrx[3]*(Q_001000010*PR_000001010000+Q_001000110*PR_000001010001+Q_101000010*PR_000001010100+Q_101000110*PR_000001010101);
ans_temp[ans_id*9+8]+=Pmtrx[4]*(Q_000001010*PR_000001010000+Q_000001110*PR_000001010001+Q_000101010*PR_000001010010+Q_000101110*PR_000001010011);
ans_temp[ans_id*9+8]+=Pmtrx[5]*(Q_000000011*PR_000001010000+Q_000000111*PR_000001010001+Q_000000211*PR_000001010002);
ans_temp[ans_id*9+6]+=Pmtrx[6]*(Q_011000000*PR_000000011000+Q_111000000*PR_000000011100+Q_211000000*PR_000000011200);
ans_temp[ans_id*9+6]+=Pmtrx[7]*(Q_010001000*PR_000000011000+Q_010101000*PR_000000011010+Q_110001000*PR_000000011100+Q_110101000*PR_000000011110);
ans_temp[ans_id*9+6]+=Pmtrx[8]*(Q_010000001*PR_000000011000+Q_010000101*PR_000000011001+Q_110000001*PR_000000011100+Q_110000101*PR_000000011101);
ans_temp[ans_id*9+7]+=Pmtrx[6]*(Q_001010000*PR_000000011000+Q_001110000*PR_000000011010+Q_101010000*PR_000000011100+Q_101110000*PR_000000011110);
ans_temp[ans_id*9+7]+=Pmtrx[7]*(Q_000011000*PR_000000011000+Q_000111000*PR_000000011010+Q_000211000*PR_000000011020);
ans_temp[ans_id*9+7]+=Pmtrx[8]*(Q_000010001*PR_000000011000+Q_000010101*PR_000000011001+Q_000110001*PR_000000011010+Q_000110101*PR_000000011011);
ans_temp[ans_id*9+8]+=Pmtrx[6]*(Q_001000010*PR_000000011000+Q_001000110*PR_000000011001+Q_101000010*PR_000000011100+Q_101000110*PR_000000011101);
ans_temp[ans_id*9+8]+=Pmtrx[7]*(Q_000001010*PR_000000011000+Q_000001110*PR_000000011001+Q_000101010*PR_000000011010+Q_000101110*PR_000000011011);
ans_temp[ans_id*9+8]+=Pmtrx[8]*(Q_000000011*PR_000000011000+Q_000000111*PR_000000011001+Q_000000211*PR_000000011002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<9;ians++){
ans_temp[tId_x*9+ians]+=ans_temp[(tId_x+num_thread)*9+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=ans_temp[(tId_x)*9+ians];
}
}
}
}
}
__global__ void MD_Kq_pppp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[9]={0.0};
__shared__ double ans_temp[NTHREAD*9];
for(int i=0;i<9;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
if(i_contrc_bra>j_contrc_ket){
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=0.0;
}
}
continue;
}
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<3;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=2*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double Pd_101[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
double P_011000000=Pd_011[0];
double P_111000000=Pd_111[0];
double P_211000000=Pd_211[0];
double P_010001000=Pd_010[0]*Pd_001[1];
double P_010101000=Pd_010[0]*Pd_101[1];
double P_110001000=Pd_110[0]*Pd_001[1];
double P_110101000=Pd_110[0]*Pd_101[1];
double P_010000001=Pd_010[0]*Pd_001[2];
double P_010000101=Pd_010[0]*Pd_101[2];
double P_110000001=Pd_110[0]*Pd_001[2];
double P_110000101=Pd_110[0]*Pd_101[2];
double P_001010000=Pd_001[0]*Pd_010[1];
double P_001110000=Pd_001[0]*Pd_110[1];
double P_101010000=Pd_101[0]*Pd_010[1];
double P_101110000=Pd_101[0]*Pd_110[1];
double P_000011000=Pd_011[1];
double P_000111000=Pd_111[1];
double P_000211000=Pd_211[1];
double P_000010001=Pd_010[1]*Pd_001[2];
double P_000010101=Pd_010[1]*Pd_101[2];
double P_000110001=Pd_110[1]*Pd_001[2];
double P_000110101=Pd_110[1]*Pd_101[2];
double P_001000010=Pd_001[0]*Pd_010[2];
double P_001000110=Pd_001[0]*Pd_110[2];
double P_101000010=Pd_101[0]*Pd_010[2];
double P_101000110=Pd_101[0]*Pd_110[2];
double P_000001010=Pd_001[1]*Pd_010[2];
double P_000001110=Pd_001[1]*Pd_110[2];
double P_000101010=Pd_101[1]*Pd_010[2];
double P_000101110=Pd_101[1]*Pd_110[2];
double P_000000011=Pd_011[2];
double P_000000111=Pd_111[2];
double P_000000211=Pd_211[2];
ans_temp[ans_id*9+0]+=Pmtrx[0]*(P_011000000*QR_011000000000+P_111000000*QR_011000000100+P_211000000*QR_011000000200);
ans_temp[ans_id*9+0]+=Pmtrx[1]*(P_011000000*QR_010001000000+P_111000000*QR_010001000100+P_211000000*QR_010001000200);
ans_temp[ans_id*9+0]+=Pmtrx[2]*(P_011000000*QR_010000001000+P_111000000*QR_010000001100+P_211000000*QR_010000001200);
ans_temp[ans_id*9+1]+=Pmtrx[0]*(P_011000000*QR_001010000000+P_111000000*QR_001010000100+P_211000000*QR_001010000200);
ans_temp[ans_id*9+1]+=Pmtrx[1]*(P_011000000*QR_000011000000+P_111000000*QR_000011000100+P_211000000*QR_000011000200);
ans_temp[ans_id*9+1]+=Pmtrx[2]*(P_011000000*QR_000010001000+P_111000000*QR_000010001100+P_211000000*QR_000010001200);
ans_temp[ans_id*9+2]+=Pmtrx[0]*(P_011000000*QR_001000010000+P_111000000*QR_001000010100+P_211000000*QR_001000010200);
ans_temp[ans_id*9+2]+=Pmtrx[1]*(P_011000000*QR_000001010000+P_111000000*QR_000001010100+P_211000000*QR_000001010200);
ans_temp[ans_id*9+2]+=Pmtrx[2]*(P_011000000*QR_000000011000+P_111000000*QR_000000011100+P_211000000*QR_000000011200);
ans_temp[ans_id*9+0]+=Pmtrx[3]*(P_010001000*QR_011000000000+P_010101000*QR_011000000010+P_110001000*QR_011000000100+P_110101000*QR_011000000110);
ans_temp[ans_id*9+0]+=Pmtrx[4]*(P_010001000*QR_010001000000+P_010101000*QR_010001000010+P_110001000*QR_010001000100+P_110101000*QR_010001000110);
ans_temp[ans_id*9+0]+=Pmtrx[5]*(P_010001000*QR_010000001000+P_010101000*QR_010000001010+P_110001000*QR_010000001100+P_110101000*QR_010000001110);
ans_temp[ans_id*9+1]+=Pmtrx[3]*(P_010001000*QR_001010000000+P_010101000*QR_001010000010+P_110001000*QR_001010000100+P_110101000*QR_001010000110);
ans_temp[ans_id*9+1]+=Pmtrx[4]*(P_010001000*QR_000011000000+P_010101000*QR_000011000010+P_110001000*QR_000011000100+P_110101000*QR_000011000110);
ans_temp[ans_id*9+1]+=Pmtrx[5]*(P_010001000*QR_000010001000+P_010101000*QR_000010001010+P_110001000*QR_000010001100+P_110101000*QR_000010001110);
ans_temp[ans_id*9+2]+=Pmtrx[3]*(P_010001000*QR_001000010000+P_010101000*QR_001000010010+P_110001000*QR_001000010100+P_110101000*QR_001000010110);
ans_temp[ans_id*9+2]+=Pmtrx[4]*(P_010001000*QR_000001010000+P_010101000*QR_000001010010+P_110001000*QR_000001010100+P_110101000*QR_000001010110);
ans_temp[ans_id*9+2]+=Pmtrx[5]*(P_010001000*QR_000000011000+P_010101000*QR_000000011010+P_110001000*QR_000000011100+P_110101000*QR_000000011110);
ans_temp[ans_id*9+0]+=Pmtrx[6]*(P_010000001*QR_011000000000+P_010000101*QR_011000000001+P_110000001*QR_011000000100+P_110000101*QR_011000000101);
ans_temp[ans_id*9+0]+=Pmtrx[7]*(P_010000001*QR_010001000000+P_010000101*QR_010001000001+P_110000001*QR_010001000100+P_110000101*QR_010001000101);
ans_temp[ans_id*9+0]+=Pmtrx[8]*(P_010000001*QR_010000001000+P_010000101*QR_010000001001+P_110000001*QR_010000001100+P_110000101*QR_010000001101);
ans_temp[ans_id*9+1]+=Pmtrx[6]*(P_010000001*QR_001010000000+P_010000101*QR_001010000001+P_110000001*QR_001010000100+P_110000101*QR_001010000101);
ans_temp[ans_id*9+1]+=Pmtrx[7]*(P_010000001*QR_000011000000+P_010000101*QR_000011000001+P_110000001*QR_000011000100+P_110000101*QR_000011000101);
ans_temp[ans_id*9+1]+=Pmtrx[8]*(P_010000001*QR_000010001000+P_010000101*QR_000010001001+P_110000001*QR_000010001100+P_110000101*QR_000010001101);
ans_temp[ans_id*9+2]+=Pmtrx[6]*(P_010000001*QR_001000010000+P_010000101*QR_001000010001+P_110000001*QR_001000010100+P_110000101*QR_001000010101);
ans_temp[ans_id*9+2]+=Pmtrx[7]*(P_010000001*QR_000001010000+P_010000101*QR_000001010001+P_110000001*QR_000001010100+P_110000101*QR_000001010101);
ans_temp[ans_id*9+2]+=Pmtrx[8]*(P_010000001*QR_000000011000+P_010000101*QR_000000011001+P_110000001*QR_000000011100+P_110000101*QR_000000011101);
ans_temp[ans_id*9+3]+=Pmtrx[0]*(P_001010000*QR_011000000000+P_001110000*QR_011000000010+P_101010000*QR_011000000100+P_101110000*QR_011000000110);
ans_temp[ans_id*9+3]+=Pmtrx[1]*(P_001010000*QR_010001000000+P_001110000*QR_010001000010+P_101010000*QR_010001000100+P_101110000*QR_010001000110);
ans_temp[ans_id*9+3]+=Pmtrx[2]*(P_001010000*QR_010000001000+P_001110000*QR_010000001010+P_101010000*QR_010000001100+P_101110000*QR_010000001110);
ans_temp[ans_id*9+4]+=Pmtrx[0]*(P_001010000*QR_001010000000+P_001110000*QR_001010000010+P_101010000*QR_001010000100+P_101110000*QR_001010000110);
ans_temp[ans_id*9+4]+=Pmtrx[1]*(P_001010000*QR_000011000000+P_001110000*QR_000011000010+P_101010000*QR_000011000100+P_101110000*QR_000011000110);
ans_temp[ans_id*9+4]+=Pmtrx[2]*(P_001010000*QR_000010001000+P_001110000*QR_000010001010+P_101010000*QR_000010001100+P_101110000*QR_000010001110);
ans_temp[ans_id*9+5]+=Pmtrx[0]*(P_001010000*QR_001000010000+P_001110000*QR_001000010010+P_101010000*QR_001000010100+P_101110000*QR_001000010110);
ans_temp[ans_id*9+5]+=Pmtrx[1]*(P_001010000*QR_000001010000+P_001110000*QR_000001010010+P_101010000*QR_000001010100+P_101110000*QR_000001010110);
ans_temp[ans_id*9+5]+=Pmtrx[2]*(P_001010000*QR_000000011000+P_001110000*QR_000000011010+P_101010000*QR_000000011100+P_101110000*QR_000000011110);
ans_temp[ans_id*9+3]+=Pmtrx[3]*(P_000011000*QR_011000000000+P_000111000*QR_011000000010+P_000211000*QR_011000000020);
ans_temp[ans_id*9+3]+=Pmtrx[4]*(P_000011000*QR_010001000000+P_000111000*QR_010001000010+P_000211000*QR_010001000020);
ans_temp[ans_id*9+3]+=Pmtrx[5]*(P_000011000*QR_010000001000+P_000111000*QR_010000001010+P_000211000*QR_010000001020);
ans_temp[ans_id*9+4]+=Pmtrx[3]*(P_000011000*QR_001010000000+P_000111000*QR_001010000010+P_000211000*QR_001010000020);
ans_temp[ans_id*9+4]+=Pmtrx[4]*(P_000011000*QR_000011000000+P_000111000*QR_000011000010+P_000211000*QR_000011000020);
ans_temp[ans_id*9+4]+=Pmtrx[5]*(P_000011000*QR_000010001000+P_000111000*QR_000010001010+P_000211000*QR_000010001020);
ans_temp[ans_id*9+5]+=Pmtrx[3]*(P_000011000*QR_001000010000+P_000111000*QR_001000010010+P_000211000*QR_001000010020);
ans_temp[ans_id*9+5]+=Pmtrx[4]*(P_000011000*QR_000001010000+P_000111000*QR_000001010010+P_000211000*QR_000001010020);
ans_temp[ans_id*9+5]+=Pmtrx[5]*(P_000011000*QR_000000011000+P_000111000*QR_000000011010+P_000211000*QR_000000011020);
ans_temp[ans_id*9+3]+=Pmtrx[6]*(P_000010001*QR_011000000000+P_000010101*QR_011000000001+P_000110001*QR_011000000010+P_000110101*QR_011000000011);
ans_temp[ans_id*9+3]+=Pmtrx[7]*(P_000010001*QR_010001000000+P_000010101*QR_010001000001+P_000110001*QR_010001000010+P_000110101*QR_010001000011);
ans_temp[ans_id*9+3]+=Pmtrx[8]*(P_000010001*QR_010000001000+P_000010101*QR_010000001001+P_000110001*QR_010000001010+P_000110101*QR_010000001011);
ans_temp[ans_id*9+4]+=Pmtrx[6]*(P_000010001*QR_001010000000+P_000010101*QR_001010000001+P_000110001*QR_001010000010+P_000110101*QR_001010000011);
ans_temp[ans_id*9+4]+=Pmtrx[7]*(P_000010001*QR_000011000000+P_000010101*QR_000011000001+P_000110001*QR_000011000010+P_000110101*QR_000011000011);
ans_temp[ans_id*9+4]+=Pmtrx[8]*(P_000010001*QR_000010001000+P_000010101*QR_000010001001+P_000110001*QR_000010001010+P_000110101*QR_000010001011);
ans_temp[ans_id*9+5]+=Pmtrx[6]*(P_000010001*QR_001000010000+P_000010101*QR_001000010001+P_000110001*QR_001000010010+P_000110101*QR_001000010011);
ans_temp[ans_id*9+5]+=Pmtrx[7]*(P_000010001*QR_000001010000+P_000010101*QR_000001010001+P_000110001*QR_000001010010+P_000110101*QR_000001010011);
ans_temp[ans_id*9+5]+=Pmtrx[8]*(P_000010001*QR_000000011000+P_000010101*QR_000000011001+P_000110001*QR_000000011010+P_000110101*QR_000000011011);
ans_temp[ans_id*9+6]+=Pmtrx[0]*(P_001000010*QR_011000000000+P_001000110*QR_011000000001+P_101000010*QR_011000000100+P_101000110*QR_011000000101);
ans_temp[ans_id*9+6]+=Pmtrx[1]*(P_001000010*QR_010001000000+P_001000110*QR_010001000001+P_101000010*QR_010001000100+P_101000110*QR_010001000101);
ans_temp[ans_id*9+6]+=Pmtrx[2]*(P_001000010*QR_010000001000+P_001000110*QR_010000001001+P_101000010*QR_010000001100+P_101000110*QR_010000001101);
ans_temp[ans_id*9+7]+=Pmtrx[0]*(P_001000010*QR_001010000000+P_001000110*QR_001010000001+P_101000010*QR_001010000100+P_101000110*QR_001010000101);
ans_temp[ans_id*9+7]+=Pmtrx[1]*(P_001000010*QR_000011000000+P_001000110*QR_000011000001+P_101000010*QR_000011000100+P_101000110*QR_000011000101);
ans_temp[ans_id*9+7]+=Pmtrx[2]*(P_001000010*QR_000010001000+P_001000110*QR_000010001001+P_101000010*QR_000010001100+P_101000110*QR_000010001101);
ans_temp[ans_id*9+8]+=Pmtrx[0]*(P_001000010*QR_001000010000+P_001000110*QR_001000010001+P_101000010*QR_001000010100+P_101000110*QR_001000010101);
ans_temp[ans_id*9+8]+=Pmtrx[1]*(P_001000010*QR_000001010000+P_001000110*QR_000001010001+P_101000010*QR_000001010100+P_101000110*QR_000001010101);
ans_temp[ans_id*9+8]+=Pmtrx[2]*(P_001000010*QR_000000011000+P_001000110*QR_000000011001+P_101000010*QR_000000011100+P_101000110*QR_000000011101);
ans_temp[ans_id*9+6]+=Pmtrx[3]*(P_000001010*QR_011000000000+P_000001110*QR_011000000001+P_000101010*QR_011000000010+P_000101110*QR_011000000011);
ans_temp[ans_id*9+6]+=Pmtrx[4]*(P_000001010*QR_010001000000+P_000001110*QR_010001000001+P_000101010*QR_010001000010+P_000101110*QR_010001000011);
ans_temp[ans_id*9+6]+=Pmtrx[5]*(P_000001010*QR_010000001000+P_000001110*QR_010000001001+P_000101010*QR_010000001010+P_000101110*QR_010000001011);
ans_temp[ans_id*9+7]+=Pmtrx[3]*(P_000001010*QR_001010000000+P_000001110*QR_001010000001+P_000101010*QR_001010000010+P_000101110*QR_001010000011);
ans_temp[ans_id*9+7]+=Pmtrx[4]*(P_000001010*QR_000011000000+P_000001110*QR_000011000001+P_000101010*QR_000011000010+P_000101110*QR_000011000011);
ans_temp[ans_id*9+7]+=Pmtrx[5]*(P_000001010*QR_000010001000+P_000001110*QR_000010001001+P_000101010*QR_000010001010+P_000101110*QR_000010001011);
ans_temp[ans_id*9+8]+=Pmtrx[3]*(P_000001010*QR_001000010000+P_000001110*QR_001000010001+P_000101010*QR_001000010010+P_000101110*QR_001000010011);
ans_temp[ans_id*9+8]+=Pmtrx[4]*(P_000001010*QR_000001010000+P_000001110*QR_000001010001+P_000101010*QR_000001010010+P_000101110*QR_000001010011);
ans_temp[ans_id*9+8]+=Pmtrx[5]*(P_000001010*QR_000000011000+P_000001110*QR_000000011001+P_000101010*QR_000000011010+P_000101110*QR_000000011011);
ans_temp[ans_id*9+6]+=Pmtrx[6]*(P_000000011*QR_011000000000+P_000000111*QR_011000000001+P_000000211*QR_011000000002);
ans_temp[ans_id*9+6]+=Pmtrx[7]*(P_000000011*QR_010001000000+P_000000111*QR_010001000001+P_000000211*QR_010001000002);
ans_temp[ans_id*9+6]+=Pmtrx[8]*(P_000000011*QR_010000001000+P_000000111*QR_010000001001+P_000000211*QR_010000001002);
ans_temp[ans_id*9+7]+=Pmtrx[6]*(P_000000011*QR_001010000000+P_000000111*QR_001010000001+P_000000211*QR_001010000002);
ans_temp[ans_id*9+7]+=Pmtrx[7]*(P_000000011*QR_000011000000+P_000000111*QR_000011000001+P_000000211*QR_000011000002);
ans_temp[ans_id*9+7]+=Pmtrx[8]*(P_000000011*QR_000010001000+P_000000111*QR_000010001001+P_000000211*QR_000010001002);
ans_temp[ans_id*9+8]+=Pmtrx[6]*(P_000000011*QR_001000010000+P_000000111*QR_001000010001+P_000000211*QR_001000010002);
ans_temp[ans_id*9+8]+=Pmtrx[7]*(P_000000011*QR_000001010000+P_000000111*QR_000001010001+P_000000211*QR_000001010002);
ans_temp[ans_id*9+8]+=Pmtrx[8]*(P_000000011*QR_000000011000+P_000000111*QR_000000011001+P_000000211*QR_000000011002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<9;ians++){
ans_temp[tId_x*9+ians]+=ans_temp[(tId_x+num_thread)*9+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=ans_temp[(tId_x)*9+ians];
}
}
}
}
}
__global__ void MD_Kp_pdpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*9];
for(int i=0;i<9;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_001[3];
temp_int2=tex1Dfetch(tex_PB,jj*3+0);
Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+1);
Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+2);
Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[6];
Ft_fs_5(5,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
double R_100[5];
double R_200[4];
double R_300[3];
double R_400[2];
double R_500[1];
double R_010[5];
double R_110[4];
double R_210[3];
double R_310[2];
double R_410[1];
double R_020[4];
double R_120[3];
double R_220[2];
double R_320[1];
double R_030[3];
double R_130[2];
double R_230[1];
double R_040[2];
double R_140[1];
double R_050[1];
double R_001[5];
double R_101[4];
double R_201[3];
double R_301[2];
double R_401[1];
double R_011[4];
double R_111[3];
double R_211[2];
double R_311[1];
double R_021[3];
double R_121[2];
double R_221[1];
double R_031[2];
double R_131[1];
double R_041[1];
double R_002[4];
double R_102[3];
double R_202[2];
double R_302[1];
double R_012[3];
double R_112[2];
double R_212[1];
double R_022[2];
double R_122[1];
double R_032[1];
double R_003[3];
double R_103[2];
double R_203[1];
double R_013[2];
double R_113[1];
double R_023[1];
double R_004[2];
double R_104[1];
double R_014[1];
double R_005[1];
for(int i=0;i<5;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<5;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<5;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<4;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<4;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<4;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<4;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<3;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<3;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<3;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<3;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<3;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<3;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<3;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<3;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<3;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<2;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<2;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<2;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<2;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<2;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<2;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<2;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<2;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<2;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<2;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<2;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<2;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<2;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<1;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<1;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<1;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<1;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<1;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<1;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<1;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<1;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<1;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<1;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<1;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<1;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<1;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<1;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<1;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<1;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<1;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<1;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<1;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<1;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<1;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_012[3];
double Pd_112[3];
double Pd_212[3];
double Pd_312[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_012[i]=Pd_111[i]+Pd_001[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_112[i]=2*Pd_211[i]+Pd_001[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_212[i]=Pd_001[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_312[i]=aPin1*Pd_211[i];
}
double P_012000000=Pd_012[0];
double P_112000000=Pd_112[0];
double P_212000000=Pd_212[0];
double P_312000000=Pd_312[0];
double P_011001000=Pd_011[0]*Pd_001[1];
double P_011101000=Pd_011[0]*Pd_101[1];
double P_111001000=Pd_111[0]*Pd_001[1];
double P_111101000=Pd_111[0]*Pd_101[1];
double P_211001000=Pd_211[0]*Pd_001[1];
double P_211101000=Pd_211[0]*Pd_101[1];
double P_010002000=Pd_010[0]*Pd_002[1];
double P_010102000=Pd_010[0]*Pd_102[1];
double P_010202000=Pd_010[0]*Pd_202[1];
double P_110002000=Pd_110[0]*Pd_002[1];
double P_110102000=Pd_110[0]*Pd_102[1];
double P_110202000=Pd_110[0]*Pd_202[1];
double P_011000001=Pd_011[0]*Pd_001[2];
double P_011000101=Pd_011[0]*Pd_101[2];
double P_111000001=Pd_111[0]*Pd_001[2];
double P_111000101=Pd_111[0]*Pd_101[2];
double P_211000001=Pd_211[0]*Pd_001[2];
double P_211000101=Pd_211[0]*Pd_101[2];
double P_010001001=Pd_010[0]*Pd_001[1]*Pd_001[2];
double P_010001101=Pd_010[0]*Pd_001[1]*Pd_101[2];
double P_010101001=Pd_010[0]*Pd_101[1]*Pd_001[2];
double P_010101101=Pd_010[0]*Pd_101[1]*Pd_101[2];
double P_110001001=Pd_110[0]*Pd_001[1]*Pd_001[2];
double P_110001101=Pd_110[0]*Pd_001[1]*Pd_101[2];
double P_110101001=Pd_110[0]*Pd_101[1]*Pd_001[2];
double P_110101101=Pd_110[0]*Pd_101[1]*Pd_101[2];
double P_010000002=Pd_010[0]*Pd_002[2];
double P_010000102=Pd_010[0]*Pd_102[2];
double P_010000202=Pd_010[0]*Pd_202[2];
double P_110000002=Pd_110[0]*Pd_002[2];
double P_110000102=Pd_110[0]*Pd_102[2];
double P_110000202=Pd_110[0]*Pd_202[2];
double P_002010000=Pd_002[0]*Pd_010[1];
double P_002110000=Pd_002[0]*Pd_110[1];
double P_102010000=Pd_102[0]*Pd_010[1];
double P_102110000=Pd_102[0]*Pd_110[1];
double P_202010000=Pd_202[0]*Pd_010[1];
double P_202110000=Pd_202[0]*Pd_110[1];
double P_001011000=Pd_001[0]*Pd_011[1];
double P_001111000=Pd_001[0]*Pd_111[1];
double P_001211000=Pd_001[0]*Pd_211[1];
double P_101011000=Pd_101[0]*Pd_011[1];
double P_101111000=Pd_101[0]*Pd_111[1];
double P_101211000=Pd_101[0]*Pd_211[1];
double P_000012000=Pd_012[1];
double P_000112000=Pd_112[1];
double P_000212000=Pd_212[1];
double P_000312000=Pd_312[1];
double P_001010001=Pd_001[0]*Pd_010[1]*Pd_001[2];
double P_001010101=Pd_001[0]*Pd_010[1]*Pd_101[2];
double P_001110001=Pd_001[0]*Pd_110[1]*Pd_001[2];
double P_001110101=Pd_001[0]*Pd_110[1]*Pd_101[2];
double P_101010001=Pd_101[0]*Pd_010[1]*Pd_001[2];
double P_101010101=Pd_101[0]*Pd_010[1]*Pd_101[2];
double P_101110001=Pd_101[0]*Pd_110[1]*Pd_001[2];
double P_101110101=Pd_101[0]*Pd_110[1]*Pd_101[2];
double P_000011001=Pd_011[1]*Pd_001[2];
double P_000011101=Pd_011[1]*Pd_101[2];
double P_000111001=Pd_111[1]*Pd_001[2];
double P_000111101=Pd_111[1]*Pd_101[2];
double P_000211001=Pd_211[1]*Pd_001[2];
double P_000211101=Pd_211[1]*Pd_101[2];
double P_000010002=Pd_010[1]*Pd_002[2];
double P_000010102=Pd_010[1]*Pd_102[2];
double P_000010202=Pd_010[1]*Pd_202[2];
double P_000110002=Pd_110[1]*Pd_002[2];
double P_000110102=Pd_110[1]*Pd_102[2];
double P_000110202=Pd_110[1]*Pd_202[2];
double P_002000010=Pd_002[0]*Pd_010[2];
double P_002000110=Pd_002[0]*Pd_110[2];
double P_102000010=Pd_102[0]*Pd_010[2];
double P_102000110=Pd_102[0]*Pd_110[2];
double P_202000010=Pd_202[0]*Pd_010[2];
double P_202000110=Pd_202[0]*Pd_110[2];
double P_001001010=Pd_001[0]*Pd_001[1]*Pd_010[2];
double P_001001110=Pd_001[0]*Pd_001[1]*Pd_110[2];
double P_001101010=Pd_001[0]*Pd_101[1]*Pd_010[2];
double P_001101110=Pd_001[0]*Pd_101[1]*Pd_110[2];
double P_101001010=Pd_101[0]*Pd_001[1]*Pd_010[2];
double P_101001110=Pd_101[0]*Pd_001[1]*Pd_110[2];
double P_101101010=Pd_101[0]*Pd_101[1]*Pd_010[2];
double P_101101110=Pd_101[0]*Pd_101[1]*Pd_110[2];
double P_000002010=Pd_002[1]*Pd_010[2];
double P_000002110=Pd_002[1]*Pd_110[2];
double P_000102010=Pd_102[1]*Pd_010[2];
double P_000102110=Pd_102[1]*Pd_110[2];
double P_000202010=Pd_202[1]*Pd_010[2];
double P_000202110=Pd_202[1]*Pd_110[2];
double P_001000011=Pd_001[0]*Pd_011[2];
double P_001000111=Pd_001[0]*Pd_111[2];
double P_001000211=Pd_001[0]*Pd_211[2];
double P_101000011=Pd_101[0]*Pd_011[2];
double P_101000111=Pd_101[0]*Pd_111[2];
double P_101000211=Pd_101[0]*Pd_211[2];
double P_000001011=Pd_001[1]*Pd_011[2];
double P_000001111=Pd_001[1]*Pd_111[2];
double P_000001211=Pd_001[1]*Pd_211[2];
double P_000101011=Pd_101[1]*Pd_011[2];
double P_000101111=Pd_101[1]*Pd_111[2];
double P_000101211=Pd_101[1]*Pd_211[2];
double P_000000012=Pd_012[2];
double P_000000112=Pd_112[2];
double P_000000212=Pd_212[2];
double P_000000312=Pd_312[2];
double PR_012000000000=P_012000000*R_000[0]+-1*P_112000000*R_100[0]+P_212000000*R_200[0]+-1*P_312000000*R_300[0];
double PR_011001000000=P_011001000*R_000[0]+-1*P_011101000*R_010[0]+-1*P_111001000*R_100[0]+P_111101000*R_110[0]+P_211001000*R_200[0]+-1*P_211101000*R_210[0];
double PR_010002000000=P_010002000*R_000[0]+-1*P_010102000*R_010[0]+P_010202000*R_020[0]+-1*P_110002000*R_100[0]+P_110102000*R_110[0]+-1*P_110202000*R_120[0];
double PR_011000001000=P_011000001*R_000[0]+-1*P_011000101*R_001[0]+-1*P_111000001*R_100[0]+P_111000101*R_101[0]+P_211000001*R_200[0]+-1*P_211000101*R_201[0];
double PR_010001001000=P_010001001*R_000[0]+-1*P_010001101*R_001[0]+-1*P_010101001*R_010[0]+P_010101101*R_011[0]+-1*P_110001001*R_100[0]+P_110001101*R_101[0]+P_110101001*R_110[0]+-1*P_110101101*R_111[0];
double PR_010000002000=P_010000002*R_000[0]+-1*P_010000102*R_001[0]+P_010000202*R_002[0]+-1*P_110000002*R_100[0]+P_110000102*R_101[0]+-1*P_110000202*R_102[0];
double PR_002010000000=P_002010000*R_000[0]+-1*P_002110000*R_010[0]+-1*P_102010000*R_100[0]+P_102110000*R_110[0]+P_202010000*R_200[0]+-1*P_202110000*R_210[0];
double PR_001011000000=P_001011000*R_000[0]+-1*P_001111000*R_010[0]+P_001211000*R_020[0]+-1*P_101011000*R_100[0]+P_101111000*R_110[0]+-1*P_101211000*R_120[0];
double PR_000012000000=P_000012000*R_000[0]+-1*P_000112000*R_010[0]+P_000212000*R_020[0]+-1*P_000312000*R_030[0];
double PR_001010001000=P_001010001*R_000[0]+-1*P_001010101*R_001[0]+-1*P_001110001*R_010[0]+P_001110101*R_011[0]+-1*P_101010001*R_100[0]+P_101010101*R_101[0]+P_101110001*R_110[0]+-1*P_101110101*R_111[0];
double PR_000011001000=P_000011001*R_000[0]+-1*P_000011101*R_001[0]+-1*P_000111001*R_010[0]+P_000111101*R_011[0]+P_000211001*R_020[0]+-1*P_000211101*R_021[0];
double PR_000010002000=P_000010002*R_000[0]+-1*P_000010102*R_001[0]+P_000010202*R_002[0]+-1*P_000110002*R_010[0]+P_000110102*R_011[0]+-1*P_000110202*R_012[0];
double PR_002000010000=P_002000010*R_000[0]+-1*P_002000110*R_001[0]+-1*P_102000010*R_100[0]+P_102000110*R_101[0]+P_202000010*R_200[0]+-1*P_202000110*R_201[0];
double PR_001001010000=P_001001010*R_000[0]+-1*P_001001110*R_001[0]+-1*P_001101010*R_010[0]+P_001101110*R_011[0]+-1*P_101001010*R_100[0]+P_101001110*R_101[0]+P_101101010*R_110[0]+-1*P_101101110*R_111[0];
double PR_000002010000=P_000002010*R_000[0]+-1*P_000002110*R_001[0]+-1*P_000102010*R_010[0]+P_000102110*R_011[0]+P_000202010*R_020[0]+-1*P_000202110*R_021[0];
double PR_001000011000=P_001000011*R_000[0]+-1*P_001000111*R_001[0]+P_001000211*R_002[0]+-1*P_101000011*R_100[0]+P_101000111*R_101[0]+-1*P_101000211*R_102[0];
double PR_000001011000=P_000001011*R_000[0]+-1*P_000001111*R_001[0]+P_000001211*R_002[0]+-1*P_000101011*R_010[0]+P_000101111*R_011[0]+-1*P_000101211*R_012[0];
double PR_000000012000=P_000000012*R_000[0]+-1*P_000000112*R_001[0]+P_000000212*R_002[0]+-1*P_000000312*R_003[0];
double PR_012000000001=P_012000000*R_001[0]+-1*P_112000000*R_101[0]+P_212000000*R_201[0]+-1*P_312000000*R_301[0];
double PR_011001000001=P_011001000*R_001[0]+-1*P_011101000*R_011[0]+-1*P_111001000*R_101[0]+P_111101000*R_111[0]+P_211001000*R_201[0]+-1*P_211101000*R_211[0];
double PR_010002000001=P_010002000*R_001[0]+-1*P_010102000*R_011[0]+P_010202000*R_021[0]+-1*P_110002000*R_101[0]+P_110102000*R_111[0]+-1*P_110202000*R_121[0];
double PR_011000001001=P_011000001*R_001[0]+-1*P_011000101*R_002[0]+-1*P_111000001*R_101[0]+P_111000101*R_102[0]+P_211000001*R_201[0]+-1*P_211000101*R_202[0];
double PR_010001001001=P_010001001*R_001[0]+-1*P_010001101*R_002[0]+-1*P_010101001*R_011[0]+P_010101101*R_012[0]+-1*P_110001001*R_101[0]+P_110001101*R_102[0]+P_110101001*R_111[0]+-1*P_110101101*R_112[0];
double PR_010000002001=P_010000002*R_001[0]+-1*P_010000102*R_002[0]+P_010000202*R_003[0]+-1*P_110000002*R_101[0]+P_110000102*R_102[0]+-1*P_110000202*R_103[0];
double PR_002010000001=P_002010000*R_001[0]+-1*P_002110000*R_011[0]+-1*P_102010000*R_101[0]+P_102110000*R_111[0]+P_202010000*R_201[0]+-1*P_202110000*R_211[0];
double PR_001011000001=P_001011000*R_001[0]+-1*P_001111000*R_011[0]+P_001211000*R_021[0]+-1*P_101011000*R_101[0]+P_101111000*R_111[0]+-1*P_101211000*R_121[0];
double PR_000012000001=P_000012000*R_001[0]+-1*P_000112000*R_011[0]+P_000212000*R_021[0]+-1*P_000312000*R_031[0];
double PR_001010001001=P_001010001*R_001[0]+-1*P_001010101*R_002[0]+-1*P_001110001*R_011[0]+P_001110101*R_012[0]+-1*P_101010001*R_101[0]+P_101010101*R_102[0]+P_101110001*R_111[0]+-1*P_101110101*R_112[0];
double PR_000011001001=P_000011001*R_001[0]+-1*P_000011101*R_002[0]+-1*P_000111001*R_011[0]+P_000111101*R_012[0]+P_000211001*R_021[0]+-1*P_000211101*R_022[0];
double PR_000010002001=P_000010002*R_001[0]+-1*P_000010102*R_002[0]+P_000010202*R_003[0]+-1*P_000110002*R_011[0]+P_000110102*R_012[0]+-1*P_000110202*R_013[0];
double PR_002000010001=P_002000010*R_001[0]+-1*P_002000110*R_002[0]+-1*P_102000010*R_101[0]+P_102000110*R_102[0]+P_202000010*R_201[0]+-1*P_202000110*R_202[0];
double PR_001001010001=P_001001010*R_001[0]+-1*P_001001110*R_002[0]+-1*P_001101010*R_011[0]+P_001101110*R_012[0]+-1*P_101001010*R_101[0]+P_101001110*R_102[0]+P_101101010*R_111[0]+-1*P_101101110*R_112[0];
double PR_000002010001=P_000002010*R_001[0]+-1*P_000002110*R_002[0]+-1*P_000102010*R_011[0]+P_000102110*R_012[0]+P_000202010*R_021[0]+-1*P_000202110*R_022[0];
double PR_001000011001=P_001000011*R_001[0]+-1*P_001000111*R_002[0]+P_001000211*R_003[0]+-1*P_101000011*R_101[0]+P_101000111*R_102[0]+-1*P_101000211*R_103[0];
double PR_000001011001=P_000001011*R_001[0]+-1*P_000001111*R_002[0]+P_000001211*R_003[0]+-1*P_000101011*R_011[0]+P_000101111*R_012[0]+-1*P_000101211*R_013[0];
double PR_000000012001=P_000000012*R_001[0]+-1*P_000000112*R_002[0]+P_000000212*R_003[0]+-1*P_000000312*R_004[0];
double PR_012000000010=P_012000000*R_010[0]+-1*P_112000000*R_110[0]+P_212000000*R_210[0]+-1*P_312000000*R_310[0];
double PR_011001000010=P_011001000*R_010[0]+-1*P_011101000*R_020[0]+-1*P_111001000*R_110[0]+P_111101000*R_120[0]+P_211001000*R_210[0]+-1*P_211101000*R_220[0];
double PR_010002000010=P_010002000*R_010[0]+-1*P_010102000*R_020[0]+P_010202000*R_030[0]+-1*P_110002000*R_110[0]+P_110102000*R_120[0]+-1*P_110202000*R_130[0];
double PR_011000001010=P_011000001*R_010[0]+-1*P_011000101*R_011[0]+-1*P_111000001*R_110[0]+P_111000101*R_111[0]+P_211000001*R_210[0]+-1*P_211000101*R_211[0];
double PR_010001001010=P_010001001*R_010[0]+-1*P_010001101*R_011[0]+-1*P_010101001*R_020[0]+P_010101101*R_021[0]+-1*P_110001001*R_110[0]+P_110001101*R_111[0]+P_110101001*R_120[0]+-1*P_110101101*R_121[0];
double PR_010000002010=P_010000002*R_010[0]+-1*P_010000102*R_011[0]+P_010000202*R_012[0]+-1*P_110000002*R_110[0]+P_110000102*R_111[0]+-1*P_110000202*R_112[0];
double PR_002010000010=P_002010000*R_010[0]+-1*P_002110000*R_020[0]+-1*P_102010000*R_110[0]+P_102110000*R_120[0]+P_202010000*R_210[0]+-1*P_202110000*R_220[0];
double PR_001011000010=P_001011000*R_010[0]+-1*P_001111000*R_020[0]+P_001211000*R_030[0]+-1*P_101011000*R_110[0]+P_101111000*R_120[0]+-1*P_101211000*R_130[0];
double PR_000012000010=P_000012000*R_010[0]+-1*P_000112000*R_020[0]+P_000212000*R_030[0]+-1*P_000312000*R_040[0];
double PR_001010001010=P_001010001*R_010[0]+-1*P_001010101*R_011[0]+-1*P_001110001*R_020[0]+P_001110101*R_021[0]+-1*P_101010001*R_110[0]+P_101010101*R_111[0]+P_101110001*R_120[0]+-1*P_101110101*R_121[0];
double PR_000011001010=P_000011001*R_010[0]+-1*P_000011101*R_011[0]+-1*P_000111001*R_020[0]+P_000111101*R_021[0]+P_000211001*R_030[0]+-1*P_000211101*R_031[0];
double PR_000010002010=P_000010002*R_010[0]+-1*P_000010102*R_011[0]+P_000010202*R_012[0]+-1*P_000110002*R_020[0]+P_000110102*R_021[0]+-1*P_000110202*R_022[0];
double PR_002000010010=P_002000010*R_010[0]+-1*P_002000110*R_011[0]+-1*P_102000010*R_110[0]+P_102000110*R_111[0]+P_202000010*R_210[0]+-1*P_202000110*R_211[0];
double PR_001001010010=P_001001010*R_010[0]+-1*P_001001110*R_011[0]+-1*P_001101010*R_020[0]+P_001101110*R_021[0]+-1*P_101001010*R_110[0]+P_101001110*R_111[0]+P_101101010*R_120[0]+-1*P_101101110*R_121[0];
double PR_000002010010=P_000002010*R_010[0]+-1*P_000002110*R_011[0]+-1*P_000102010*R_020[0]+P_000102110*R_021[0]+P_000202010*R_030[0]+-1*P_000202110*R_031[0];
double PR_001000011010=P_001000011*R_010[0]+-1*P_001000111*R_011[0]+P_001000211*R_012[0]+-1*P_101000011*R_110[0]+P_101000111*R_111[0]+-1*P_101000211*R_112[0];
double PR_000001011010=P_000001011*R_010[0]+-1*P_000001111*R_011[0]+P_000001211*R_012[0]+-1*P_000101011*R_020[0]+P_000101111*R_021[0]+-1*P_000101211*R_022[0];
double PR_000000012010=P_000000012*R_010[0]+-1*P_000000112*R_011[0]+P_000000212*R_012[0]+-1*P_000000312*R_013[0];
double PR_012000000100=P_012000000*R_100[0]+-1*P_112000000*R_200[0]+P_212000000*R_300[0]+-1*P_312000000*R_400[0];
double PR_011001000100=P_011001000*R_100[0]+-1*P_011101000*R_110[0]+-1*P_111001000*R_200[0]+P_111101000*R_210[0]+P_211001000*R_300[0]+-1*P_211101000*R_310[0];
double PR_010002000100=P_010002000*R_100[0]+-1*P_010102000*R_110[0]+P_010202000*R_120[0]+-1*P_110002000*R_200[0]+P_110102000*R_210[0]+-1*P_110202000*R_220[0];
double PR_011000001100=P_011000001*R_100[0]+-1*P_011000101*R_101[0]+-1*P_111000001*R_200[0]+P_111000101*R_201[0]+P_211000001*R_300[0]+-1*P_211000101*R_301[0];
double PR_010001001100=P_010001001*R_100[0]+-1*P_010001101*R_101[0]+-1*P_010101001*R_110[0]+P_010101101*R_111[0]+-1*P_110001001*R_200[0]+P_110001101*R_201[0]+P_110101001*R_210[0]+-1*P_110101101*R_211[0];
double PR_010000002100=P_010000002*R_100[0]+-1*P_010000102*R_101[0]+P_010000202*R_102[0]+-1*P_110000002*R_200[0]+P_110000102*R_201[0]+-1*P_110000202*R_202[0];
double PR_002010000100=P_002010000*R_100[0]+-1*P_002110000*R_110[0]+-1*P_102010000*R_200[0]+P_102110000*R_210[0]+P_202010000*R_300[0]+-1*P_202110000*R_310[0];
double PR_001011000100=P_001011000*R_100[0]+-1*P_001111000*R_110[0]+P_001211000*R_120[0]+-1*P_101011000*R_200[0]+P_101111000*R_210[0]+-1*P_101211000*R_220[0];
double PR_000012000100=P_000012000*R_100[0]+-1*P_000112000*R_110[0]+P_000212000*R_120[0]+-1*P_000312000*R_130[0];
double PR_001010001100=P_001010001*R_100[0]+-1*P_001010101*R_101[0]+-1*P_001110001*R_110[0]+P_001110101*R_111[0]+-1*P_101010001*R_200[0]+P_101010101*R_201[0]+P_101110001*R_210[0]+-1*P_101110101*R_211[0];
double PR_000011001100=P_000011001*R_100[0]+-1*P_000011101*R_101[0]+-1*P_000111001*R_110[0]+P_000111101*R_111[0]+P_000211001*R_120[0]+-1*P_000211101*R_121[0];
double PR_000010002100=P_000010002*R_100[0]+-1*P_000010102*R_101[0]+P_000010202*R_102[0]+-1*P_000110002*R_110[0]+P_000110102*R_111[0]+-1*P_000110202*R_112[0];
double PR_002000010100=P_002000010*R_100[0]+-1*P_002000110*R_101[0]+-1*P_102000010*R_200[0]+P_102000110*R_201[0]+P_202000010*R_300[0]+-1*P_202000110*R_301[0];
double PR_001001010100=P_001001010*R_100[0]+-1*P_001001110*R_101[0]+-1*P_001101010*R_110[0]+P_001101110*R_111[0]+-1*P_101001010*R_200[0]+P_101001110*R_201[0]+P_101101010*R_210[0]+-1*P_101101110*R_211[0];
double PR_000002010100=P_000002010*R_100[0]+-1*P_000002110*R_101[0]+-1*P_000102010*R_110[0]+P_000102110*R_111[0]+P_000202010*R_120[0]+-1*P_000202110*R_121[0];
double PR_001000011100=P_001000011*R_100[0]+-1*P_001000111*R_101[0]+P_001000211*R_102[0]+-1*P_101000011*R_200[0]+P_101000111*R_201[0]+-1*P_101000211*R_202[0];
double PR_000001011100=P_000001011*R_100[0]+-1*P_000001111*R_101[0]+P_000001211*R_102[0]+-1*P_000101011*R_110[0]+P_000101111*R_111[0]+-1*P_000101211*R_112[0];
double PR_000000012100=P_000000012*R_100[0]+-1*P_000000112*R_101[0]+P_000000212*R_102[0]+-1*P_000000312*R_103[0];
double PR_012000000002=P_012000000*R_002[0]+-1*P_112000000*R_102[0]+P_212000000*R_202[0]+-1*P_312000000*R_302[0];
double PR_011001000002=P_011001000*R_002[0]+-1*P_011101000*R_012[0]+-1*P_111001000*R_102[0]+P_111101000*R_112[0]+P_211001000*R_202[0]+-1*P_211101000*R_212[0];
double PR_010002000002=P_010002000*R_002[0]+-1*P_010102000*R_012[0]+P_010202000*R_022[0]+-1*P_110002000*R_102[0]+P_110102000*R_112[0]+-1*P_110202000*R_122[0];
double PR_011000001002=P_011000001*R_002[0]+-1*P_011000101*R_003[0]+-1*P_111000001*R_102[0]+P_111000101*R_103[0]+P_211000001*R_202[0]+-1*P_211000101*R_203[0];
double PR_010001001002=P_010001001*R_002[0]+-1*P_010001101*R_003[0]+-1*P_010101001*R_012[0]+P_010101101*R_013[0]+-1*P_110001001*R_102[0]+P_110001101*R_103[0]+P_110101001*R_112[0]+-1*P_110101101*R_113[0];
double PR_010000002002=P_010000002*R_002[0]+-1*P_010000102*R_003[0]+P_010000202*R_004[0]+-1*P_110000002*R_102[0]+P_110000102*R_103[0]+-1*P_110000202*R_104[0];
double PR_002010000002=P_002010000*R_002[0]+-1*P_002110000*R_012[0]+-1*P_102010000*R_102[0]+P_102110000*R_112[0]+P_202010000*R_202[0]+-1*P_202110000*R_212[0];
double PR_001011000002=P_001011000*R_002[0]+-1*P_001111000*R_012[0]+P_001211000*R_022[0]+-1*P_101011000*R_102[0]+P_101111000*R_112[0]+-1*P_101211000*R_122[0];
double PR_000012000002=P_000012000*R_002[0]+-1*P_000112000*R_012[0]+P_000212000*R_022[0]+-1*P_000312000*R_032[0];
double PR_001010001002=P_001010001*R_002[0]+-1*P_001010101*R_003[0]+-1*P_001110001*R_012[0]+P_001110101*R_013[0]+-1*P_101010001*R_102[0]+P_101010101*R_103[0]+P_101110001*R_112[0]+-1*P_101110101*R_113[0];
double PR_000011001002=P_000011001*R_002[0]+-1*P_000011101*R_003[0]+-1*P_000111001*R_012[0]+P_000111101*R_013[0]+P_000211001*R_022[0]+-1*P_000211101*R_023[0];
double PR_000010002002=P_000010002*R_002[0]+-1*P_000010102*R_003[0]+P_000010202*R_004[0]+-1*P_000110002*R_012[0]+P_000110102*R_013[0]+-1*P_000110202*R_014[0];
double PR_002000010002=P_002000010*R_002[0]+-1*P_002000110*R_003[0]+-1*P_102000010*R_102[0]+P_102000110*R_103[0]+P_202000010*R_202[0]+-1*P_202000110*R_203[0];
double PR_001001010002=P_001001010*R_002[0]+-1*P_001001110*R_003[0]+-1*P_001101010*R_012[0]+P_001101110*R_013[0]+-1*P_101001010*R_102[0]+P_101001110*R_103[0]+P_101101010*R_112[0]+-1*P_101101110*R_113[0];
double PR_000002010002=P_000002010*R_002[0]+-1*P_000002110*R_003[0]+-1*P_000102010*R_012[0]+P_000102110*R_013[0]+P_000202010*R_022[0]+-1*P_000202110*R_023[0];
double PR_001000011002=P_001000011*R_002[0]+-1*P_001000111*R_003[0]+P_001000211*R_004[0]+-1*P_101000011*R_102[0]+P_101000111*R_103[0]+-1*P_101000211*R_104[0];
double PR_000001011002=P_000001011*R_002[0]+-1*P_000001111*R_003[0]+P_000001211*R_004[0]+-1*P_000101011*R_012[0]+P_000101111*R_013[0]+-1*P_000101211*R_014[0];
double PR_000000012002=P_000000012*R_002[0]+-1*P_000000112*R_003[0]+P_000000212*R_004[0]+-1*P_000000312*R_005[0];
double PR_012000000011=P_012000000*R_011[0]+-1*P_112000000*R_111[0]+P_212000000*R_211[0]+-1*P_312000000*R_311[0];
double PR_011001000011=P_011001000*R_011[0]+-1*P_011101000*R_021[0]+-1*P_111001000*R_111[0]+P_111101000*R_121[0]+P_211001000*R_211[0]+-1*P_211101000*R_221[0];
double PR_010002000011=P_010002000*R_011[0]+-1*P_010102000*R_021[0]+P_010202000*R_031[0]+-1*P_110002000*R_111[0]+P_110102000*R_121[0]+-1*P_110202000*R_131[0];
double PR_011000001011=P_011000001*R_011[0]+-1*P_011000101*R_012[0]+-1*P_111000001*R_111[0]+P_111000101*R_112[0]+P_211000001*R_211[0]+-1*P_211000101*R_212[0];
double PR_010001001011=P_010001001*R_011[0]+-1*P_010001101*R_012[0]+-1*P_010101001*R_021[0]+P_010101101*R_022[0]+-1*P_110001001*R_111[0]+P_110001101*R_112[0]+P_110101001*R_121[0]+-1*P_110101101*R_122[0];
double PR_010000002011=P_010000002*R_011[0]+-1*P_010000102*R_012[0]+P_010000202*R_013[0]+-1*P_110000002*R_111[0]+P_110000102*R_112[0]+-1*P_110000202*R_113[0];
double PR_002010000011=P_002010000*R_011[0]+-1*P_002110000*R_021[0]+-1*P_102010000*R_111[0]+P_102110000*R_121[0]+P_202010000*R_211[0]+-1*P_202110000*R_221[0];
double PR_001011000011=P_001011000*R_011[0]+-1*P_001111000*R_021[0]+P_001211000*R_031[0]+-1*P_101011000*R_111[0]+P_101111000*R_121[0]+-1*P_101211000*R_131[0];
double PR_000012000011=P_000012000*R_011[0]+-1*P_000112000*R_021[0]+P_000212000*R_031[0]+-1*P_000312000*R_041[0];
double PR_001010001011=P_001010001*R_011[0]+-1*P_001010101*R_012[0]+-1*P_001110001*R_021[0]+P_001110101*R_022[0]+-1*P_101010001*R_111[0]+P_101010101*R_112[0]+P_101110001*R_121[0]+-1*P_101110101*R_122[0];
double PR_000011001011=P_000011001*R_011[0]+-1*P_000011101*R_012[0]+-1*P_000111001*R_021[0]+P_000111101*R_022[0]+P_000211001*R_031[0]+-1*P_000211101*R_032[0];
double PR_000010002011=P_000010002*R_011[0]+-1*P_000010102*R_012[0]+P_000010202*R_013[0]+-1*P_000110002*R_021[0]+P_000110102*R_022[0]+-1*P_000110202*R_023[0];
double PR_002000010011=P_002000010*R_011[0]+-1*P_002000110*R_012[0]+-1*P_102000010*R_111[0]+P_102000110*R_112[0]+P_202000010*R_211[0]+-1*P_202000110*R_212[0];
double PR_001001010011=P_001001010*R_011[0]+-1*P_001001110*R_012[0]+-1*P_001101010*R_021[0]+P_001101110*R_022[0]+-1*P_101001010*R_111[0]+P_101001110*R_112[0]+P_101101010*R_121[0]+-1*P_101101110*R_122[0];
double PR_000002010011=P_000002010*R_011[0]+-1*P_000002110*R_012[0]+-1*P_000102010*R_021[0]+P_000102110*R_022[0]+P_000202010*R_031[0]+-1*P_000202110*R_032[0];
double PR_001000011011=P_001000011*R_011[0]+-1*P_001000111*R_012[0]+P_001000211*R_013[0]+-1*P_101000011*R_111[0]+P_101000111*R_112[0]+-1*P_101000211*R_113[0];
double PR_000001011011=P_000001011*R_011[0]+-1*P_000001111*R_012[0]+P_000001211*R_013[0]+-1*P_000101011*R_021[0]+P_000101111*R_022[0]+-1*P_000101211*R_023[0];
double PR_000000012011=P_000000012*R_011[0]+-1*P_000000112*R_012[0]+P_000000212*R_013[0]+-1*P_000000312*R_014[0];
double PR_012000000020=P_012000000*R_020[0]+-1*P_112000000*R_120[0]+P_212000000*R_220[0]+-1*P_312000000*R_320[0];
double PR_011001000020=P_011001000*R_020[0]+-1*P_011101000*R_030[0]+-1*P_111001000*R_120[0]+P_111101000*R_130[0]+P_211001000*R_220[0]+-1*P_211101000*R_230[0];
double PR_010002000020=P_010002000*R_020[0]+-1*P_010102000*R_030[0]+P_010202000*R_040[0]+-1*P_110002000*R_120[0]+P_110102000*R_130[0]+-1*P_110202000*R_140[0];
double PR_011000001020=P_011000001*R_020[0]+-1*P_011000101*R_021[0]+-1*P_111000001*R_120[0]+P_111000101*R_121[0]+P_211000001*R_220[0]+-1*P_211000101*R_221[0];
double PR_010001001020=P_010001001*R_020[0]+-1*P_010001101*R_021[0]+-1*P_010101001*R_030[0]+P_010101101*R_031[0]+-1*P_110001001*R_120[0]+P_110001101*R_121[0]+P_110101001*R_130[0]+-1*P_110101101*R_131[0];
double PR_010000002020=P_010000002*R_020[0]+-1*P_010000102*R_021[0]+P_010000202*R_022[0]+-1*P_110000002*R_120[0]+P_110000102*R_121[0]+-1*P_110000202*R_122[0];
double PR_002010000020=P_002010000*R_020[0]+-1*P_002110000*R_030[0]+-1*P_102010000*R_120[0]+P_102110000*R_130[0]+P_202010000*R_220[0]+-1*P_202110000*R_230[0];
double PR_001011000020=P_001011000*R_020[0]+-1*P_001111000*R_030[0]+P_001211000*R_040[0]+-1*P_101011000*R_120[0]+P_101111000*R_130[0]+-1*P_101211000*R_140[0];
double PR_000012000020=P_000012000*R_020[0]+-1*P_000112000*R_030[0]+P_000212000*R_040[0]+-1*P_000312000*R_050[0];
double PR_001010001020=P_001010001*R_020[0]+-1*P_001010101*R_021[0]+-1*P_001110001*R_030[0]+P_001110101*R_031[0]+-1*P_101010001*R_120[0]+P_101010101*R_121[0]+P_101110001*R_130[0]+-1*P_101110101*R_131[0];
double PR_000011001020=P_000011001*R_020[0]+-1*P_000011101*R_021[0]+-1*P_000111001*R_030[0]+P_000111101*R_031[0]+P_000211001*R_040[0]+-1*P_000211101*R_041[0];
double PR_000010002020=P_000010002*R_020[0]+-1*P_000010102*R_021[0]+P_000010202*R_022[0]+-1*P_000110002*R_030[0]+P_000110102*R_031[0]+-1*P_000110202*R_032[0];
double PR_002000010020=P_002000010*R_020[0]+-1*P_002000110*R_021[0]+-1*P_102000010*R_120[0]+P_102000110*R_121[0]+P_202000010*R_220[0]+-1*P_202000110*R_221[0];
double PR_001001010020=P_001001010*R_020[0]+-1*P_001001110*R_021[0]+-1*P_001101010*R_030[0]+P_001101110*R_031[0]+-1*P_101001010*R_120[0]+P_101001110*R_121[0]+P_101101010*R_130[0]+-1*P_101101110*R_131[0];
double PR_000002010020=P_000002010*R_020[0]+-1*P_000002110*R_021[0]+-1*P_000102010*R_030[0]+P_000102110*R_031[0]+P_000202010*R_040[0]+-1*P_000202110*R_041[0];
double PR_001000011020=P_001000011*R_020[0]+-1*P_001000111*R_021[0]+P_001000211*R_022[0]+-1*P_101000011*R_120[0]+P_101000111*R_121[0]+-1*P_101000211*R_122[0];
double PR_000001011020=P_000001011*R_020[0]+-1*P_000001111*R_021[0]+P_000001211*R_022[0]+-1*P_000101011*R_030[0]+P_000101111*R_031[0]+-1*P_000101211*R_032[0];
double PR_000000012020=P_000000012*R_020[0]+-1*P_000000112*R_021[0]+P_000000212*R_022[0]+-1*P_000000312*R_023[0];
double PR_012000000101=P_012000000*R_101[0]+-1*P_112000000*R_201[0]+P_212000000*R_301[0]+-1*P_312000000*R_401[0];
double PR_011001000101=P_011001000*R_101[0]+-1*P_011101000*R_111[0]+-1*P_111001000*R_201[0]+P_111101000*R_211[0]+P_211001000*R_301[0]+-1*P_211101000*R_311[0];
double PR_010002000101=P_010002000*R_101[0]+-1*P_010102000*R_111[0]+P_010202000*R_121[0]+-1*P_110002000*R_201[0]+P_110102000*R_211[0]+-1*P_110202000*R_221[0];
double PR_011000001101=P_011000001*R_101[0]+-1*P_011000101*R_102[0]+-1*P_111000001*R_201[0]+P_111000101*R_202[0]+P_211000001*R_301[0]+-1*P_211000101*R_302[0];
double PR_010001001101=P_010001001*R_101[0]+-1*P_010001101*R_102[0]+-1*P_010101001*R_111[0]+P_010101101*R_112[0]+-1*P_110001001*R_201[0]+P_110001101*R_202[0]+P_110101001*R_211[0]+-1*P_110101101*R_212[0];
double PR_010000002101=P_010000002*R_101[0]+-1*P_010000102*R_102[0]+P_010000202*R_103[0]+-1*P_110000002*R_201[0]+P_110000102*R_202[0]+-1*P_110000202*R_203[0];
double PR_002010000101=P_002010000*R_101[0]+-1*P_002110000*R_111[0]+-1*P_102010000*R_201[0]+P_102110000*R_211[0]+P_202010000*R_301[0]+-1*P_202110000*R_311[0];
double PR_001011000101=P_001011000*R_101[0]+-1*P_001111000*R_111[0]+P_001211000*R_121[0]+-1*P_101011000*R_201[0]+P_101111000*R_211[0]+-1*P_101211000*R_221[0];
double PR_000012000101=P_000012000*R_101[0]+-1*P_000112000*R_111[0]+P_000212000*R_121[0]+-1*P_000312000*R_131[0];
double PR_001010001101=P_001010001*R_101[0]+-1*P_001010101*R_102[0]+-1*P_001110001*R_111[0]+P_001110101*R_112[0]+-1*P_101010001*R_201[0]+P_101010101*R_202[0]+P_101110001*R_211[0]+-1*P_101110101*R_212[0];
double PR_000011001101=P_000011001*R_101[0]+-1*P_000011101*R_102[0]+-1*P_000111001*R_111[0]+P_000111101*R_112[0]+P_000211001*R_121[0]+-1*P_000211101*R_122[0];
double PR_000010002101=P_000010002*R_101[0]+-1*P_000010102*R_102[0]+P_000010202*R_103[0]+-1*P_000110002*R_111[0]+P_000110102*R_112[0]+-1*P_000110202*R_113[0];
double PR_002000010101=P_002000010*R_101[0]+-1*P_002000110*R_102[0]+-1*P_102000010*R_201[0]+P_102000110*R_202[0]+P_202000010*R_301[0]+-1*P_202000110*R_302[0];
double PR_001001010101=P_001001010*R_101[0]+-1*P_001001110*R_102[0]+-1*P_001101010*R_111[0]+P_001101110*R_112[0]+-1*P_101001010*R_201[0]+P_101001110*R_202[0]+P_101101010*R_211[0]+-1*P_101101110*R_212[0];
double PR_000002010101=P_000002010*R_101[0]+-1*P_000002110*R_102[0]+-1*P_000102010*R_111[0]+P_000102110*R_112[0]+P_000202010*R_121[0]+-1*P_000202110*R_122[0];
double PR_001000011101=P_001000011*R_101[0]+-1*P_001000111*R_102[0]+P_001000211*R_103[0]+-1*P_101000011*R_201[0]+P_101000111*R_202[0]+-1*P_101000211*R_203[0];
double PR_000001011101=P_000001011*R_101[0]+-1*P_000001111*R_102[0]+P_000001211*R_103[0]+-1*P_000101011*R_111[0]+P_000101111*R_112[0]+-1*P_000101211*R_113[0];
double PR_000000012101=P_000000012*R_101[0]+-1*P_000000112*R_102[0]+P_000000212*R_103[0]+-1*P_000000312*R_104[0];
double PR_012000000110=P_012000000*R_110[0]+-1*P_112000000*R_210[0]+P_212000000*R_310[0]+-1*P_312000000*R_410[0];
double PR_011001000110=P_011001000*R_110[0]+-1*P_011101000*R_120[0]+-1*P_111001000*R_210[0]+P_111101000*R_220[0]+P_211001000*R_310[0]+-1*P_211101000*R_320[0];
double PR_010002000110=P_010002000*R_110[0]+-1*P_010102000*R_120[0]+P_010202000*R_130[0]+-1*P_110002000*R_210[0]+P_110102000*R_220[0]+-1*P_110202000*R_230[0];
double PR_011000001110=P_011000001*R_110[0]+-1*P_011000101*R_111[0]+-1*P_111000001*R_210[0]+P_111000101*R_211[0]+P_211000001*R_310[0]+-1*P_211000101*R_311[0];
double PR_010001001110=P_010001001*R_110[0]+-1*P_010001101*R_111[0]+-1*P_010101001*R_120[0]+P_010101101*R_121[0]+-1*P_110001001*R_210[0]+P_110001101*R_211[0]+P_110101001*R_220[0]+-1*P_110101101*R_221[0];
double PR_010000002110=P_010000002*R_110[0]+-1*P_010000102*R_111[0]+P_010000202*R_112[0]+-1*P_110000002*R_210[0]+P_110000102*R_211[0]+-1*P_110000202*R_212[0];
double PR_002010000110=P_002010000*R_110[0]+-1*P_002110000*R_120[0]+-1*P_102010000*R_210[0]+P_102110000*R_220[0]+P_202010000*R_310[0]+-1*P_202110000*R_320[0];
double PR_001011000110=P_001011000*R_110[0]+-1*P_001111000*R_120[0]+P_001211000*R_130[0]+-1*P_101011000*R_210[0]+P_101111000*R_220[0]+-1*P_101211000*R_230[0];
double PR_000012000110=P_000012000*R_110[0]+-1*P_000112000*R_120[0]+P_000212000*R_130[0]+-1*P_000312000*R_140[0];
double PR_001010001110=P_001010001*R_110[0]+-1*P_001010101*R_111[0]+-1*P_001110001*R_120[0]+P_001110101*R_121[0]+-1*P_101010001*R_210[0]+P_101010101*R_211[0]+P_101110001*R_220[0]+-1*P_101110101*R_221[0];
double PR_000011001110=P_000011001*R_110[0]+-1*P_000011101*R_111[0]+-1*P_000111001*R_120[0]+P_000111101*R_121[0]+P_000211001*R_130[0]+-1*P_000211101*R_131[0];
double PR_000010002110=P_000010002*R_110[0]+-1*P_000010102*R_111[0]+P_000010202*R_112[0]+-1*P_000110002*R_120[0]+P_000110102*R_121[0]+-1*P_000110202*R_122[0];
double PR_002000010110=P_002000010*R_110[0]+-1*P_002000110*R_111[0]+-1*P_102000010*R_210[0]+P_102000110*R_211[0]+P_202000010*R_310[0]+-1*P_202000110*R_311[0];
double PR_001001010110=P_001001010*R_110[0]+-1*P_001001110*R_111[0]+-1*P_001101010*R_120[0]+P_001101110*R_121[0]+-1*P_101001010*R_210[0]+P_101001110*R_211[0]+P_101101010*R_220[0]+-1*P_101101110*R_221[0];
double PR_000002010110=P_000002010*R_110[0]+-1*P_000002110*R_111[0]+-1*P_000102010*R_120[0]+P_000102110*R_121[0]+P_000202010*R_130[0]+-1*P_000202110*R_131[0];
double PR_001000011110=P_001000011*R_110[0]+-1*P_001000111*R_111[0]+P_001000211*R_112[0]+-1*P_101000011*R_210[0]+P_101000111*R_211[0]+-1*P_101000211*R_212[0];
double PR_000001011110=P_000001011*R_110[0]+-1*P_000001111*R_111[0]+P_000001211*R_112[0]+-1*P_000101011*R_120[0]+P_000101111*R_121[0]+-1*P_000101211*R_122[0];
double PR_000000012110=P_000000012*R_110[0]+-1*P_000000112*R_111[0]+P_000000212*R_112[0]+-1*P_000000312*R_113[0];
double PR_012000000200=P_012000000*R_200[0]+-1*P_112000000*R_300[0]+P_212000000*R_400[0]+-1*P_312000000*R_500[0];
double PR_011001000200=P_011001000*R_200[0]+-1*P_011101000*R_210[0]+-1*P_111001000*R_300[0]+P_111101000*R_310[0]+P_211001000*R_400[0]+-1*P_211101000*R_410[0];
double PR_010002000200=P_010002000*R_200[0]+-1*P_010102000*R_210[0]+P_010202000*R_220[0]+-1*P_110002000*R_300[0]+P_110102000*R_310[0]+-1*P_110202000*R_320[0];
double PR_011000001200=P_011000001*R_200[0]+-1*P_011000101*R_201[0]+-1*P_111000001*R_300[0]+P_111000101*R_301[0]+P_211000001*R_400[0]+-1*P_211000101*R_401[0];
double PR_010001001200=P_010001001*R_200[0]+-1*P_010001101*R_201[0]+-1*P_010101001*R_210[0]+P_010101101*R_211[0]+-1*P_110001001*R_300[0]+P_110001101*R_301[0]+P_110101001*R_310[0]+-1*P_110101101*R_311[0];
double PR_010000002200=P_010000002*R_200[0]+-1*P_010000102*R_201[0]+P_010000202*R_202[0]+-1*P_110000002*R_300[0]+P_110000102*R_301[0]+-1*P_110000202*R_302[0];
double PR_002010000200=P_002010000*R_200[0]+-1*P_002110000*R_210[0]+-1*P_102010000*R_300[0]+P_102110000*R_310[0]+P_202010000*R_400[0]+-1*P_202110000*R_410[0];
double PR_001011000200=P_001011000*R_200[0]+-1*P_001111000*R_210[0]+P_001211000*R_220[0]+-1*P_101011000*R_300[0]+P_101111000*R_310[0]+-1*P_101211000*R_320[0];
double PR_000012000200=P_000012000*R_200[0]+-1*P_000112000*R_210[0]+P_000212000*R_220[0]+-1*P_000312000*R_230[0];
double PR_001010001200=P_001010001*R_200[0]+-1*P_001010101*R_201[0]+-1*P_001110001*R_210[0]+P_001110101*R_211[0]+-1*P_101010001*R_300[0]+P_101010101*R_301[0]+P_101110001*R_310[0]+-1*P_101110101*R_311[0];
double PR_000011001200=P_000011001*R_200[0]+-1*P_000011101*R_201[0]+-1*P_000111001*R_210[0]+P_000111101*R_211[0]+P_000211001*R_220[0]+-1*P_000211101*R_221[0];
double PR_000010002200=P_000010002*R_200[0]+-1*P_000010102*R_201[0]+P_000010202*R_202[0]+-1*P_000110002*R_210[0]+P_000110102*R_211[0]+-1*P_000110202*R_212[0];
double PR_002000010200=P_002000010*R_200[0]+-1*P_002000110*R_201[0]+-1*P_102000010*R_300[0]+P_102000110*R_301[0]+P_202000010*R_400[0]+-1*P_202000110*R_401[0];
double PR_001001010200=P_001001010*R_200[0]+-1*P_001001110*R_201[0]+-1*P_001101010*R_210[0]+P_001101110*R_211[0]+-1*P_101001010*R_300[0]+P_101001110*R_301[0]+P_101101010*R_310[0]+-1*P_101101110*R_311[0];
double PR_000002010200=P_000002010*R_200[0]+-1*P_000002110*R_201[0]+-1*P_000102010*R_210[0]+P_000102110*R_211[0]+P_000202010*R_220[0]+-1*P_000202110*R_221[0];
double PR_001000011200=P_001000011*R_200[0]+-1*P_001000111*R_201[0]+P_001000211*R_202[0]+-1*P_101000011*R_300[0]+P_101000111*R_301[0]+-1*P_101000211*R_302[0];
double PR_000001011200=P_000001011*R_200[0]+-1*P_000001111*R_201[0]+P_000001211*R_202[0]+-1*P_000101011*R_210[0]+P_000101111*R_211[0]+-1*P_000101211*R_212[0];
double PR_000000012200=P_000000012*R_200[0]+-1*P_000000112*R_201[0]+P_000000212*R_202[0]+-1*P_000000312*R_203[0];
// Ket-side expansion coefficients along the three Cartesian directions
// (index i = 0,1,2 for x,y,z).  Qd_001 and Qd_010 are built earlier in this
// kernel (not visible in this excerpt); the higher-order terms follow the
// recurrence visible below:
//   Qd_011 = Qd_101 + Qd_010*Qd_001
//   Qd_111 = Qd_010*Qd_101 + aQin1*Qd_001
//   Qd_211 = aQin1*Qd_101
// NOTE(review): aQin1 is defined outside this excerpt (presumably a
// 1/(2*Eta)-like factor) -- confirm against the generator before relying on it.
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
// First-order terms are direction-independent: both reduce to aQin1.
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
// Second-order term built from the first-order pieces.
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
// Highest-order term: scaled first-order coefficient.
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
// Ket coefficient products Q_<xxx><yyy><zzz>: the nine-digit suffix encodes
// three 3-digit order triplets, one per Cartesian direction; each Q is the
// product of the matching Qd component for x ([0]), y ([1]) and z ([2]).
// Terms whose order lies entirely in one direction reduce to a single Qd_011 /
// Qd_111 / Qd_211 component; mixed terms are products of two first-order
// components.  (Auto-generated; naming mirrors the PR_* table above.)
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
ans_temp[ans_id*9+0]+=Pmtrx[0]*(Q_011000000*PR_012000000000+Q_111000000*PR_012000000100+Q_211000000*PR_012000000200);
ans_temp[ans_id*9+0]+=Pmtrx[1]*(Q_010001000*PR_012000000000+Q_010101000*PR_012000000010+Q_110001000*PR_012000000100+Q_110101000*PR_012000000110);
ans_temp[ans_id*9+0]+=Pmtrx[2]*(Q_010000001*PR_012000000000+Q_010000101*PR_012000000001+Q_110000001*PR_012000000100+Q_110000101*PR_012000000101);
ans_temp[ans_id*9+1]+=Pmtrx[0]*(Q_001010000*PR_012000000000+Q_001110000*PR_012000000010+Q_101010000*PR_012000000100+Q_101110000*PR_012000000110);
ans_temp[ans_id*9+1]+=Pmtrx[1]*(Q_000011000*PR_012000000000+Q_000111000*PR_012000000010+Q_000211000*PR_012000000020);
ans_temp[ans_id*9+1]+=Pmtrx[2]*(Q_000010001*PR_012000000000+Q_000010101*PR_012000000001+Q_000110001*PR_012000000010+Q_000110101*PR_012000000011);
ans_temp[ans_id*9+2]+=Pmtrx[0]*(Q_001000010*PR_012000000000+Q_001000110*PR_012000000001+Q_101000010*PR_012000000100+Q_101000110*PR_012000000101);
ans_temp[ans_id*9+2]+=Pmtrx[1]*(Q_000001010*PR_012000000000+Q_000001110*PR_012000000001+Q_000101010*PR_012000000010+Q_000101110*PR_012000000011);
ans_temp[ans_id*9+2]+=Pmtrx[2]*(Q_000000011*PR_012000000000+Q_000000111*PR_012000000001+Q_000000211*PR_012000000002);
ans_temp[ans_id*9+0]+=Pmtrx[3]*(Q_011000000*PR_011001000000+Q_111000000*PR_011001000100+Q_211000000*PR_011001000200);
ans_temp[ans_id*9+0]+=Pmtrx[4]*(Q_010001000*PR_011001000000+Q_010101000*PR_011001000010+Q_110001000*PR_011001000100+Q_110101000*PR_011001000110);
ans_temp[ans_id*9+0]+=Pmtrx[5]*(Q_010000001*PR_011001000000+Q_010000101*PR_011001000001+Q_110000001*PR_011001000100+Q_110000101*PR_011001000101);
ans_temp[ans_id*9+1]+=Pmtrx[3]*(Q_001010000*PR_011001000000+Q_001110000*PR_011001000010+Q_101010000*PR_011001000100+Q_101110000*PR_011001000110);
ans_temp[ans_id*9+1]+=Pmtrx[4]*(Q_000011000*PR_011001000000+Q_000111000*PR_011001000010+Q_000211000*PR_011001000020);
ans_temp[ans_id*9+1]+=Pmtrx[5]*(Q_000010001*PR_011001000000+Q_000010101*PR_011001000001+Q_000110001*PR_011001000010+Q_000110101*PR_011001000011);
ans_temp[ans_id*9+2]+=Pmtrx[3]*(Q_001000010*PR_011001000000+Q_001000110*PR_011001000001+Q_101000010*PR_011001000100+Q_101000110*PR_011001000101);
ans_temp[ans_id*9+2]+=Pmtrx[4]*(Q_000001010*PR_011001000000+Q_000001110*PR_011001000001+Q_000101010*PR_011001000010+Q_000101110*PR_011001000011);
ans_temp[ans_id*9+2]+=Pmtrx[5]*(Q_000000011*PR_011001000000+Q_000000111*PR_011001000001+Q_000000211*PR_011001000002);
ans_temp[ans_id*9+0]+=Pmtrx[6]*(Q_011000000*PR_010002000000+Q_111000000*PR_010002000100+Q_211000000*PR_010002000200);
ans_temp[ans_id*9+0]+=Pmtrx[7]*(Q_010001000*PR_010002000000+Q_010101000*PR_010002000010+Q_110001000*PR_010002000100+Q_110101000*PR_010002000110);
ans_temp[ans_id*9+0]+=Pmtrx[8]*(Q_010000001*PR_010002000000+Q_010000101*PR_010002000001+Q_110000001*PR_010002000100+Q_110000101*PR_010002000101);
ans_temp[ans_id*9+1]+=Pmtrx[6]*(Q_001010000*PR_010002000000+Q_001110000*PR_010002000010+Q_101010000*PR_010002000100+Q_101110000*PR_010002000110);
ans_temp[ans_id*9+1]+=Pmtrx[7]*(Q_000011000*PR_010002000000+Q_000111000*PR_010002000010+Q_000211000*PR_010002000020);
ans_temp[ans_id*9+1]+=Pmtrx[8]*(Q_000010001*PR_010002000000+Q_000010101*PR_010002000001+Q_000110001*PR_010002000010+Q_000110101*PR_010002000011);
ans_temp[ans_id*9+2]+=Pmtrx[6]*(Q_001000010*PR_010002000000+Q_001000110*PR_010002000001+Q_101000010*PR_010002000100+Q_101000110*PR_010002000101);
ans_temp[ans_id*9+2]+=Pmtrx[7]*(Q_000001010*PR_010002000000+Q_000001110*PR_010002000001+Q_000101010*PR_010002000010+Q_000101110*PR_010002000011);
ans_temp[ans_id*9+2]+=Pmtrx[8]*(Q_000000011*PR_010002000000+Q_000000111*PR_010002000001+Q_000000211*PR_010002000002);
ans_temp[ans_id*9+0]+=Pmtrx[9]*(Q_011000000*PR_011000001000+Q_111000000*PR_011000001100+Q_211000000*PR_011000001200);
ans_temp[ans_id*9+0]+=Pmtrx[10]*(Q_010001000*PR_011000001000+Q_010101000*PR_011000001010+Q_110001000*PR_011000001100+Q_110101000*PR_011000001110);
ans_temp[ans_id*9+0]+=Pmtrx[11]*(Q_010000001*PR_011000001000+Q_010000101*PR_011000001001+Q_110000001*PR_011000001100+Q_110000101*PR_011000001101);
ans_temp[ans_id*9+1]+=Pmtrx[9]*(Q_001010000*PR_011000001000+Q_001110000*PR_011000001010+Q_101010000*PR_011000001100+Q_101110000*PR_011000001110);
ans_temp[ans_id*9+1]+=Pmtrx[10]*(Q_000011000*PR_011000001000+Q_000111000*PR_011000001010+Q_000211000*PR_011000001020);
ans_temp[ans_id*9+1]+=Pmtrx[11]*(Q_000010001*PR_011000001000+Q_000010101*PR_011000001001+Q_000110001*PR_011000001010+Q_000110101*PR_011000001011);
ans_temp[ans_id*9+2]+=Pmtrx[9]*(Q_001000010*PR_011000001000+Q_001000110*PR_011000001001+Q_101000010*PR_011000001100+Q_101000110*PR_011000001101);
ans_temp[ans_id*9+2]+=Pmtrx[10]*(Q_000001010*PR_011000001000+Q_000001110*PR_011000001001+Q_000101010*PR_011000001010+Q_000101110*PR_011000001011);
ans_temp[ans_id*9+2]+=Pmtrx[11]*(Q_000000011*PR_011000001000+Q_000000111*PR_011000001001+Q_000000211*PR_011000001002);
ans_temp[ans_id*9+0]+=Pmtrx[12]*(Q_011000000*PR_010001001000+Q_111000000*PR_010001001100+Q_211000000*PR_010001001200);
ans_temp[ans_id*9+0]+=Pmtrx[13]*(Q_010001000*PR_010001001000+Q_010101000*PR_010001001010+Q_110001000*PR_010001001100+Q_110101000*PR_010001001110);
ans_temp[ans_id*9+0]+=Pmtrx[14]*(Q_010000001*PR_010001001000+Q_010000101*PR_010001001001+Q_110000001*PR_010001001100+Q_110000101*PR_010001001101);
ans_temp[ans_id*9+1]+=Pmtrx[12]*(Q_001010000*PR_010001001000+Q_001110000*PR_010001001010+Q_101010000*PR_010001001100+Q_101110000*PR_010001001110);
ans_temp[ans_id*9+1]+=Pmtrx[13]*(Q_000011000*PR_010001001000+Q_000111000*PR_010001001010+Q_000211000*PR_010001001020);
ans_temp[ans_id*9+1]+=Pmtrx[14]*(Q_000010001*PR_010001001000+Q_000010101*PR_010001001001+Q_000110001*PR_010001001010+Q_000110101*PR_010001001011);
ans_temp[ans_id*9+2]+=Pmtrx[12]*(Q_001000010*PR_010001001000+Q_001000110*PR_010001001001+Q_101000010*PR_010001001100+Q_101000110*PR_010001001101);
ans_temp[ans_id*9+2]+=Pmtrx[13]*(Q_000001010*PR_010001001000+Q_000001110*PR_010001001001+Q_000101010*PR_010001001010+Q_000101110*PR_010001001011);
ans_temp[ans_id*9+2]+=Pmtrx[14]*(Q_000000011*PR_010001001000+Q_000000111*PR_010001001001+Q_000000211*PR_010001001002);
ans_temp[ans_id*9+0]+=Pmtrx[15]*(Q_011000000*PR_010000002000+Q_111000000*PR_010000002100+Q_211000000*PR_010000002200);
ans_temp[ans_id*9+0]+=Pmtrx[16]*(Q_010001000*PR_010000002000+Q_010101000*PR_010000002010+Q_110001000*PR_010000002100+Q_110101000*PR_010000002110);
ans_temp[ans_id*9+0]+=Pmtrx[17]*(Q_010000001*PR_010000002000+Q_010000101*PR_010000002001+Q_110000001*PR_010000002100+Q_110000101*PR_010000002101);
ans_temp[ans_id*9+1]+=Pmtrx[15]*(Q_001010000*PR_010000002000+Q_001110000*PR_010000002010+Q_101010000*PR_010000002100+Q_101110000*PR_010000002110);
ans_temp[ans_id*9+1]+=Pmtrx[16]*(Q_000011000*PR_010000002000+Q_000111000*PR_010000002010+Q_000211000*PR_010000002020);
ans_temp[ans_id*9+1]+=Pmtrx[17]*(Q_000010001*PR_010000002000+Q_000010101*PR_010000002001+Q_000110001*PR_010000002010+Q_000110101*PR_010000002011);
ans_temp[ans_id*9+2]+=Pmtrx[15]*(Q_001000010*PR_010000002000+Q_001000110*PR_010000002001+Q_101000010*PR_010000002100+Q_101000110*PR_010000002101);
ans_temp[ans_id*9+2]+=Pmtrx[16]*(Q_000001010*PR_010000002000+Q_000001110*PR_010000002001+Q_000101010*PR_010000002010+Q_000101110*PR_010000002011);
ans_temp[ans_id*9+2]+=Pmtrx[17]*(Q_000000011*PR_010000002000+Q_000000111*PR_010000002001+Q_000000211*PR_010000002002);
ans_temp[ans_id*9+3]+=Pmtrx[0]*(Q_011000000*PR_002010000000+Q_111000000*PR_002010000100+Q_211000000*PR_002010000200);
ans_temp[ans_id*9+3]+=Pmtrx[1]*(Q_010001000*PR_002010000000+Q_010101000*PR_002010000010+Q_110001000*PR_002010000100+Q_110101000*PR_002010000110);
ans_temp[ans_id*9+3]+=Pmtrx[2]*(Q_010000001*PR_002010000000+Q_010000101*PR_002010000001+Q_110000001*PR_002010000100+Q_110000101*PR_002010000101);
ans_temp[ans_id*9+4]+=Pmtrx[0]*(Q_001010000*PR_002010000000+Q_001110000*PR_002010000010+Q_101010000*PR_002010000100+Q_101110000*PR_002010000110);
ans_temp[ans_id*9+4]+=Pmtrx[1]*(Q_000011000*PR_002010000000+Q_000111000*PR_002010000010+Q_000211000*PR_002010000020);
ans_temp[ans_id*9+4]+=Pmtrx[2]*(Q_000010001*PR_002010000000+Q_000010101*PR_002010000001+Q_000110001*PR_002010000010+Q_000110101*PR_002010000011);
ans_temp[ans_id*9+5]+=Pmtrx[0]*(Q_001000010*PR_002010000000+Q_001000110*PR_002010000001+Q_101000010*PR_002010000100+Q_101000110*PR_002010000101);
ans_temp[ans_id*9+5]+=Pmtrx[1]*(Q_000001010*PR_002010000000+Q_000001110*PR_002010000001+Q_000101010*PR_002010000010+Q_000101110*PR_002010000011);
ans_temp[ans_id*9+5]+=Pmtrx[2]*(Q_000000011*PR_002010000000+Q_000000111*PR_002010000001+Q_000000211*PR_002010000002);
ans_temp[ans_id*9+3]+=Pmtrx[3]*(Q_011000000*PR_001011000000+Q_111000000*PR_001011000100+Q_211000000*PR_001011000200);
ans_temp[ans_id*9+3]+=Pmtrx[4]*(Q_010001000*PR_001011000000+Q_010101000*PR_001011000010+Q_110001000*PR_001011000100+Q_110101000*PR_001011000110);
ans_temp[ans_id*9+3]+=Pmtrx[5]*(Q_010000001*PR_001011000000+Q_010000101*PR_001011000001+Q_110000001*PR_001011000100+Q_110000101*PR_001011000101);
ans_temp[ans_id*9+4]+=Pmtrx[3]*(Q_001010000*PR_001011000000+Q_001110000*PR_001011000010+Q_101010000*PR_001011000100+Q_101110000*PR_001011000110);
ans_temp[ans_id*9+4]+=Pmtrx[4]*(Q_000011000*PR_001011000000+Q_000111000*PR_001011000010+Q_000211000*PR_001011000020);
ans_temp[ans_id*9+4]+=Pmtrx[5]*(Q_000010001*PR_001011000000+Q_000010101*PR_001011000001+Q_000110001*PR_001011000010+Q_000110101*PR_001011000011);
ans_temp[ans_id*9+5]+=Pmtrx[3]*(Q_001000010*PR_001011000000+Q_001000110*PR_001011000001+Q_101000010*PR_001011000100+Q_101000110*PR_001011000101);
ans_temp[ans_id*9+5]+=Pmtrx[4]*(Q_000001010*PR_001011000000+Q_000001110*PR_001011000001+Q_000101010*PR_001011000010+Q_000101110*PR_001011000011);
ans_temp[ans_id*9+5]+=Pmtrx[5]*(Q_000000011*PR_001011000000+Q_000000111*PR_001011000001+Q_000000211*PR_001011000002);
ans_temp[ans_id*9+3]+=Pmtrx[6]*(Q_011000000*PR_000012000000+Q_111000000*PR_000012000100+Q_211000000*PR_000012000200);
ans_temp[ans_id*9+3]+=Pmtrx[7]*(Q_010001000*PR_000012000000+Q_010101000*PR_000012000010+Q_110001000*PR_000012000100+Q_110101000*PR_000012000110);
ans_temp[ans_id*9+3]+=Pmtrx[8]*(Q_010000001*PR_000012000000+Q_010000101*PR_000012000001+Q_110000001*PR_000012000100+Q_110000101*PR_000012000101);
ans_temp[ans_id*9+4]+=Pmtrx[6]*(Q_001010000*PR_000012000000+Q_001110000*PR_000012000010+Q_101010000*PR_000012000100+Q_101110000*PR_000012000110);
ans_temp[ans_id*9+4]+=Pmtrx[7]*(Q_000011000*PR_000012000000+Q_000111000*PR_000012000010+Q_000211000*PR_000012000020);
ans_temp[ans_id*9+4]+=Pmtrx[8]*(Q_000010001*PR_000012000000+Q_000010101*PR_000012000001+Q_000110001*PR_000012000010+Q_000110101*PR_000012000011);
ans_temp[ans_id*9+5]+=Pmtrx[6]*(Q_001000010*PR_000012000000+Q_001000110*PR_000012000001+Q_101000010*PR_000012000100+Q_101000110*PR_000012000101);
ans_temp[ans_id*9+5]+=Pmtrx[7]*(Q_000001010*PR_000012000000+Q_000001110*PR_000012000001+Q_000101010*PR_000012000010+Q_000101110*PR_000012000011);
ans_temp[ans_id*9+5]+=Pmtrx[8]*(Q_000000011*PR_000012000000+Q_000000111*PR_000012000001+Q_000000211*PR_000012000002);
ans_temp[ans_id*9+3]+=Pmtrx[9]*(Q_011000000*PR_001010001000+Q_111000000*PR_001010001100+Q_211000000*PR_001010001200);
ans_temp[ans_id*9+3]+=Pmtrx[10]*(Q_010001000*PR_001010001000+Q_010101000*PR_001010001010+Q_110001000*PR_001010001100+Q_110101000*PR_001010001110);
ans_temp[ans_id*9+3]+=Pmtrx[11]*(Q_010000001*PR_001010001000+Q_010000101*PR_001010001001+Q_110000001*PR_001010001100+Q_110000101*PR_001010001101);
ans_temp[ans_id*9+4]+=Pmtrx[9]*(Q_001010000*PR_001010001000+Q_001110000*PR_001010001010+Q_101010000*PR_001010001100+Q_101110000*PR_001010001110);
ans_temp[ans_id*9+4]+=Pmtrx[10]*(Q_000011000*PR_001010001000+Q_000111000*PR_001010001010+Q_000211000*PR_001010001020);
ans_temp[ans_id*9+4]+=Pmtrx[11]*(Q_000010001*PR_001010001000+Q_000010101*PR_001010001001+Q_000110001*PR_001010001010+Q_000110101*PR_001010001011);
ans_temp[ans_id*9+5]+=Pmtrx[9]*(Q_001000010*PR_001010001000+Q_001000110*PR_001010001001+Q_101000010*PR_001010001100+Q_101000110*PR_001010001101);
ans_temp[ans_id*9+5]+=Pmtrx[10]*(Q_000001010*PR_001010001000+Q_000001110*PR_001010001001+Q_000101010*PR_001010001010+Q_000101110*PR_001010001011);
ans_temp[ans_id*9+5]+=Pmtrx[11]*(Q_000000011*PR_001010001000+Q_000000111*PR_001010001001+Q_000000211*PR_001010001002);
ans_temp[ans_id*9+3]+=Pmtrx[12]*(Q_011000000*PR_000011001000+Q_111000000*PR_000011001100+Q_211000000*PR_000011001200);
ans_temp[ans_id*9+3]+=Pmtrx[13]*(Q_010001000*PR_000011001000+Q_010101000*PR_000011001010+Q_110001000*PR_000011001100+Q_110101000*PR_000011001110);
ans_temp[ans_id*9+3]+=Pmtrx[14]*(Q_010000001*PR_000011001000+Q_010000101*PR_000011001001+Q_110000001*PR_000011001100+Q_110000101*PR_000011001101);
ans_temp[ans_id*9+4]+=Pmtrx[12]*(Q_001010000*PR_000011001000+Q_001110000*PR_000011001010+Q_101010000*PR_000011001100+Q_101110000*PR_000011001110);
ans_temp[ans_id*9+4]+=Pmtrx[13]*(Q_000011000*PR_000011001000+Q_000111000*PR_000011001010+Q_000211000*PR_000011001020);
ans_temp[ans_id*9+4]+=Pmtrx[14]*(Q_000010001*PR_000011001000+Q_000010101*PR_000011001001+Q_000110001*PR_000011001010+Q_000110101*PR_000011001011);
ans_temp[ans_id*9+5]+=Pmtrx[12]*(Q_001000010*PR_000011001000+Q_001000110*PR_000011001001+Q_101000010*PR_000011001100+Q_101000110*PR_000011001101);
ans_temp[ans_id*9+5]+=Pmtrx[13]*(Q_000001010*PR_000011001000+Q_000001110*PR_000011001001+Q_000101010*PR_000011001010+Q_000101110*PR_000011001011);
ans_temp[ans_id*9+5]+=Pmtrx[14]*(Q_000000011*PR_000011001000+Q_000000111*PR_000011001001+Q_000000211*PR_000011001002);
ans_temp[ans_id*9+3]+=Pmtrx[15]*(Q_011000000*PR_000010002000+Q_111000000*PR_000010002100+Q_211000000*PR_000010002200);
ans_temp[ans_id*9+3]+=Pmtrx[16]*(Q_010001000*PR_000010002000+Q_010101000*PR_000010002010+Q_110001000*PR_000010002100+Q_110101000*PR_000010002110);
ans_temp[ans_id*9+3]+=Pmtrx[17]*(Q_010000001*PR_000010002000+Q_010000101*PR_000010002001+Q_110000001*PR_000010002100+Q_110000101*PR_000010002101);
ans_temp[ans_id*9+4]+=Pmtrx[15]*(Q_001010000*PR_000010002000+Q_001110000*PR_000010002010+Q_101010000*PR_000010002100+Q_101110000*PR_000010002110);
ans_temp[ans_id*9+4]+=Pmtrx[16]*(Q_000011000*PR_000010002000+Q_000111000*PR_000010002010+Q_000211000*PR_000010002020);
ans_temp[ans_id*9+4]+=Pmtrx[17]*(Q_000010001*PR_000010002000+Q_000010101*PR_000010002001+Q_000110001*PR_000010002010+Q_000110101*PR_000010002011);
ans_temp[ans_id*9+5]+=Pmtrx[15]*(Q_001000010*PR_000010002000+Q_001000110*PR_000010002001+Q_101000010*PR_000010002100+Q_101000110*PR_000010002101);
ans_temp[ans_id*9+5]+=Pmtrx[16]*(Q_000001010*PR_000010002000+Q_000001110*PR_000010002001+Q_000101010*PR_000010002010+Q_000101110*PR_000010002011);
ans_temp[ans_id*9+5]+=Pmtrx[17]*(Q_000000011*PR_000010002000+Q_000000111*PR_000010002001+Q_000000211*PR_000010002002);
ans_temp[ans_id*9+6]+=Pmtrx[0]*(Q_011000000*PR_002000010000+Q_111000000*PR_002000010100+Q_211000000*PR_002000010200);
ans_temp[ans_id*9+6]+=Pmtrx[1]*(Q_010001000*PR_002000010000+Q_010101000*PR_002000010010+Q_110001000*PR_002000010100+Q_110101000*PR_002000010110);
ans_temp[ans_id*9+6]+=Pmtrx[2]*(Q_010000001*PR_002000010000+Q_010000101*PR_002000010001+Q_110000001*PR_002000010100+Q_110000101*PR_002000010101);
ans_temp[ans_id*9+7]+=Pmtrx[0]*(Q_001010000*PR_002000010000+Q_001110000*PR_002000010010+Q_101010000*PR_002000010100+Q_101110000*PR_002000010110);
ans_temp[ans_id*9+7]+=Pmtrx[1]*(Q_000011000*PR_002000010000+Q_000111000*PR_002000010010+Q_000211000*PR_002000010020);
ans_temp[ans_id*9+7]+=Pmtrx[2]*(Q_000010001*PR_002000010000+Q_000010101*PR_002000010001+Q_000110001*PR_002000010010+Q_000110101*PR_002000010011);
ans_temp[ans_id*9+8]+=Pmtrx[0]*(Q_001000010*PR_002000010000+Q_001000110*PR_002000010001+Q_101000010*PR_002000010100+Q_101000110*PR_002000010101);
ans_temp[ans_id*9+8]+=Pmtrx[1]*(Q_000001010*PR_002000010000+Q_000001110*PR_002000010001+Q_000101010*PR_002000010010+Q_000101110*PR_002000010011);
ans_temp[ans_id*9+8]+=Pmtrx[2]*(Q_000000011*PR_002000010000+Q_000000111*PR_002000010001+Q_000000211*PR_002000010002);
ans_temp[ans_id*9+6]+=Pmtrx[3]*(Q_011000000*PR_001001010000+Q_111000000*PR_001001010100+Q_211000000*PR_001001010200);
ans_temp[ans_id*9+6]+=Pmtrx[4]*(Q_010001000*PR_001001010000+Q_010101000*PR_001001010010+Q_110001000*PR_001001010100+Q_110101000*PR_001001010110);
ans_temp[ans_id*9+6]+=Pmtrx[5]*(Q_010000001*PR_001001010000+Q_010000101*PR_001001010001+Q_110000001*PR_001001010100+Q_110000101*PR_001001010101);
ans_temp[ans_id*9+7]+=Pmtrx[3]*(Q_001010000*PR_001001010000+Q_001110000*PR_001001010010+Q_101010000*PR_001001010100+Q_101110000*PR_001001010110);
ans_temp[ans_id*9+7]+=Pmtrx[4]*(Q_000011000*PR_001001010000+Q_000111000*PR_001001010010+Q_000211000*PR_001001010020);
ans_temp[ans_id*9+7]+=Pmtrx[5]*(Q_000010001*PR_001001010000+Q_000010101*PR_001001010001+Q_000110001*PR_001001010010+Q_000110101*PR_001001010011);
ans_temp[ans_id*9+8]+=Pmtrx[3]*(Q_001000010*PR_001001010000+Q_001000110*PR_001001010001+Q_101000010*PR_001001010100+Q_101000110*PR_001001010101);
ans_temp[ans_id*9+8]+=Pmtrx[4]*(Q_000001010*PR_001001010000+Q_000001110*PR_001001010001+Q_000101010*PR_001001010010+Q_000101110*PR_001001010011);
ans_temp[ans_id*9+8]+=Pmtrx[5]*(Q_000000011*PR_001001010000+Q_000000111*PR_001001010001+Q_000000211*PR_001001010002);
ans_temp[ans_id*9+6]+=Pmtrx[6]*(Q_011000000*PR_000002010000+Q_111000000*PR_000002010100+Q_211000000*PR_000002010200);
ans_temp[ans_id*9+6]+=Pmtrx[7]*(Q_010001000*PR_000002010000+Q_010101000*PR_000002010010+Q_110001000*PR_000002010100+Q_110101000*PR_000002010110);
ans_temp[ans_id*9+6]+=Pmtrx[8]*(Q_010000001*PR_000002010000+Q_010000101*PR_000002010001+Q_110000001*PR_000002010100+Q_110000101*PR_000002010101);
ans_temp[ans_id*9+7]+=Pmtrx[6]*(Q_001010000*PR_000002010000+Q_001110000*PR_000002010010+Q_101010000*PR_000002010100+Q_101110000*PR_000002010110);
ans_temp[ans_id*9+7]+=Pmtrx[7]*(Q_000011000*PR_000002010000+Q_000111000*PR_000002010010+Q_000211000*PR_000002010020);
ans_temp[ans_id*9+7]+=Pmtrx[8]*(Q_000010001*PR_000002010000+Q_000010101*PR_000002010001+Q_000110001*PR_000002010010+Q_000110101*PR_000002010011);
ans_temp[ans_id*9+8]+=Pmtrx[6]*(Q_001000010*PR_000002010000+Q_001000110*PR_000002010001+Q_101000010*PR_000002010100+Q_101000110*PR_000002010101);
ans_temp[ans_id*9+8]+=Pmtrx[7]*(Q_000001010*PR_000002010000+Q_000001110*PR_000002010001+Q_000101010*PR_000002010010+Q_000101110*PR_000002010011);
ans_temp[ans_id*9+8]+=Pmtrx[8]*(Q_000000011*PR_000002010000+Q_000000111*PR_000002010001+Q_000000211*PR_000002010002);
ans_temp[ans_id*9+6]+=Pmtrx[9]*(Q_011000000*PR_001000011000+Q_111000000*PR_001000011100+Q_211000000*PR_001000011200);
ans_temp[ans_id*9+6]+=Pmtrx[10]*(Q_010001000*PR_001000011000+Q_010101000*PR_001000011010+Q_110001000*PR_001000011100+Q_110101000*PR_001000011110);
ans_temp[ans_id*9+6]+=Pmtrx[11]*(Q_010000001*PR_001000011000+Q_010000101*PR_001000011001+Q_110000001*PR_001000011100+Q_110000101*PR_001000011101);
ans_temp[ans_id*9+7]+=Pmtrx[9]*(Q_001010000*PR_001000011000+Q_001110000*PR_001000011010+Q_101010000*PR_001000011100+Q_101110000*PR_001000011110);
ans_temp[ans_id*9+7]+=Pmtrx[10]*(Q_000011000*PR_001000011000+Q_000111000*PR_001000011010+Q_000211000*PR_001000011020);
ans_temp[ans_id*9+7]+=Pmtrx[11]*(Q_000010001*PR_001000011000+Q_000010101*PR_001000011001+Q_000110001*PR_001000011010+Q_000110101*PR_001000011011);
ans_temp[ans_id*9+8]+=Pmtrx[9]*(Q_001000010*PR_001000011000+Q_001000110*PR_001000011001+Q_101000010*PR_001000011100+Q_101000110*PR_001000011101);
ans_temp[ans_id*9+8]+=Pmtrx[10]*(Q_000001010*PR_001000011000+Q_000001110*PR_001000011001+Q_000101010*PR_001000011010+Q_000101110*PR_001000011011);
ans_temp[ans_id*9+8]+=Pmtrx[11]*(Q_000000011*PR_001000011000+Q_000000111*PR_001000011001+Q_000000211*PR_001000011002);
ans_temp[ans_id*9+6]+=Pmtrx[12]*(Q_011000000*PR_000001011000+Q_111000000*PR_000001011100+Q_211000000*PR_000001011200);
ans_temp[ans_id*9+6]+=Pmtrx[13]*(Q_010001000*PR_000001011000+Q_010101000*PR_000001011010+Q_110001000*PR_000001011100+Q_110101000*PR_000001011110);
ans_temp[ans_id*9+6]+=Pmtrx[14]*(Q_010000001*PR_000001011000+Q_010000101*PR_000001011001+Q_110000001*PR_000001011100+Q_110000101*PR_000001011101);
ans_temp[ans_id*9+7]+=Pmtrx[12]*(Q_001010000*PR_000001011000+Q_001110000*PR_000001011010+Q_101010000*PR_000001011100+Q_101110000*PR_000001011110);
ans_temp[ans_id*9+7]+=Pmtrx[13]*(Q_000011000*PR_000001011000+Q_000111000*PR_000001011010+Q_000211000*PR_000001011020);
ans_temp[ans_id*9+7]+=Pmtrx[14]*(Q_000010001*PR_000001011000+Q_000010101*PR_000001011001+Q_000110001*PR_000001011010+Q_000110101*PR_000001011011);
ans_temp[ans_id*9+8]+=Pmtrx[12]*(Q_001000010*PR_000001011000+Q_001000110*PR_000001011001+Q_101000010*PR_000001011100+Q_101000110*PR_000001011101);
ans_temp[ans_id*9+8]+=Pmtrx[13]*(Q_000001010*PR_000001011000+Q_000001110*PR_000001011001+Q_000101010*PR_000001011010+Q_000101110*PR_000001011011);
ans_temp[ans_id*9+8]+=Pmtrx[14]*(Q_000000011*PR_000001011000+Q_000000111*PR_000001011001+Q_000000211*PR_000001011002);
ans_temp[ans_id*9+6]+=Pmtrx[15]*(Q_011000000*PR_000000012000+Q_111000000*PR_000000012100+Q_211000000*PR_000000012200);
ans_temp[ans_id*9+6]+=Pmtrx[16]*(Q_010001000*PR_000000012000+Q_010101000*PR_000000012010+Q_110001000*PR_000000012100+Q_110101000*PR_000000012110);
ans_temp[ans_id*9+6]+=Pmtrx[17]*(Q_010000001*PR_000000012000+Q_010000101*PR_000000012001+Q_110000001*PR_000000012100+Q_110000101*PR_000000012101);
ans_temp[ans_id*9+7]+=Pmtrx[15]*(Q_001010000*PR_000000012000+Q_001110000*PR_000000012010+Q_101010000*PR_000000012100+Q_101110000*PR_000000012110);
ans_temp[ans_id*9+7]+=Pmtrx[16]*(Q_000011000*PR_000000012000+Q_000111000*PR_000000012010+Q_000211000*PR_000000012020);
ans_temp[ans_id*9+7]+=Pmtrx[17]*(Q_000010001*PR_000000012000+Q_000010101*PR_000000012001+Q_000110001*PR_000000012010+Q_000110101*PR_000000012011);
ans_temp[ans_id*9+8]+=Pmtrx[15]*(Q_001000010*PR_000000012000+Q_001000110*PR_000000012001+Q_101000010*PR_000000012100+Q_101000110*PR_000000012101);
ans_temp[ans_id*9+8]+=Pmtrx[16]*(Q_000001010*PR_000000012000+Q_000001110*PR_000000012001+Q_000101010*PR_000000012010+Q_000101110*PR_000000012011);
ans_temp[ans_id*9+8]+=Pmtrx[17]*(Q_000000011*PR_000000012000+Q_000000111*PR_000000012001+Q_000000211*PR_000000012002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<9;ians++){
ans_temp[tId_x*9+ians]+=ans_temp[(tId_x+num_thread)*9+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=ans_temp[(tId_x)*9+ians];
}
}
}
}
}
__global__ void MD_Kq_pdpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*9];
for(int i=0;i<9;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[6];
Ft_fs_5(5,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[5];
double R_200[4];
double R_300[3];
double R_400[2];
double R_500[1];
double R_010[5];
double R_110[4];
double R_210[3];
double R_310[2];
double R_410[1];
double R_020[4];
double R_120[3];
double R_220[2];
double R_320[1];
double R_030[3];
double R_130[2];
double R_230[1];
double R_040[2];
double R_140[1];
double R_050[1];
double R_001[5];
double R_101[4];
double R_201[3];
double R_301[2];
double R_401[1];
double R_011[4];
double R_111[3];
double R_211[2];
double R_311[1];
double R_021[3];
double R_121[2];
double R_221[1];
double R_031[2];
double R_131[1];
double R_041[1];
double R_002[4];
double R_102[3];
double R_202[2];
double R_302[1];
double R_012[3];
double R_112[2];
double R_212[1];
double R_022[2];
double R_122[1];
double R_032[1];
double R_003[3];
double R_103[2];
double R_203[1];
double R_013[2];
double R_113[1];
double R_023[1];
double R_004[2];
double R_104[1];
double R_014[1];
double R_005[1];
for(int i=0;i<5;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<5;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<5;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<4;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<4;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<4;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<4;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<3;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<3;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<3;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<3;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<3;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<3;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<3;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<3;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<3;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<2;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<2;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<2;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<2;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<2;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<2;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<2;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<2;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<2;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<2;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<2;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<2;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<2;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<1;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<1;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<1;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<1;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<1;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<1;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<1;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<1;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<1;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<1;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<1;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<1;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<1;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<1;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<1;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<1;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<1;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<1;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<1;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<1;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<1;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double QR_011000000003=Q_011000000*R_003[0]+-1*Q_111000000*R_103[0]+Q_211000000*R_203[0];
double QR_010001000003=Q_010001000*R_003[0]+-1*Q_010101000*R_013[0]+-1*Q_110001000*R_103[0]+Q_110101000*R_113[0];
double QR_010000001003=Q_010000001*R_003[0]+-1*Q_010000101*R_004[0]+-1*Q_110000001*R_103[0]+Q_110000101*R_104[0];
double QR_001010000003=Q_001010000*R_003[0]+-1*Q_001110000*R_013[0]+-1*Q_101010000*R_103[0]+Q_101110000*R_113[0];
double QR_000011000003=Q_000011000*R_003[0]+-1*Q_000111000*R_013[0]+Q_000211000*R_023[0];
double QR_000010001003=Q_000010001*R_003[0]+-1*Q_000010101*R_004[0]+-1*Q_000110001*R_013[0]+Q_000110101*R_014[0];
double QR_001000010003=Q_001000010*R_003[0]+-1*Q_001000110*R_004[0]+-1*Q_101000010*R_103[0]+Q_101000110*R_104[0];
double QR_000001010003=Q_000001010*R_003[0]+-1*Q_000001110*R_004[0]+-1*Q_000101010*R_013[0]+Q_000101110*R_014[0];
double QR_000000011003=Q_000000011*R_003[0]+-1*Q_000000111*R_004[0]+Q_000000211*R_005[0];
double QR_011000000012=Q_011000000*R_012[0]+-1*Q_111000000*R_112[0]+Q_211000000*R_212[0];
double QR_010001000012=Q_010001000*R_012[0]+-1*Q_010101000*R_022[0]+-1*Q_110001000*R_112[0]+Q_110101000*R_122[0];
double QR_010000001012=Q_010000001*R_012[0]+-1*Q_010000101*R_013[0]+-1*Q_110000001*R_112[0]+Q_110000101*R_113[0];
double QR_001010000012=Q_001010000*R_012[0]+-1*Q_001110000*R_022[0]+-1*Q_101010000*R_112[0]+Q_101110000*R_122[0];
double QR_000011000012=Q_000011000*R_012[0]+-1*Q_000111000*R_022[0]+Q_000211000*R_032[0];
double QR_000010001012=Q_000010001*R_012[0]+-1*Q_000010101*R_013[0]+-1*Q_000110001*R_022[0]+Q_000110101*R_023[0];
double QR_001000010012=Q_001000010*R_012[0]+-1*Q_001000110*R_013[0]+-1*Q_101000010*R_112[0]+Q_101000110*R_113[0];
double QR_000001010012=Q_000001010*R_012[0]+-1*Q_000001110*R_013[0]+-1*Q_000101010*R_022[0]+Q_000101110*R_023[0];
double QR_000000011012=Q_000000011*R_012[0]+-1*Q_000000111*R_013[0]+Q_000000211*R_014[0];
double QR_011000000021=Q_011000000*R_021[0]+-1*Q_111000000*R_121[0]+Q_211000000*R_221[0];
double QR_010001000021=Q_010001000*R_021[0]+-1*Q_010101000*R_031[0]+-1*Q_110001000*R_121[0]+Q_110101000*R_131[0];
double QR_010000001021=Q_010000001*R_021[0]+-1*Q_010000101*R_022[0]+-1*Q_110000001*R_121[0]+Q_110000101*R_122[0];
double QR_001010000021=Q_001010000*R_021[0]+-1*Q_001110000*R_031[0]+-1*Q_101010000*R_121[0]+Q_101110000*R_131[0];
double QR_000011000021=Q_000011000*R_021[0]+-1*Q_000111000*R_031[0]+Q_000211000*R_041[0];
double QR_000010001021=Q_000010001*R_021[0]+-1*Q_000010101*R_022[0]+-1*Q_000110001*R_031[0]+Q_000110101*R_032[0];
double QR_001000010021=Q_001000010*R_021[0]+-1*Q_001000110*R_022[0]+-1*Q_101000010*R_121[0]+Q_101000110*R_122[0];
double QR_000001010021=Q_000001010*R_021[0]+-1*Q_000001110*R_022[0]+-1*Q_000101010*R_031[0]+Q_000101110*R_032[0];
double QR_000000011021=Q_000000011*R_021[0]+-1*Q_000000111*R_022[0]+Q_000000211*R_023[0];
double QR_011000000030=Q_011000000*R_030[0]+-1*Q_111000000*R_130[0]+Q_211000000*R_230[0];
double QR_010001000030=Q_010001000*R_030[0]+-1*Q_010101000*R_040[0]+-1*Q_110001000*R_130[0]+Q_110101000*R_140[0];
double QR_010000001030=Q_010000001*R_030[0]+-1*Q_010000101*R_031[0]+-1*Q_110000001*R_130[0]+Q_110000101*R_131[0];
double QR_001010000030=Q_001010000*R_030[0]+-1*Q_001110000*R_040[0]+-1*Q_101010000*R_130[0]+Q_101110000*R_140[0];
double QR_000011000030=Q_000011000*R_030[0]+-1*Q_000111000*R_040[0]+Q_000211000*R_050[0];
double QR_000010001030=Q_000010001*R_030[0]+-1*Q_000010101*R_031[0]+-1*Q_000110001*R_040[0]+Q_000110101*R_041[0];
double QR_001000010030=Q_001000010*R_030[0]+-1*Q_001000110*R_031[0]+-1*Q_101000010*R_130[0]+Q_101000110*R_131[0];
double QR_000001010030=Q_000001010*R_030[0]+-1*Q_000001110*R_031[0]+-1*Q_000101010*R_040[0]+Q_000101110*R_041[0];
double QR_000000011030=Q_000000011*R_030[0]+-1*Q_000000111*R_031[0]+Q_000000211*R_032[0];
double QR_011000000102=Q_011000000*R_102[0]+-1*Q_111000000*R_202[0]+Q_211000000*R_302[0];
double QR_010001000102=Q_010001000*R_102[0]+-1*Q_010101000*R_112[0]+-1*Q_110001000*R_202[0]+Q_110101000*R_212[0];
double QR_010000001102=Q_010000001*R_102[0]+-1*Q_010000101*R_103[0]+-1*Q_110000001*R_202[0]+Q_110000101*R_203[0];
double QR_001010000102=Q_001010000*R_102[0]+-1*Q_001110000*R_112[0]+-1*Q_101010000*R_202[0]+Q_101110000*R_212[0];
double QR_000011000102=Q_000011000*R_102[0]+-1*Q_000111000*R_112[0]+Q_000211000*R_122[0];
double QR_000010001102=Q_000010001*R_102[0]+-1*Q_000010101*R_103[0]+-1*Q_000110001*R_112[0]+Q_000110101*R_113[0];
double QR_001000010102=Q_001000010*R_102[0]+-1*Q_001000110*R_103[0]+-1*Q_101000010*R_202[0]+Q_101000110*R_203[0];
double QR_000001010102=Q_000001010*R_102[0]+-1*Q_000001110*R_103[0]+-1*Q_000101010*R_112[0]+Q_000101110*R_113[0];
double QR_000000011102=Q_000000011*R_102[0]+-1*Q_000000111*R_103[0]+Q_000000211*R_104[0];
double QR_011000000111=Q_011000000*R_111[0]+-1*Q_111000000*R_211[0]+Q_211000000*R_311[0];
double QR_010001000111=Q_010001000*R_111[0]+-1*Q_010101000*R_121[0]+-1*Q_110001000*R_211[0]+Q_110101000*R_221[0];
double QR_010000001111=Q_010000001*R_111[0]+-1*Q_010000101*R_112[0]+-1*Q_110000001*R_211[0]+Q_110000101*R_212[0];
double QR_001010000111=Q_001010000*R_111[0]+-1*Q_001110000*R_121[0]+-1*Q_101010000*R_211[0]+Q_101110000*R_221[0];
double QR_000011000111=Q_000011000*R_111[0]+-1*Q_000111000*R_121[0]+Q_000211000*R_131[0];
double QR_000010001111=Q_000010001*R_111[0]+-1*Q_000010101*R_112[0]+-1*Q_000110001*R_121[0]+Q_000110101*R_122[0];
double QR_001000010111=Q_001000010*R_111[0]+-1*Q_001000110*R_112[0]+-1*Q_101000010*R_211[0]+Q_101000110*R_212[0];
double QR_000001010111=Q_000001010*R_111[0]+-1*Q_000001110*R_112[0]+-1*Q_000101010*R_121[0]+Q_000101110*R_122[0];
double QR_000000011111=Q_000000011*R_111[0]+-1*Q_000000111*R_112[0]+Q_000000211*R_113[0];
double QR_011000000120=Q_011000000*R_120[0]+-1*Q_111000000*R_220[0]+Q_211000000*R_320[0];
double QR_010001000120=Q_010001000*R_120[0]+-1*Q_010101000*R_130[0]+-1*Q_110001000*R_220[0]+Q_110101000*R_230[0];
double QR_010000001120=Q_010000001*R_120[0]+-1*Q_010000101*R_121[0]+-1*Q_110000001*R_220[0]+Q_110000101*R_221[0];
double QR_001010000120=Q_001010000*R_120[0]+-1*Q_001110000*R_130[0]+-1*Q_101010000*R_220[0]+Q_101110000*R_230[0];
double QR_000011000120=Q_000011000*R_120[0]+-1*Q_000111000*R_130[0]+Q_000211000*R_140[0];
double QR_000010001120=Q_000010001*R_120[0]+-1*Q_000010101*R_121[0]+-1*Q_000110001*R_130[0]+Q_000110101*R_131[0];
double QR_001000010120=Q_001000010*R_120[0]+-1*Q_001000110*R_121[0]+-1*Q_101000010*R_220[0]+Q_101000110*R_221[0];
double QR_000001010120=Q_000001010*R_120[0]+-1*Q_000001110*R_121[0]+-1*Q_000101010*R_130[0]+Q_000101110*R_131[0];
double QR_000000011120=Q_000000011*R_120[0]+-1*Q_000000111*R_121[0]+Q_000000211*R_122[0];
double QR_011000000201=Q_011000000*R_201[0]+-1*Q_111000000*R_301[0]+Q_211000000*R_401[0];
double QR_010001000201=Q_010001000*R_201[0]+-1*Q_010101000*R_211[0]+-1*Q_110001000*R_301[0]+Q_110101000*R_311[0];
double QR_010000001201=Q_010000001*R_201[0]+-1*Q_010000101*R_202[0]+-1*Q_110000001*R_301[0]+Q_110000101*R_302[0];
double QR_001010000201=Q_001010000*R_201[0]+-1*Q_001110000*R_211[0]+-1*Q_101010000*R_301[0]+Q_101110000*R_311[0];
double QR_000011000201=Q_000011000*R_201[0]+-1*Q_000111000*R_211[0]+Q_000211000*R_221[0];
double QR_000010001201=Q_000010001*R_201[0]+-1*Q_000010101*R_202[0]+-1*Q_000110001*R_211[0]+Q_000110101*R_212[0];
double QR_001000010201=Q_001000010*R_201[0]+-1*Q_001000110*R_202[0]+-1*Q_101000010*R_301[0]+Q_101000110*R_302[0];
double QR_000001010201=Q_000001010*R_201[0]+-1*Q_000001110*R_202[0]+-1*Q_000101010*R_211[0]+Q_000101110*R_212[0];
double QR_000000011201=Q_000000011*R_201[0]+-1*Q_000000111*R_202[0]+Q_000000211*R_203[0];
double QR_011000000210=Q_011000000*R_210[0]+-1*Q_111000000*R_310[0]+Q_211000000*R_410[0];
double QR_010001000210=Q_010001000*R_210[0]+-1*Q_010101000*R_220[0]+-1*Q_110001000*R_310[0]+Q_110101000*R_320[0];
double QR_010000001210=Q_010000001*R_210[0]+-1*Q_010000101*R_211[0]+-1*Q_110000001*R_310[0]+Q_110000101*R_311[0];
double QR_001010000210=Q_001010000*R_210[0]+-1*Q_001110000*R_220[0]+-1*Q_101010000*R_310[0]+Q_101110000*R_320[0];
double QR_000011000210=Q_000011000*R_210[0]+-1*Q_000111000*R_220[0]+Q_000211000*R_230[0];
double QR_000010001210=Q_000010001*R_210[0]+-1*Q_000010101*R_211[0]+-1*Q_000110001*R_220[0]+Q_000110101*R_221[0];
double QR_001000010210=Q_001000010*R_210[0]+-1*Q_001000110*R_211[0]+-1*Q_101000010*R_310[0]+Q_101000110*R_311[0];
double QR_000001010210=Q_000001010*R_210[0]+-1*Q_000001110*R_211[0]+-1*Q_000101010*R_220[0]+Q_000101110*R_221[0];
double QR_000000011210=Q_000000011*R_210[0]+-1*Q_000000111*R_211[0]+Q_000000211*R_212[0];
double QR_011000000300=Q_011000000*R_300[0]+-1*Q_111000000*R_400[0]+Q_211000000*R_500[0];
double QR_010001000300=Q_010001000*R_300[0]+-1*Q_010101000*R_310[0]+-1*Q_110001000*R_400[0]+Q_110101000*R_410[0];
double QR_010000001300=Q_010000001*R_300[0]+-1*Q_010000101*R_301[0]+-1*Q_110000001*R_400[0]+Q_110000101*R_401[0];
double QR_001010000300=Q_001010000*R_300[0]+-1*Q_001110000*R_310[0]+-1*Q_101010000*R_400[0]+Q_101110000*R_410[0];
double QR_000011000300=Q_000011000*R_300[0]+-1*Q_000111000*R_310[0]+Q_000211000*R_320[0];
double QR_000010001300=Q_000010001*R_300[0]+-1*Q_000010101*R_301[0]+-1*Q_000110001*R_310[0]+Q_000110101*R_311[0];
double QR_001000010300=Q_001000010*R_300[0]+-1*Q_001000110*R_301[0]+-1*Q_101000010*R_400[0]+Q_101000110*R_401[0];
double QR_000001010300=Q_000001010*R_300[0]+-1*Q_000001110*R_301[0]+-1*Q_000101010*R_310[0]+Q_000101110*R_311[0];
double QR_000000011300=Q_000000011*R_300[0]+-1*Q_000000111*R_301[0]+Q_000000211*R_302[0];
/* Bra-side expansion-coefficient arrays Pd_*, built by the same recurrence
 * pattern as the Qd_* arrays (Pd_001/Pd_010 come from above this chunk).
 * NOTE(review): aPin1 is presumably 1/(2*zeta) for the bra pair -- confirm
 * against the generator. */
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_012[3];
double Pd_112[3];
double Pd_212[3];
double Pd_312[3];
/* base first-order coefficient: constant aPin1 in every direction */
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
/* second-order family built from Pd_001 */
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
/* mixed second-order family from Pd_010 and Pd_001 */
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
/* third-order family built on the Pd_x11 set */
for(int i=0;i<3;i++){
Pd_012[i]=Pd_111[i]+Pd_001[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_112[i]=2*Pd_211[i]+Pd_001[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_212[i]=Pd_001[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_312[i]=aPin1*Pd_211[i];
}
/* Scalar bra-side coefficients: each P_<9 digits> is a product of per-axis
 * Pd values, mirroring the Q_* naming scheme (three 3-digit triplets, one per
 * Cartesian axis -- presumed generator convention, confirm). */
double P_012000000=Pd_012[0];
double P_112000000=Pd_112[0];
double P_212000000=Pd_212[0];
double P_312000000=Pd_312[0];
double P_011001000=Pd_011[0]*Pd_001[1];
double P_011101000=Pd_011[0]*Pd_101[1];
double P_111001000=Pd_111[0]*Pd_001[1];
double P_111101000=Pd_111[0]*Pd_101[1];
double P_211001000=Pd_211[0]*Pd_001[1];
double P_211101000=Pd_211[0]*Pd_101[1];
double P_010002000=Pd_010[0]*Pd_002[1];
double P_010102000=Pd_010[0]*Pd_102[1];
double P_010202000=Pd_010[0]*Pd_202[1];
double P_110002000=Pd_110[0]*Pd_002[1];
double P_110102000=Pd_110[0]*Pd_102[1];
double P_110202000=Pd_110[0]*Pd_202[1];
double P_011000001=Pd_011[0]*Pd_001[2];
double P_011000101=Pd_011[0]*Pd_101[2];
double P_111000001=Pd_111[0]*Pd_001[2];
double P_111000101=Pd_111[0]*Pd_101[2];
double P_211000001=Pd_211[0]*Pd_001[2];
double P_211000101=Pd_211[0]*Pd_101[2];
double P_010001001=Pd_010[0]*Pd_001[1]*Pd_001[2];
double P_010001101=Pd_010[0]*Pd_001[1]*Pd_101[2];
double P_010101001=Pd_010[0]*Pd_101[1]*Pd_001[2];
double P_010101101=Pd_010[0]*Pd_101[1]*Pd_101[2];
double P_110001001=Pd_110[0]*Pd_001[1]*Pd_001[2];
double P_110001101=Pd_110[0]*Pd_001[1]*Pd_101[2];
double P_110101001=Pd_110[0]*Pd_101[1]*Pd_001[2];
double P_110101101=Pd_110[0]*Pd_101[1]*Pd_101[2];
double P_010000002=Pd_010[0]*Pd_002[2];
double P_010000102=Pd_010[0]*Pd_102[2];
double P_010000202=Pd_010[0]*Pd_202[2];
double P_110000002=Pd_110[0]*Pd_002[2];
double P_110000102=Pd_110[0]*Pd_102[2];
double P_110000202=Pd_110[0]*Pd_202[2];
double P_002010000=Pd_002[0]*Pd_010[1];
double P_002110000=Pd_002[0]*Pd_110[1];
double P_102010000=Pd_102[0]*Pd_010[1];
double P_102110000=Pd_102[0]*Pd_110[1];
double P_202010000=Pd_202[0]*Pd_010[1];
double P_202110000=Pd_202[0]*Pd_110[1];
double P_001011000=Pd_001[0]*Pd_011[1];
double P_001111000=Pd_001[0]*Pd_111[1];
double P_001211000=Pd_001[0]*Pd_211[1];
double P_101011000=Pd_101[0]*Pd_011[1];
double P_101111000=Pd_101[0]*Pd_111[1];
double P_101211000=Pd_101[0]*Pd_211[1];
double P_000012000=Pd_012[1];
double P_000112000=Pd_112[1];
double P_000212000=Pd_212[1];
double P_000312000=Pd_312[1];
double P_001010001=Pd_001[0]*Pd_010[1]*Pd_001[2];
double P_001010101=Pd_001[0]*Pd_010[1]*Pd_101[2];
double P_001110001=Pd_001[0]*Pd_110[1]*Pd_001[2];
double P_001110101=Pd_001[0]*Pd_110[1]*Pd_101[2];
double P_101010001=Pd_101[0]*Pd_010[1]*Pd_001[2];
double P_101010101=Pd_101[0]*Pd_010[1]*Pd_101[2];
double P_101110001=Pd_101[0]*Pd_110[1]*Pd_001[2];
double P_101110101=Pd_101[0]*Pd_110[1]*Pd_101[2];
double P_000011001=Pd_011[1]*Pd_001[2];
double P_000011101=Pd_011[1]*Pd_101[2];
double P_000111001=Pd_111[1]*Pd_001[2];
double P_000111101=Pd_111[1]*Pd_101[2];
double P_000211001=Pd_211[1]*Pd_001[2];
double P_000211101=Pd_211[1]*Pd_101[2];
double P_000010002=Pd_010[1]*Pd_002[2];
double P_000010102=Pd_010[1]*Pd_102[2];
double P_000010202=Pd_010[1]*Pd_202[2];
double P_000110002=Pd_110[1]*Pd_002[2];
double P_000110102=Pd_110[1]*Pd_102[2];
double P_000110202=Pd_110[1]*Pd_202[2];
double P_002000010=Pd_002[0]*Pd_010[2];
double P_002000110=Pd_002[0]*Pd_110[2];
double P_102000010=Pd_102[0]*Pd_010[2];
double P_102000110=Pd_102[0]*Pd_110[2];
double P_202000010=Pd_202[0]*Pd_010[2];
double P_202000110=Pd_202[0]*Pd_110[2];
double P_001001010=Pd_001[0]*Pd_001[1]*Pd_010[2];
double P_001001110=Pd_001[0]*Pd_001[1]*Pd_110[2];
double P_001101010=Pd_001[0]*Pd_101[1]*Pd_010[2];
double P_001101110=Pd_001[0]*Pd_101[1]*Pd_110[2];
double P_101001010=Pd_101[0]*Pd_001[1]*Pd_010[2];
double P_101001110=Pd_101[0]*Pd_001[1]*Pd_110[2];
double P_101101010=Pd_101[0]*Pd_101[1]*Pd_010[2];
double P_101101110=Pd_101[0]*Pd_101[1]*Pd_110[2];
double P_000002010=Pd_002[1]*Pd_010[2];
double P_000002110=Pd_002[1]*Pd_110[2];
double P_000102010=Pd_102[1]*Pd_010[2];
double P_000102110=Pd_102[1]*Pd_110[2];
double P_000202010=Pd_202[1]*Pd_010[2];
double P_000202110=Pd_202[1]*Pd_110[2];
double P_001000011=Pd_001[0]*Pd_011[2];
double P_001000111=Pd_001[0]*Pd_111[2];
double P_001000211=Pd_001[0]*Pd_211[2];
double P_101000011=Pd_101[0]*Pd_011[2];
double P_101000111=Pd_101[0]*Pd_111[2];
double P_101000211=Pd_101[0]*Pd_211[2];
double P_000001011=Pd_001[1]*Pd_011[2];
double P_000001111=Pd_001[1]*Pd_111[2];
double P_000001211=Pd_001[1]*Pd_211[2];
double P_000101011=Pd_101[1]*Pd_011[2];
double P_000101111=Pd_101[1]*Pd_111[2];
double P_000101211=Pd_101[1]*Pd_211[2];
double P_000000012=Pd_012[2];
double P_000000112=Pd_112[2];
double P_000000212=Pd_212[2];
double P_000000312=Pd_312[2];
ans_temp[ans_id*9+0]+=Pmtrx[0]*(P_012000000*QR_011000000000+P_112000000*QR_011000000100+P_212000000*QR_011000000200+P_312000000*QR_011000000300);
ans_temp[ans_id*9+0]+=Pmtrx[1]*(P_012000000*QR_010001000000+P_112000000*QR_010001000100+P_212000000*QR_010001000200+P_312000000*QR_010001000300);
ans_temp[ans_id*9+0]+=Pmtrx[2]*(P_012000000*QR_010000001000+P_112000000*QR_010000001100+P_212000000*QR_010000001200+P_312000000*QR_010000001300);
ans_temp[ans_id*9+1]+=Pmtrx[0]*(P_012000000*QR_001010000000+P_112000000*QR_001010000100+P_212000000*QR_001010000200+P_312000000*QR_001010000300);
ans_temp[ans_id*9+1]+=Pmtrx[1]*(P_012000000*QR_000011000000+P_112000000*QR_000011000100+P_212000000*QR_000011000200+P_312000000*QR_000011000300);
ans_temp[ans_id*9+1]+=Pmtrx[2]*(P_012000000*QR_000010001000+P_112000000*QR_000010001100+P_212000000*QR_000010001200+P_312000000*QR_000010001300);
ans_temp[ans_id*9+2]+=Pmtrx[0]*(P_012000000*QR_001000010000+P_112000000*QR_001000010100+P_212000000*QR_001000010200+P_312000000*QR_001000010300);
ans_temp[ans_id*9+2]+=Pmtrx[1]*(P_012000000*QR_000001010000+P_112000000*QR_000001010100+P_212000000*QR_000001010200+P_312000000*QR_000001010300);
ans_temp[ans_id*9+2]+=Pmtrx[2]*(P_012000000*QR_000000011000+P_112000000*QR_000000011100+P_212000000*QR_000000011200+P_312000000*QR_000000011300);
ans_temp[ans_id*9+0]+=Pmtrx[3]*(P_011001000*QR_011000000000+P_011101000*QR_011000000010+P_111001000*QR_011000000100+P_111101000*QR_011000000110+P_211001000*QR_011000000200+P_211101000*QR_011000000210);
ans_temp[ans_id*9+0]+=Pmtrx[4]*(P_011001000*QR_010001000000+P_011101000*QR_010001000010+P_111001000*QR_010001000100+P_111101000*QR_010001000110+P_211001000*QR_010001000200+P_211101000*QR_010001000210);
ans_temp[ans_id*9+0]+=Pmtrx[5]*(P_011001000*QR_010000001000+P_011101000*QR_010000001010+P_111001000*QR_010000001100+P_111101000*QR_010000001110+P_211001000*QR_010000001200+P_211101000*QR_010000001210);
ans_temp[ans_id*9+1]+=Pmtrx[3]*(P_011001000*QR_001010000000+P_011101000*QR_001010000010+P_111001000*QR_001010000100+P_111101000*QR_001010000110+P_211001000*QR_001010000200+P_211101000*QR_001010000210);
ans_temp[ans_id*9+1]+=Pmtrx[4]*(P_011001000*QR_000011000000+P_011101000*QR_000011000010+P_111001000*QR_000011000100+P_111101000*QR_000011000110+P_211001000*QR_000011000200+P_211101000*QR_000011000210);
ans_temp[ans_id*9+1]+=Pmtrx[5]*(P_011001000*QR_000010001000+P_011101000*QR_000010001010+P_111001000*QR_000010001100+P_111101000*QR_000010001110+P_211001000*QR_000010001200+P_211101000*QR_000010001210);
ans_temp[ans_id*9+2]+=Pmtrx[3]*(P_011001000*QR_001000010000+P_011101000*QR_001000010010+P_111001000*QR_001000010100+P_111101000*QR_001000010110+P_211001000*QR_001000010200+P_211101000*QR_001000010210);
ans_temp[ans_id*9+2]+=Pmtrx[4]*(P_011001000*QR_000001010000+P_011101000*QR_000001010010+P_111001000*QR_000001010100+P_111101000*QR_000001010110+P_211001000*QR_000001010200+P_211101000*QR_000001010210);
ans_temp[ans_id*9+2]+=Pmtrx[5]*(P_011001000*QR_000000011000+P_011101000*QR_000000011010+P_111001000*QR_000000011100+P_111101000*QR_000000011110+P_211001000*QR_000000011200+P_211101000*QR_000000011210);
ans_temp[ans_id*9+0]+=Pmtrx[6]*(P_010002000*QR_011000000000+P_010102000*QR_011000000010+P_010202000*QR_011000000020+P_110002000*QR_011000000100+P_110102000*QR_011000000110+P_110202000*QR_011000000120);
ans_temp[ans_id*9+0]+=Pmtrx[7]*(P_010002000*QR_010001000000+P_010102000*QR_010001000010+P_010202000*QR_010001000020+P_110002000*QR_010001000100+P_110102000*QR_010001000110+P_110202000*QR_010001000120);
ans_temp[ans_id*9+0]+=Pmtrx[8]*(P_010002000*QR_010000001000+P_010102000*QR_010000001010+P_010202000*QR_010000001020+P_110002000*QR_010000001100+P_110102000*QR_010000001110+P_110202000*QR_010000001120);
ans_temp[ans_id*9+1]+=Pmtrx[6]*(P_010002000*QR_001010000000+P_010102000*QR_001010000010+P_010202000*QR_001010000020+P_110002000*QR_001010000100+P_110102000*QR_001010000110+P_110202000*QR_001010000120);
ans_temp[ans_id*9+1]+=Pmtrx[7]*(P_010002000*QR_000011000000+P_010102000*QR_000011000010+P_010202000*QR_000011000020+P_110002000*QR_000011000100+P_110102000*QR_000011000110+P_110202000*QR_000011000120);
ans_temp[ans_id*9+1]+=Pmtrx[8]*(P_010002000*QR_000010001000+P_010102000*QR_000010001010+P_010202000*QR_000010001020+P_110002000*QR_000010001100+P_110102000*QR_000010001110+P_110202000*QR_000010001120);
ans_temp[ans_id*9+2]+=Pmtrx[6]*(P_010002000*QR_001000010000+P_010102000*QR_001000010010+P_010202000*QR_001000010020+P_110002000*QR_001000010100+P_110102000*QR_001000010110+P_110202000*QR_001000010120);
ans_temp[ans_id*9+2]+=Pmtrx[7]*(P_010002000*QR_000001010000+P_010102000*QR_000001010010+P_010202000*QR_000001010020+P_110002000*QR_000001010100+P_110102000*QR_000001010110+P_110202000*QR_000001010120);
ans_temp[ans_id*9+2]+=Pmtrx[8]*(P_010002000*QR_000000011000+P_010102000*QR_000000011010+P_010202000*QR_000000011020+P_110002000*QR_000000011100+P_110102000*QR_000000011110+P_110202000*QR_000000011120);
ans_temp[ans_id*9+0]+=Pmtrx[9]*(P_011000001*QR_011000000000+P_011000101*QR_011000000001+P_111000001*QR_011000000100+P_111000101*QR_011000000101+P_211000001*QR_011000000200+P_211000101*QR_011000000201);
ans_temp[ans_id*9+0]+=Pmtrx[10]*(P_011000001*QR_010001000000+P_011000101*QR_010001000001+P_111000001*QR_010001000100+P_111000101*QR_010001000101+P_211000001*QR_010001000200+P_211000101*QR_010001000201);
ans_temp[ans_id*9+0]+=Pmtrx[11]*(P_011000001*QR_010000001000+P_011000101*QR_010000001001+P_111000001*QR_010000001100+P_111000101*QR_010000001101+P_211000001*QR_010000001200+P_211000101*QR_010000001201);
ans_temp[ans_id*9+1]+=Pmtrx[9]*(P_011000001*QR_001010000000+P_011000101*QR_001010000001+P_111000001*QR_001010000100+P_111000101*QR_001010000101+P_211000001*QR_001010000200+P_211000101*QR_001010000201);
ans_temp[ans_id*9+1]+=Pmtrx[10]*(P_011000001*QR_000011000000+P_011000101*QR_000011000001+P_111000001*QR_000011000100+P_111000101*QR_000011000101+P_211000001*QR_000011000200+P_211000101*QR_000011000201);
ans_temp[ans_id*9+1]+=Pmtrx[11]*(P_011000001*QR_000010001000+P_011000101*QR_000010001001+P_111000001*QR_000010001100+P_111000101*QR_000010001101+P_211000001*QR_000010001200+P_211000101*QR_000010001201);
ans_temp[ans_id*9+2]+=Pmtrx[9]*(P_011000001*QR_001000010000+P_011000101*QR_001000010001+P_111000001*QR_001000010100+P_111000101*QR_001000010101+P_211000001*QR_001000010200+P_211000101*QR_001000010201);
ans_temp[ans_id*9+2]+=Pmtrx[10]*(P_011000001*QR_000001010000+P_011000101*QR_000001010001+P_111000001*QR_000001010100+P_111000101*QR_000001010101+P_211000001*QR_000001010200+P_211000101*QR_000001010201);
ans_temp[ans_id*9+2]+=Pmtrx[11]*(P_011000001*QR_000000011000+P_011000101*QR_000000011001+P_111000001*QR_000000011100+P_111000101*QR_000000011101+P_211000001*QR_000000011200+P_211000101*QR_000000011201);
ans_temp[ans_id*9+0]+=Pmtrx[12]*(P_010001001*QR_011000000000+P_010001101*QR_011000000001+P_010101001*QR_011000000010+P_010101101*QR_011000000011+P_110001001*QR_011000000100+P_110001101*QR_011000000101+P_110101001*QR_011000000110+P_110101101*QR_011000000111);
ans_temp[ans_id*9+0]+=Pmtrx[13]*(P_010001001*QR_010001000000+P_010001101*QR_010001000001+P_010101001*QR_010001000010+P_010101101*QR_010001000011+P_110001001*QR_010001000100+P_110001101*QR_010001000101+P_110101001*QR_010001000110+P_110101101*QR_010001000111);
ans_temp[ans_id*9+0]+=Pmtrx[14]*(P_010001001*QR_010000001000+P_010001101*QR_010000001001+P_010101001*QR_010000001010+P_010101101*QR_010000001011+P_110001001*QR_010000001100+P_110001101*QR_010000001101+P_110101001*QR_010000001110+P_110101101*QR_010000001111);
ans_temp[ans_id*9+1]+=Pmtrx[12]*(P_010001001*QR_001010000000+P_010001101*QR_001010000001+P_010101001*QR_001010000010+P_010101101*QR_001010000011+P_110001001*QR_001010000100+P_110001101*QR_001010000101+P_110101001*QR_001010000110+P_110101101*QR_001010000111);
ans_temp[ans_id*9+1]+=Pmtrx[13]*(P_010001001*QR_000011000000+P_010001101*QR_000011000001+P_010101001*QR_000011000010+P_010101101*QR_000011000011+P_110001001*QR_000011000100+P_110001101*QR_000011000101+P_110101001*QR_000011000110+P_110101101*QR_000011000111);
ans_temp[ans_id*9+1]+=Pmtrx[14]*(P_010001001*QR_000010001000+P_010001101*QR_000010001001+P_010101001*QR_000010001010+P_010101101*QR_000010001011+P_110001001*QR_000010001100+P_110001101*QR_000010001101+P_110101001*QR_000010001110+P_110101101*QR_000010001111);
ans_temp[ans_id*9+2]+=Pmtrx[12]*(P_010001001*QR_001000010000+P_010001101*QR_001000010001+P_010101001*QR_001000010010+P_010101101*QR_001000010011+P_110001001*QR_001000010100+P_110001101*QR_001000010101+P_110101001*QR_001000010110+P_110101101*QR_001000010111);
ans_temp[ans_id*9+2]+=Pmtrx[13]*(P_010001001*QR_000001010000+P_010001101*QR_000001010001+P_010101001*QR_000001010010+P_010101101*QR_000001010011+P_110001001*QR_000001010100+P_110001101*QR_000001010101+P_110101001*QR_000001010110+P_110101101*QR_000001010111);
ans_temp[ans_id*9+2]+=Pmtrx[14]*(P_010001001*QR_000000011000+P_010001101*QR_000000011001+P_010101001*QR_000000011010+P_010101101*QR_000000011011+P_110001001*QR_000000011100+P_110001101*QR_000000011101+P_110101001*QR_000000011110+P_110101101*QR_000000011111);
ans_temp[ans_id*9+0]+=Pmtrx[15]*(P_010000002*QR_011000000000+P_010000102*QR_011000000001+P_010000202*QR_011000000002+P_110000002*QR_011000000100+P_110000102*QR_011000000101+P_110000202*QR_011000000102);
ans_temp[ans_id*9+0]+=Pmtrx[16]*(P_010000002*QR_010001000000+P_010000102*QR_010001000001+P_010000202*QR_010001000002+P_110000002*QR_010001000100+P_110000102*QR_010001000101+P_110000202*QR_010001000102);
ans_temp[ans_id*9+0]+=Pmtrx[17]*(P_010000002*QR_010000001000+P_010000102*QR_010000001001+P_010000202*QR_010000001002+P_110000002*QR_010000001100+P_110000102*QR_010000001101+P_110000202*QR_010000001102);
ans_temp[ans_id*9+1]+=Pmtrx[15]*(P_010000002*QR_001010000000+P_010000102*QR_001010000001+P_010000202*QR_001010000002+P_110000002*QR_001010000100+P_110000102*QR_001010000101+P_110000202*QR_001010000102);
ans_temp[ans_id*9+1]+=Pmtrx[16]*(P_010000002*QR_000011000000+P_010000102*QR_000011000001+P_010000202*QR_000011000002+P_110000002*QR_000011000100+P_110000102*QR_000011000101+P_110000202*QR_000011000102);
ans_temp[ans_id*9+1]+=Pmtrx[17]*(P_010000002*QR_000010001000+P_010000102*QR_000010001001+P_010000202*QR_000010001002+P_110000002*QR_000010001100+P_110000102*QR_000010001101+P_110000202*QR_000010001102);
ans_temp[ans_id*9+2]+=Pmtrx[15]*(P_010000002*QR_001000010000+P_010000102*QR_001000010001+P_010000202*QR_001000010002+P_110000002*QR_001000010100+P_110000102*QR_001000010101+P_110000202*QR_001000010102);
ans_temp[ans_id*9+2]+=Pmtrx[16]*(P_010000002*QR_000001010000+P_010000102*QR_000001010001+P_010000202*QR_000001010002+P_110000002*QR_000001010100+P_110000102*QR_000001010101+P_110000202*QR_000001010102);
ans_temp[ans_id*9+2]+=Pmtrx[17]*(P_010000002*QR_000000011000+P_010000102*QR_000000011001+P_010000202*QR_000000011002+P_110000002*QR_000000011100+P_110000102*QR_000000011101+P_110000202*QR_000000011102);
ans_temp[ans_id*9+3]+=Pmtrx[0]*(P_002010000*QR_011000000000+P_002110000*QR_011000000010+P_102010000*QR_011000000100+P_102110000*QR_011000000110+P_202010000*QR_011000000200+P_202110000*QR_011000000210);
ans_temp[ans_id*9+3]+=Pmtrx[1]*(P_002010000*QR_010001000000+P_002110000*QR_010001000010+P_102010000*QR_010001000100+P_102110000*QR_010001000110+P_202010000*QR_010001000200+P_202110000*QR_010001000210);
ans_temp[ans_id*9+3]+=Pmtrx[2]*(P_002010000*QR_010000001000+P_002110000*QR_010000001010+P_102010000*QR_010000001100+P_102110000*QR_010000001110+P_202010000*QR_010000001200+P_202110000*QR_010000001210);
ans_temp[ans_id*9+4]+=Pmtrx[0]*(P_002010000*QR_001010000000+P_002110000*QR_001010000010+P_102010000*QR_001010000100+P_102110000*QR_001010000110+P_202010000*QR_001010000200+P_202110000*QR_001010000210);
ans_temp[ans_id*9+4]+=Pmtrx[1]*(P_002010000*QR_000011000000+P_002110000*QR_000011000010+P_102010000*QR_000011000100+P_102110000*QR_000011000110+P_202010000*QR_000011000200+P_202110000*QR_000011000210);
ans_temp[ans_id*9+4]+=Pmtrx[2]*(P_002010000*QR_000010001000+P_002110000*QR_000010001010+P_102010000*QR_000010001100+P_102110000*QR_000010001110+P_202010000*QR_000010001200+P_202110000*QR_000010001210);
ans_temp[ans_id*9+5]+=Pmtrx[0]*(P_002010000*QR_001000010000+P_002110000*QR_001000010010+P_102010000*QR_001000010100+P_102110000*QR_001000010110+P_202010000*QR_001000010200+P_202110000*QR_001000010210);
ans_temp[ans_id*9+5]+=Pmtrx[1]*(P_002010000*QR_000001010000+P_002110000*QR_000001010010+P_102010000*QR_000001010100+P_102110000*QR_000001010110+P_202010000*QR_000001010200+P_202110000*QR_000001010210);
ans_temp[ans_id*9+5]+=Pmtrx[2]*(P_002010000*QR_000000011000+P_002110000*QR_000000011010+P_102010000*QR_000000011100+P_102110000*QR_000000011110+P_202010000*QR_000000011200+P_202110000*QR_000000011210);
ans_temp[ans_id*9+3]+=Pmtrx[3]*(P_001011000*QR_011000000000+P_001111000*QR_011000000010+P_001211000*QR_011000000020+P_101011000*QR_011000000100+P_101111000*QR_011000000110+P_101211000*QR_011000000120);
ans_temp[ans_id*9+3]+=Pmtrx[4]*(P_001011000*QR_010001000000+P_001111000*QR_010001000010+P_001211000*QR_010001000020+P_101011000*QR_010001000100+P_101111000*QR_010001000110+P_101211000*QR_010001000120);
ans_temp[ans_id*9+3]+=Pmtrx[5]*(P_001011000*QR_010000001000+P_001111000*QR_010000001010+P_001211000*QR_010000001020+P_101011000*QR_010000001100+P_101111000*QR_010000001110+P_101211000*QR_010000001120);
ans_temp[ans_id*9+4]+=Pmtrx[3]*(P_001011000*QR_001010000000+P_001111000*QR_001010000010+P_001211000*QR_001010000020+P_101011000*QR_001010000100+P_101111000*QR_001010000110+P_101211000*QR_001010000120);
ans_temp[ans_id*9+4]+=Pmtrx[4]*(P_001011000*QR_000011000000+P_001111000*QR_000011000010+P_001211000*QR_000011000020+P_101011000*QR_000011000100+P_101111000*QR_000011000110+P_101211000*QR_000011000120);
ans_temp[ans_id*9+4]+=Pmtrx[5]*(P_001011000*QR_000010001000+P_001111000*QR_000010001010+P_001211000*QR_000010001020+P_101011000*QR_000010001100+P_101111000*QR_000010001110+P_101211000*QR_000010001120);
ans_temp[ans_id*9+5]+=Pmtrx[3]*(P_001011000*QR_001000010000+P_001111000*QR_001000010010+P_001211000*QR_001000010020+P_101011000*QR_001000010100+P_101111000*QR_001000010110+P_101211000*QR_001000010120);
ans_temp[ans_id*9+5]+=Pmtrx[4]*(P_001011000*QR_000001010000+P_001111000*QR_000001010010+P_001211000*QR_000001010020+P_101011000*QR_000001010100+P_101111000*QR_000001010110+P_101211000*QR_000001010120);
ans_temp[ans_id*9+5]+=Pmtrx[5]*(P_001011000*QR_000000011000+P_001111000*QR_000000011010+P_001211000*QR_000000011020+P_101011000*QR_000000011100+P_101111000*QR_000000011110+P_101211000*QR_000000011120);
ans_temp[ans_id*9+3]+=Pmtrx[6]*(P_000012000*QR_011000000000+P_000112000*QR_011000000010+P_000212000*QR_011000000020+P_000312000*QR_011000000030);
ans_temp[ans_id*9+3]+=Pmtrx[7]*(P_000012000*QR_010001000000+P_000112000*QR_010001000010+P_000212000*QR_010001000020+P_000312000*QR_010001000030);
ans_temp[ans_id*9+3]+=Pmtrx[8]*(P_000012000*QR_010000001000+P_000112000*QR_010000001010+P_000212000*QR_010000001020+P_000312000*QR_010000001030);
ans_temp[ans_id*9+4]+=Pmtrx[6]*(P_000012000*QR_001010000000+P_000112000*QR_001010000010+P_000212000*QR_001010000020+P_000312000*QR_001010000030);
ans_temp[ans_id*9+4]+=Pmtrx[7]*(P_000012000*QR_000011000000+P_000112000*QR_000011000010+P_000212000*QR_000011000020+P_000312000*QR_000011000030);
ans_temp[ans_id*9+4]+=Pmtrx[8]*(P_000012000*QR_000010001000+P_000112000*QR_000010001010+P_000212000*QR_000010001020+P_000312000*QR_000010001030);
ans_temp[ans_id*9+5]+=Pmtrx[6]*(P_000012000*QR_001000010000+P_000112000*QR_001000010010+P_000212000*QR_001000010020+P_000312000*QR_001000010030);
ans_temp[ans_id*9+5]+=Pmtrx[7]*(P_000012000*QR_000001010000+P_000112000*QR_000001010010+P_000212000*QR_000001010020+P_000312000*QR_000001010030);
ans_temp[ans_id*9+5]+=Pmtrx[8]*(P_000012000*QR_000000011000+P_000112000*QR_000000011010+P_000212000*QR_000000011020+P_000312000*QR_000000011030);
ans_temp[ans_id*9+3]+=Pmtrx[9]*(P_001010001*QR_011000000000+P_001010101*QR_011000000001+P_001110001*QR_011000000010+P_001110101*QR_011000000011+P_101010001*QR_011000000100+P_101010101*QR_011000000101+P_101110001*QR_011000000110+P_101110101*QR_011000000111);
ans_temp[ans_id*9+3]+=Pmtrx[10]*(P_001010001*QR_010001000000+P_001010101*QR_010001000001+P_001110001*QR_010001000010+P_001110101*QR_010001000011+P_101010001*QR_010001000100+P_101010101*QR_010001000101+P_101110001*QR_010001000110+P_101110101*QR_010001000111);
ans_temp[ans_id*9+3]+=Pmtrx[11]*(P_001010001*QR_010000001000+P_001010101*QR_010000001001+P_001110001*QR_010000001010+P_001110101*QR_010000001011+P_101010001*QR_010000001100+P_101010101*QR_010000001101+P_101110001*QR_010000001110+P_101110101*QR_010000001111);
ans_temp[ans_id*9+4]+=Pmtrx[9]*(P_001010001*QR_001010000000+P_001010101*QR_001010000001+P_001110001*QR_001010000010+P_001110101*QR_001010000011+P_101010001*QR_001010000100+P_101010101*QR_001010000101+P_101110001*QR_001010000110+P_101110101*QR_001010000111);
ans_temp[ans_id*9+4]+=Pmtrx[10]*(P_001010001*QR_000011000000+P_001010101*QR_000011000001+P_001110001*QR_000011000010+P_001110101*QR_000011000011+P_101010001*QR_000011000100+P_101010101*QR_000011000101+P_101110001*QR_000011000110+P_101110101*QR_000011000111);
ans_temp[ans_id*9+4]+=Pmtrx[11]*(P_001010001*QR_000010001000+P_001010101*QR_000010001001+P_001110001*QR_000010001010+P_001110101*QR_000010001011+P_101010001*QR_000010001100+P_101010101*QR_000010001101+P_101110001*QR_000010001110+P_101110101*QR_000010001111);
ans_temp[ans_id*9+5]+=Pmtrx[9]*(P_001010001*QR_001000010000+P_001010101*QR_001000010001+P_001110001*QR_001000010010+P_001110101*QR_001000010011+P_101010001*QR_001000010100+P_101010101*QR_001000010101+P_101110001*QR_001000010110+P_101110101*QR_001000010111);
ans_temp[ans_id*9+5]+=Pmtrx[10]*(P_001010001*QR_000001010000+P_001010101*QR_000001010001+P_001110001*QR_000001010010+P_001110101*QR_000001010011+P_101010001*QR_000001010100+P_101010101*QR_000001010101+P_101110001*QR_000001010110+P_101110101*QR_000001010111);
ans_temp[ans_id*9+5]+=Pmtrx[11]*(P_001010001*QR_000000011000+P_001010101*QR_000000011001+P_001110001*QR_000000011010+P_001110101*QR_000000011011+P_101010001*QR_000000011100+P_101010101*QR_000000011101+P_101110001*QR_000000011110+P_101110101*QR_000000011111);
ans_temp[ans_id*9+3]+=Pmtrx[12]*(P_000011001*QR_011000000000+P_000011101*QR_011000000001+P_000111001*QR_011000000010+P_000111101*QR_011000000011+P_000211001*QR_011000000020+P_000211101*QR_011000000021);
ans_temp[ans_id*9+3]+=Pmtrx[13]*(P_000011001*QR_010001000000+P_000011101*QR_010001000001+P_000111001*QR_010001000010+P_000111101*QR_010001000011+P_000211001*QR_010001000020+P_000211101*QR_010001000021);
ans_temp[ans_id*9+3]+=Pmtrx[14]*(P_000011001*QR_010000001000+P_000011101*QR_010000001001+P_000111001*QR_010000001010+P_000111101*QR_010000001011+P_000211001*QR_010000001020+P_000211101*QR_010000001021);
ans_temp[ans_id*9+4]+=Pmtrx[12]*(P_000011001*QR_001010000000+P_000011101*QR_001010000001+P_000111001*QR_001010000010+P_000111101*QR_001010000011+P_000211001*QR_001010000020+P_000211101*QR_001010000021);
ans_temp[ans_id*9+4]+=Pmtrx[13]*(P_000011001*QR_000011000000+P_000011101*QR_000011000001+P_000111001*QR_000011000010+P_000111101*QR_000011000011+P_000211001*QR_000011000020+P_000211101*QR_000011000021);
ans_temp[ans_id*9+4]+=Pmtrx[14]*(P_000011001*QR_000010001000+P_000011101*QR_000010001001+P_000111001*QR_000010001010+P_000111101*QR_000010001011+P_000211001*QR_000010001020+P_000211101*QR_000010001021);
ans_temp[ans_id*9+5]+=Pmtrx[12]*(P_000011001*QR_001000010000+P_000011101*QR_001000010001+P_000111001*QR_001000010010+P_000111101*QR_001000010011+P_000211001*QR_001000010020+P_000211101*QR_001000010021);
ans_temp[ans_id*9+5]+=Pmtrx[13]*(P_000011001*QR_000001010000+P_000011101*QR_000001010001+P_000111001*QR_000001010010+P_000111101*QR_000001010011+P_000211001*QR_000001010020+P_000211101*QR_000001010021);
ans_temp[ans_id*9+5]+=Pmtrx[14]*(P_000011001*QR_000000011000+P_000011101*QR_000000011001+P_000111001*QR_000000011010+P_000111101*QR_000000011011+P_000211001*QR_000000011020+P_000211101*QR_000000011021);
ans_temp[ans_id*9+3]+=Pmtrx[15]*(P_000010002*QR_011000000000+P_000010102*QR_011000000001+P_000010202*QR_011000000002+P_000110002*QR_011000000010+P_000110102*QR_011000000011+P_000110202*QR_011000000012);
ans_temp[ans_id*9+3]+=Pmtrx[16]*(P_000010002*QR_010001000000+P_000010102*QR_010001000001+P_000010202*QR_010001000002+P_000110002*QR_010001000010+P_000110102*QR_010001000011+P_000110202*QR_010001000012);
ans_temp[ans_id*9+3]+=Pmtrx[17]*(P_000010002*QR_010000001000+P_000010102*QR_010000001001+P_000010202*QR_010000001002+P_000110002*QR_010000001010+P_000110102*QR_010000001011+P_000110202*QR_010000001012);
ans_temp[ans_id*9+4]+=Pmtrx[15]*(P_000010002*QR_001010000000+P_000010102*QR_001010000001+P_000010202*QR_001010000002+P_000110002*QR_001010000010+P_000110102*QR_001010000011+P_000110202*QR_001010000012);
ans_temp[ans_id*9+4]+=Pmtrx[16]*(P_000010002*QR_000011000000+P_000010102*QR_000011000001+P_000010202*QR_000011000002+P_000110002*QR_000011000010+P_000110102*QR_000011000011+P_000110202*QR_000011000012);
ans_temp[ans_id*9+4]+=Pmtrx[17]*(P_000010002*QR_000010001000+P_000010102*QR_000010001001+P_000010202*QR_000010001002+P_000110002*QR_000010001010+P_000110102*QR_000010001011+P_000110202*QR_000010001012);
ans_temp[ans_id*9+5]+=Pmtrx[15]*(P_000010002*QR_001000010000+P_000010102*QR_001000010001+P_000010202*QR_001000010002+P_000110002*QR_001000010010+P_000110102*QR_001000010011+P_000110202*QR_001000010012);
ans_temp[ans_id*9+5]+=Pmtrx[16]*(P_000010002*QR_000001010000+P_000010102*QR_000001010001+P_000010202*QR_000001010002+P_000110002*QR_000001010010+P_000110102*QR_000001010011+P_000110202*QR_000001010012);
ans_temp[ans_id*9+5]+=Pmtrx[17]*(P_000010002*QR_000000011000+P_000010102*QR_000000011001+P_000010202*QR_000000011002+P_000110002*QR_000000011010+P_000110102*QR_000000011011+P_000110202*QR_000000011012);
ans_temp[ans_id*9+6]+=Pmtrx[0]*(P_002000010*QR_011000000000+P_002000110*QR_011000000001+P_102000010*QR_011000000100+P_102000110*QR_011000000101+P_202000010*QR_011000000200+P_202000110*QR_011000000201);
ans_temp[ans_id*9+6]+=Pmtrx[1]*(P_002000010*QR_010001000000+P_002000110*QR_010001000001+P_102000010*QR_010001000100+P_102000110*QR_010001000101+P_202000010*QR_010001000200+P_202000110*QR_010001000201);
ans_temp[ans_id*9+6]+=Pmtrx[2]*(P_002000010*QR_010000001000+P_002000110*QR_010000001001+P_102000010*QR_010000001100+P_102000110*QR_010000001101+P_202000010*QR_010000001200+P_202000110*QR_010000001201);
ans_temp[ans_id*9+7]+=Pmtrx[0]*(P_002000010*QR_001010000000+P_002000110*QR_001010000001+P_102000010*QR_001010000100+P_102000110*QR_001010000101+P_202000010*QR_001010000200+P_202000110*QR_001010000201);
ans_temp[ans_id*9+7]+=Pmtrx[1]*(P_002000010*QR_000011000000+P_002000110*QR_000011000001+P_102000010*QR_000011000100+P_102000110*QR_000011000101+P_202000010*QR_000011000200+P_202000110*QR_000011000201);
ans_temp[ans_id*9+7]+=Pmtrx[2]*(P_002000010*QR_000010001000+P_002000110*QR_000010001001+P_102000010*QR_000010001100+P_102000110*QR_000010001101+P_202000010*QR_000010001200+P_202000110*QR_000010001201);
ans_temp[ans_id*9+8]+=Pmtrx[0]*(P_002000010*QR_001000010000+P_002000110*QR_001000010001+P_102000010*QR_001000010100+P_102000110*QR_001000010101+P_202000010*QR_001000010200+P_202000110*QR_001000010201);
ans_temp[ans_id*9+8]+=Pmtrx[1]*(P_002000010*QR_000001010000+P_002000110*QR_000001010001+P_102000010*QR_000001010100+P_102000110*QR_000001010101+P_202000010*QR_000001010200+P_202000110*QR_000001010201);
ans_temp[ans_id*9+8]+=Pmtrx[2]*(P_002000010*QR_000000011000+P_002000110*QR_000000011001+P_102000010*QR_000000011100+P_102000110*QR_000000011101+P_202000010*QR_000000011200+P_202000110*QR_000000011201);
ans_temp[ans_id*9+6]+=Pmtrx[3]*(P_001001010*QR_011000000000+P_001001110*QR_011000000001+P_001101010*QR_011000000010+P_001101110*QR_011000000011+P_101001010*QR_011000000100+P_101001110*QR_011000000101+P_101101010*QR_011000000110+P_101101110*QR_011000000111);
ans_temp[ans_id*9+6]+=Pmtrx[4]*(P_001001010*QR_010001000000+P_001001110*QR_010001000001+P_001101010*QR_010001000010+P_001101110*QR_010001000011+P_101001010*QR_010001000100+P_101001110*QR_010001000101+P_101101010*QR_010001000110+P_101101110*QR_010001000111);
ans_temp[ans_id*9+6]+=Pmtrx[5]*(P_001001010*QR_010000001000+P_001001110*QR_010000001001+P_001101010*QR_010000001010+P_001101110*QR_010000001011+P_101001010*QR_010000001100+P_101001110*QR_010000001101+P_101101010*QR_010000001110+P_101101110*QR_010000001111);
ans_temp[ans_id*9+7]+=Pmtrx[3]*(P_001001010*QR_001010000000+P_001001110*QR_001010000001+P_001101010*QR_001010000010+P_001101110*QR_001010000011+P_101001010*QR_001010000100+P_101001110*QR_001010000101+P_101101010*QR_001010000110+P_101101110*QR_001010000111);
ans_temp[ans_id*9+7]+=Pmtrx[4]*(P_001001010*QR_000011000000+P_001001110*QR_000011000001+P_001101010*QR_000011000010+P_001101110*QR_000011000011+P_101001010*QR_000011000100+P_101001110*QR_000011000101+P_101101010*QR_000011000110+P_101101110*QR_000011000111);
ans_temp[ans_id*9+7]+=Pmtrx[5]*(P_001001010*QR_000010001000+P_001001110*QR_000010001001+P_001101010*QR_000010001010+P_001101110*QR_000010001011+P_101001010*QR_000010001100+P_101001110*QR_000010001101+P_101101010*QR_000010001110+P_101101110*QR_000010001111);
ans_temp[ans_id*9+8]+=Pmtrx[3]*(P_001001010*QR_001000010000+P_001001110*QR_001000010001+P_001101010*QR_001000010010+P_001101110*QR_001000010011+P_101001010*QR_001000010100+P_101001110*QR_001000010101+P_101101010*QR_001000010110+P_101101110*QR_001000010111);
ans_temp[ans_id*9+8]+=Pmtrx[4]*(P_001001010*QR_000001010000+P_001001110*QR_000001010001+P_001101010*QR_000001010010+P_001101110*QR_000001010011+P_101001010*QR_000001010100+P_101001110*QR_000001010101+P_101101010*QR_000001010110+P_101101110*QR_000001010111);
ans_temp[ans_id*9+8]+=Pmtrx[5]*(P_001001010*QR_000000011000+P_001001110*QR_000000011001+P_001101010*QR_000000011010+P_001101110*QR_000000011011+P_101001010*QR_000000011100+P_101001110*QR_000000011101+P_101101010*QR_000000011110+P_101101110*QR_000000011111);
ans_temp[ans_id*9+6]+=Pmtrx[6]*(P_000002010*QR_011000000000+P_000002110*QR_011000000001+P_000102010*QR_011000000010+P_000102110*QR_011000000011+P_000202010*QR_011000000020+P_000202110*QR_011000000021);
ans_temp[ans_id*9+6]+=Pmtrx[7]*(P_000002010*QR_010001000000+P_000002110*QR_010001000001+P_000102010*QR_010001000010+P_000102110*QR_010001000011+P_000202010*QR_010001000020+P_000202110*QR_010001000021);
ans_temp[ans_id*9+6]+=Pmtrx[8]*(P_000002010*QR_010000001000+P_000002110*QR_010000001001+P_000102010*QR_010000001010+P_000102110*QR_010000001011+P_000202010*QR_010000001020+P_000202110*QR_010000001021);
ans_temp[ans_id*9+7]+=Pmtrx[6]*(P_000002010*QR_001010000000+P_000002110*QR_001010000001+P_000102010*QR_001010000010+P_000102110*QR_001010000011+P_000202010*QR_001010000020+P_000202110*QR_001010000021);
ans_temp[ans_id*9+7]+=Pmtrx[7]*(P_000002010*QR_000011000000+P_000002110*QR_000011000001+P_000102010*QR_000011000010+P_000102110*QR_000011000011+P_000202010*QR_000011000020+P_000202110*QR_000011000021);
ans_temp[ans_id*9+7]+=Pmtrx[8]*(P_000002010*QR_000010001000+P_000002110*QR_000010001001+P_000102010*QR_000010001010+P_000102110*QR_000010001011+P_000202010*QR_000010001020+P_000202110*QR_000010001021);
ans_temp[ans_id*9+8]+=Pmtrx[6]*(P_000002010*QR_001000010000+P_000002110*QR_001000010001+P_000102010*QR_001000010010+P_000102110*QR_001000010011+P_000202010*QR_001000010020+P_000202110*QR_001000010021);
ans_temp[ans_id*9+8]+=Pmtrx[7]*(P_000002010*QR_000001010000+P_000002110*QR_000001010001+P_000102010*QR_000001010010+P_000102110*QR_000001010011+P_000202010*QR_000001010020+P_000202110*QR_000001010021);
ans_temp[ans_id*9+8]+=Pmtrx[8]*(P_000002010*QR_000000011000+P_000002110*QR_000000011001+P_000102010*QR_000000011010+P_000102110*QR_000000011011+P_000202010*QR_000000011020+P_000202110*QR_000000011021);
ans_temp[ans_id*9+6]+=Pmtrx[9]*(P_001000011*QR_011000000000+P_001000111*QR_011000000001+P_001000211*QR_011000000002+P_101000011*QR_011000000100+P_101000111*QR_011000000101+P_101000211*QR_011000000102);
ans_temp[ans_id*9+6]+=Pmtrx[10]*(P_001000011*QR_010001000000+P_001000111*QR_010001000001+P_001000211*QR_010001000002+P_101000011*QR_010001000100+P_101000111*QR_010001000101+P_101000211*QR_010001000102);
ans_temp[ans_id*9+6]+=Pmtrx[11]*(P_001000011*QR_010000001000+P_001000111*QR_010000001001+P_001000211*QR_010000001002+P_101000011*QR_010000001100+P_101000111*QR_010000001101+P_101000211*QR_010000001102);
ans_temp[ans_id*9+7]+=Pmtrx[9]*(P_001000011*QR_001010000000+P_001000111*QR_001010000001+P_001000211*QR_001010000002+P_101000011*QR_001010000100+P_101000111*QR_001010000101+P_101000211*QR_001010000102);
ans_temp[ans_id*9+7]+=Pmtrx[10]*(P_001000011*QR_000011000000+P_001000111*QR_000011000001+P_001000211*QR_000011000002+P_101000011*QR_000011000100+P_101000111*QR_000011000101+P_101000211*QR_000011000102);
ans_temp[ans_id*9+7]+=Pmtrx[11]*(P_001000011*QR_000010001000+P_001000111*QR_000010001001+P_001000211*QR_000010001002+P_101000011*QR_000010001100+P_101000111*QR_000010001101+P_101000211*QR_000010001102);
ans_temp[ans_id*9+8]+=Pmtrx[9]*(P_001000011*QR_001000010000+P_001000111*QR_001000010001+P_001000211*QR_001000010002+P_101000011*QR_001000010100+P_101000111*QR_001000010101+P_101000211*QR_001000010102);
ans_temp[ans_id*9+8]+=Pmtrx[10]*(P_001000011*QR_000001010000+P_001000111*QR_000001010001+P_001000211*QR_000001010002+P_101000011*QR_000001010100+P_101000111*QR_000001010101+P_101000211*QR_000001010102);
ans_temp[ans_id*9+8]+=Pmtrx[11]*(P_001000011*QR_000000011000+P_001000111*QR_000000011001+P_001000211*QR_000000011002+P_101000011*QR_000000011100+P_101000111*QR_000000011101+P_101000211*QR_000000011102);
ans_temp[ans_id*9+6]+=Pmtrx[12]*(P_000001011*QR_011000000000+P_000001111*QR_011000000001+P_000001211*QR_011000000002+P_000101011*QR_011000000010+P_000101111*QR_011000000011+P_000101211*QR_011000000012);
ans_temp[ans_id*9+6]+=Pmtrx[13]*(P_000001011*QR_010001000000+P_000001111*QR_010001000001+P_000001211*QR_010001000002+P_000101011*QR_010001000010+P_000101111*QR_010001000011+P_000101211*QR_010001000012);
ans_temp[ans_id*9+6]+=Pmtrx[14]*(P_000001011*QR_010000001000+P_000001111*QR_010000001001+P_000001211*QR_010000001002+P_000101011*QR_010000001010+P_000101111*QR_010000001011+P_000101211*QR_010000001012);
ans_temp[ans_id*9+7]+=Pmtrx[12]*(P_000001011*QR_001010000000+P_000001111*QR_001010000001+P_000001211*QR_001010000002+P_000101011*QR_001010000010+P_000101111*QR_001010000011+P_000101211*QR_001010000012);
ans_temp[ans_id*9+7]+=Pmtrx[13]*(P_000001011*QR_000011000000+P_000001111*QR_000011000001+P_000001211*QR_000011000002+P_000101011*QR_000011000010+P_000101111*QR_000011000011+P_000101211*QR_000011000012);
ans_temp[ans_id*9+7]+=Pmtrx[14]*(P_000001011*QR_000010001000+P_000001111*QR_000010001001+P_000001211*QR_000010001002+P_000101011*QR_000010001010+P_000101111*QR_000010001011+P_000101211*QR_000010001012);
ans_temp[ans_id*9+8]+=Pmtrx[12]*(P_000001011*QR_001000010000+P_000001111*QR_001000010001+P_000001211*QR_001000010002+P_000101011*QR_001000010010+P_000101111*QR_001000010011+P_000101211*QR_001000010012);
ans_temp[ans_id*9+8]+=Pmtrx[13]*(P_000001011*QR_000001010000+P_000001111*QR_000001010001+P_000001211*QR_000001010002+P_000101011*QR_000001010010+P_000101111*QR_000001010011+P_000101211*QR_000001010012);
ans_temp[ans_id*9+8]+=Pmtrx[14]*(P_000001011*QR_000000011000+P_000001111*QR_000000011001+P_000001211*QR_000000011002+P_000101011*QR_000000011010+P_000101111*QR_000000011011+P_000101211*QR_000000011012);
ans_temp[ans_id*9+6]+=Pmtrx[15]*(P_000000012*QR_011000000000+P_000000112*QR_011000000001+P_000000212*QR_011000000002+P_000000312*QR_011000000003);
ans_temp[ans_id*9+6]+=Pmtrx[16]*(P_000000012*QR_010001000000+P_000000112*QR_010001000001+P_000000212*QR_010001000002+P_000000312*QR_010001000003);
ans_temp[ans_id*9+6]+=Pmtrx[17]*(P_000000012*QR_010000001000+P_000000112*QR_010000001001+P_000000212*QR_010000001002+P_000000312*QR_010000001003);
ans_temp[ans_id*9+7]+=Pmtrx[15]*(P_000000012*QR_001010000000+P_000000112*QR_001010000001+P_000000212*QR_001010000002+P_000000312*QR_001010000003);
ans_temp[ans_id*9+7]+=Pmtrx[16]*(P_000000012*QR_000011000000+P_000000112*QR_000011000001+P_000000212*QR_000011000002+P_000000312*QR_000011000003);
ans_temp[ans_id*9+7]+=Pmtrx[17]*(P_000000012*QR_000010001000+P_000000112*QR_000010001001+P_000000212*QR_000010001002+P_000000312*QR_000010001003);
ans_temp[ans_id*9+8]+=Pmtrx[15]*(P_000000012*QR_001000010000+P_000000112*QR_001000010001+P_000000212*QR_001000010002+P_000000312*QR_001000010003);
ans_temp[ans_id*9+8]+=Pmtrx[16]*(P_000000012*QR_000001010000+P_000000112*QR_000001010001+P_000000212*QR_000001010002+P_000000312*QR_000001010003);
ans_temp[ans_id*9+8]+=Pmtrx[17]*(P_000000012*QR_000000011000+P_000000112*QR_000000011001+P_000000212*QR_000000011002+P_000000312*QR_000000011003);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<9;ians++){
ans_temp[tId_x*9+ians]+=ans_temp[(tId_x+num_thread)*9+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=ans_temp[(tId_x)*9+ians];
}
}
}
}
}
// Auto-generated McMurchie-Davidson (MD) electron-repulsion-integral kernel
// for the (ds|pp) shell class, "Kp" variant: each thread block strides over
// (contracted bra, contracted ket) shell-pair indices; ket primitive data is
// read directly from global arrays while bra primitive data is fetched via
// the tex_* texture references bound in MD_texture_binding_bra_pp (file head).
// For every contracted pair, 18 components (6 bra d-components x 3 ket
// components -- ordering per generator convention, TODO confirm) are
// accumulated into ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18 + k].
// NOTE(review): the shared-memory zeroing and the final tree reduction both
// assume blockDim.x == NTHREAD -- confirm at the launch site.
__global__ void MD_Kp_dspp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
// Per-thread density-matrix slice (1 bra function x 3 ket functions), used
// both for magnitude screening and for weighting the primitive integrals.
double Pmtrx[3]={0.0};
// Per-thread partial sums: 18 components per thread, tree-reduced at the end.
__shared__ double ans_temp[NTHREAD*18];
// Zero all 18*tdis shared slots; the write pattern (i*tdis+tId_x) is
// transposed relative to the accumulation pattern (ans_id*18+i), but over
// all threads it covers every element exactly once.
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
// Block-stride loops over contracted bra (grid x) and ket (grid y) shells.
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
// Primitive ranges for this contracted pair (id arrays are prefix offsets).
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
// Serial walk over ket primitives; bra primitives are split across threads.
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
// Gaussian-product center Q and the C/D displacement vectors for this
// ket primitive pair.
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
// 1/(2*Eta): ket-side recurrence factor.
double aQin1=1/(2*Eta);
// Threads of the block share the bra primitive list in strides of tdis.
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
// Gather the 1x3 density block and track its largest magnitude.
// NOTE(review): fabsf() on double operands truncates to float; appears
// intentional (screening only) and is used consistently in this file.
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<1;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
// Screening: the break presumes bra primitives are sorted so that once
// K2_p*K2_q falls below threshold, all later ones do too -- TODO confirm
// the host-side sort guarantees this.
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
// Bra primitive data arrives as int2 texels; reassemble doubles from
// the hi/lo 32-bit halves.
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
// alphaT is reused: first as 1/sqrt(Eta+Zta) to build the prefactor lmd,
// then overwritten with the reduced exponent Eta*Zta/(Eta+Zta).
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
// Boys-function argument T = alphaT * |P-Q|^2.
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
// Boys values F_0..F_4, then scaled into R_000[m] = (-2*alphaT)^m * F_m * lmd.
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
// 1/(2*Zta): bra-side recurrence factor.
double aPin1=1/(2*Zta);
// Hermite auxiliary table R_{t,u,v}: one array per (t,u,v), indexed by
// Boys order, filled below by downward recurrence in total order.
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
// Standard MD recurrence: R_{t+1,u,v}[m] = T_x*R_{t,u,v}[m+1] + t*R_{t-1,u,v}[m+1]
// (and analogously for u with T_y, v with T_z).
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
// Bra-side Hermite expansion coefficients for a d-shell, built from the
// P-A displacement Pd_010 and aPin1 = 1/(2*Zta). Naming presumably
// encodes Hermite order per Cartesian direction -- TODO confirm against
// the generator's convention.
double Pd_110[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
// Products of bra coefficients for each of the 6 d-components
// (xx, xy, yy, xz, yz, zz).
double P_020000000=Pd_020[0];
double P_120000000=Pd_120[0];
double P_220000000=Pd_220[0];
double P_010010000=Pd_010[0]*Pd_010[1];
double P_010110000=Pd_010[0]*Pd_110[1];
double P_110010000=Pd_110[0]*Pd_010[1];
double P_110110000=Pd_110[0]*Pd_110[1];
double P_000020000=Pd_020[1];
double P_000120000=Pd_120[1];
double P_000220000=Pd_220[1];
double P_010000010=Pd_010[0]*Pd_010[2];
double P_010000110=Pd_010[0]*Pd_110[2];
double P_110000010=Pd_110[0]*Pd_010[2];
double P_110000110=Pd_110[0]*Pd_110[2];
double P_000010010=Pd_010[1]*Pd_010[2];
double P_000010110=Pd_010[1]*Pd_110[2];
double P_000110010=Pd_110[1]*Pd_010[2];
double P_000110110=Pd_110[1]*Pd_110[2];
double P_000000020=Pd_020[2];
double P_000000120=Pd_120[2];
double P_000000220=Pd_220[2];
// Bra coefficients contracted against the R table: PR_<bra><tuv> is the
// half-transformed integral for one d-component at Hermite index (t,u,v).
double PR_020000000000=P_020000000*R_000[0]+-1*P_120000000*R_100[0]+P_220000000*R_200[0];
double PR_010010000000=P_010010000*R_000[0]+-1*P_010110000*R_010[0]+-1*P_110010000*R_100[0]+P_110110000*R_110[0];
double PR_000020000000=P_000020000*R_000[0]+-1*P_000120000*R_010[0]+P_000220000*R_020[0];
double PR_010000010000=P_010000010*R_000[0]+-1*P_010000110*R_001[0]+-1*P_110000010*R_100[0]+P_110000110*R_101[0];
double PR_000010010000=P_000010010*R_000[0]+-1*P_000010110*R_001[0]+-1*P_000110010*R_010[0]+P_000110110*R_011[0];
double PR_000000020000=P_000000020*R_000[0]+-1*P_000000120*R_001[0]+P_000000220*R_002[0];
double PR_020000000001=P_020000000*R_001[0]+-1*P_120000000*R_101[0]+P_220000000*R_201[0];
double PR_010010000001=P_010010000*R_001[0]+-1*P_010110000*R_011[0]+-1*P_110010000*R_101[0]+P_110110000*R_111[0];
double PR_000020000001=P_000020000*R_001[0]+-1*P_000120000*R_011[0]+P_000220000*R_021[0];
double PR_010000010001=P_010000010*R_001[0]+-1*P_010000110*R_002[0]+-1*P_110000010*R_101[0]+P_110000110*R_102[0];
double PR_000010010001=P_000010010*R_001[0]+-1*P_000010110*R_002[0]+-1*P_000110010*R_011[0]+P_000110110*R_012[0];
double PR_000000020001=P_000000020*R_001[0]+-1*P_000000120*R_002[0]+P_000000220*R_003[0];
double PR_020000000010=P_020000000*R_010[0]+-1*P_120000000*R_110[0]+P_220000000*R_210[0];
double PR_010010000010=P_010010000*R_010[0]+-1*P_010110000*R_020[0]+-1*P_110010000*R_110[0]+P_110110000*R_120[0];
double PR_000020000010=P_000020000*R_010[0]+-1*P_000120000*R_020[0]+P_000220000*R_030[0];
double PR_010000010010=P_010000010*R_010[0]+-1*P_010000110*R_011[0]+-1*P_110000010*R_110[0]+P_110000110*R_111[0];
double PR_000010010010=P_000010010*R_010[0]+-1*P_000010110*R_011[0]+-1*P_000110010*R_020[0]+P_000110110*R_021[0];
double PR_000000020010=P_000000020*R_010[0]+-1*P_000000120*R_011[0]+P_000000220*R_012[0];
double PR_020000000100=P_020000000*R_100[0]+-1*P_120000000*R_200[0]+P_220000000*R_300[0];
double PR_010010000100=P_010010000*R_100[0]+-1*P_010110000*R_110[0]+-1*P_110010000*R_200[0]+P_110110000*R_210[0];
double PR_000020000100=P_000020000*R_100[0]+-1*P_000120000*R_110[0]+P_000220000*R_120[0];
double PR_010000010100=P_010000010*R_100[0]+-1*P_010000110*R_101[0]+-1*P_110000010*R_200[0]+P_110000110*R_201[0];
double PR_000010010100=P_000010010*R_100[0]+-1*P_000010110*R_101[0]+-1*P_000110010*R_110[0]+P_000110110*R_111[0];
double PR_000000020100=P_000000020*R_100[0]+-1*P_000000120*R_101[0]+P_000000220*R_102[0];
double PR_020000000002=P_020000000*R_002[0]+-1*P_120000000*R_102[0]+P_220000000*R_202[0];
double PR_010010000002=P_010010000*R_002[0]+-1*P_010110000*R_012[0]+-1*P_110010000*R_102[0]+P_110110000*R_112[0];
double PR_000020000002=P_000020000*R_002[0]+-1*P_000120000*R_012[0]+P_000220000*R_022[0];
double PR_010000010002=P_010000010*R_002[0]+-1*P_010000110*R_003[0]+-1*P_110000010*R_102[0]+P_110000110*R_103[0];
double PR_000010010002=P_000010010*R_002[0]+-1*P_000010110*R_003[0]+-1*P_000110010*R_012[0]+P_000110110*R_013[0];
double PR_000000020002=P_000000020*R_002[0]+-1*P_000000120*R_003[0]+P_000000220*R_004[0];
double PR_020000000011=P_020000000*R_011[0]+-1*P_120000000*R_111[0]+P_220000000*R_211[0];
double PR_010010000011=P_010010000*R_011[0]+-1*P_010110000*R_021[0]+-1*P_110010000*R_111[0]+P_110110000*R_121[0];
double PR_000020000011=P_000020000*R_011[0]+-1*P_000120000*R_021[0]+P_000220000*R_031[0];
double PR_010000010011=P_010000010*R_011[0]+-1*P_010000110*R_012[0]+-1*P_110000010*R_111[0]+P_110000110*R_112[0];
double PR_000010010011=P_000010010*R_011[0]+-1*P_000010110*R_012[0]+-1*P_000110010*R_021[0]+P_000110110*R_022[0];
double PR_000000020011=P_000000020*R_011[0]+-1*P_000000120*R_012[0]+P_000000220*R_013[0];
double PR_020000000020=P_020000000*R_020[0]+-1*P_120000000*R_120[0]+P_220000000*R_220[0];
double PR_010010000020=P_010010000*R_020[0]+-1*P_010110000*R_030[0]+-1*P_110010000*R_120[0]+P_110110000*R_130[0];
double PR_000020000020=P_000020000*R_020[0]+-1*P_000120000*R_030[0]+P_000220000*R_040[0];
double PR_010000010020=P_010000010*R_020[0]+-1*P_010000110*R_021[0]+-1*P_110000010*R_120[0]+P_110000110*R_121[0];
double PR_000010010020=P_000010010*R_020[0]+-1*P_000010110*R_021[0]+-1*P_000110010*R_030[0]+P_000110110*R_031[0];
double PR_000000020020=P_000000020*R_020[0]+-1*P_000000120*R_021[0]+P_000000220*R_022[0];
double PR_020000000101=P_020000000*R_101[0]+-1*P_120000000*R_201[0]+P_220000000*R_301[0];
double PR_010010000101=P_010010000*R_101[0]+-1*P_010110000*R_111[0]+-1*P_110010000*R_201[0]+P_110110000*R_211[0];
double PR_000020000101=P_000020000*R_101[0]+-1*P_000120000*R_111[0]+P_000220000*R_121[0];
double PR_010000010101=P_010000010*R_101[0]+-1*P_010000110*R_102[0]+-1*P_110000010*R_201[0]+P_110000110*R_202[0];
double PR_000010010101=P_000010010*R_101[0]+-1*P_000010110*R_102[0]+-1*P_000110010*R_111[0]+P_000110110*R_112[0];
double PR_000000020101=P_000000020*R_101[0]+-1*P_000000120*R_102[0]+P_000000220*R_103[0];
double PR_020000000110=P_020000000*R_110[0]+-1*P_120000000*R_210[0]+P_220000000*R_310[0];
double PR_010010000110=P_010010000*R_110[0]+-1*P_010110000*R_120[0]+-1*P_110010000*R_210[0]+P_110110000*R_220[0];
double PR_000020000110=P_000020000*R_110[0]+-1*P_000120000*R_120[0]+P_000220000*R_130[0];
double PR_010000010110=P_010000010*R_110[0]+-1*P_010000110*R_111[0]+-1*P_110000010*R_210[0]+P_110000110*R_211[0];
double PR_000010010110=P_000010010*R_110[0]+-1*P_000010110*R_111[0]+-1*P_000110010*R_120[0]+P_000110110*R_121[0];
double PR_000000020110=P_000000020*R_110[0]+-1*P_000000120*R_111[0]+P_000000220*R_112[0];
double PR_020000000200=P_020000000*R_200[0]+-1*P_120000000*R_300[0]+P_220000000*R_400[0];
double PR_010010000200=P_010010000*R_200[0]+-1*P_010110000*R_210[0]+-1*P_110010000*R_300[0]+P_110110000*R_310[0];
double PR_000020000200=P_000020000*R_200[0]+-1*P_000120000*R_210[0]+P_000220000*R_220[0];
double PR_010000010200=P_010000010*R_200[0]+-1*P_010000110*R_201[0]+-1*P_110000010*R_300[0]+P_110000110*R_301[0];
double PR_000010010200=P_000010010*R_200[0]+-1*P_000010110*R_201[0]+-1*P_000110010*R_210[0]+P_000110110*R_211[0];
double PR_000000020200=P_000000020*R_200[0]+-1*P_000000120*R_201[0]+P_000000220*R_202[0];
// Ket-side Hermite expansion coefficients (p on C, p on D), from the
// Q-C/Q-D displacements and aQin1 = 1/(2*Eta).
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
// Products of ket coefficients for the 3x3 (p|p) component pairs.
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
// Final contraction: ket coefficients x half-transformed PR values,
// weighted by the density slice; 18 output components = 6 bra x 3 ket.
ans_temp[ans_id*18+0]+=Pmtrx[0]*(Q_011000000*PR_020000000000+Q_111000000*PR_020000000100+Q_211000000*PR_020000000200);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(Q_010001000*PR_020000000000+Q_010101000*PR_020000000010+Q_110001000*PR_020000000100+Q_110101000*PR_020000000110);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(Q_010000001*PR_020000000000+Q_010000101*PR_020000000001+Q_110000001*PR_020000000100+Q_110000101*PR_020000000101);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(Q_001010000*PR_020000000000+Q_001110000*PR_020000000010+Q_101010000*PR_020000000100+Q_101110000*PR_020000000110);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(Q_000011000*PR_020000000000+Q_000111000*PR_020000000010+Q_000211000*PR_020000000020);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(Q_000010001*PR_020000000000+Q_000010101*PR_020000000001+Q_000110001*PR_020000000010+Q_000110101*PR_020000000011);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(Q_001000010*PR_020000000000+Q_001000110*PR_020000000001+Q_101000010*PR_020000000100+Q_101000110*PR_020000000101);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(Q_000001010*PR_020000000000+Q_000001110*PR_020000000001+Q_000101010*PR_020000000010+Q_000101110*PR_020000000011);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(Q_000000011*PR_020000000000+Q_000000111*PR_020000000001+Q_000000211*PR_020000000002);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(Q_011000000*PR_010010000000+Q_111000000*PR_010010000100+Q_211000000*PR_010010000200);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(Q_010001000*PR_010010000000+Q_010101000*PR_010010000010+Q_110001000*PR_010010000100+Q_110101000*PR_010010000110);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(Q_010000001*PR_010010000000+Q_010000101*PR_010010000001+Q_110000001*PR_010010000100+Q_110000101*PR_010010000101);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(Q_001010000*PR_010010000000+Q_001110000*PR_010010000010+Q_101010000*PR_010010000100+Q_101110000*PR_010010000110);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(Q_000011000*PR_010010000000+Q_000111000*PR_010010000010+Q_000211000*PR_010010000020);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(Q_000010001*PR_010010000000+Q_000010101*PR_010010000001+Q_000110001*PR_010010000010+Q_000110101*PR_010010000011);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(Q_001000010*PR_010010000000+Q_001000110*PR_010010000001+Q_101000010*PR_010010000100+Q_101000110*PR_010010000101);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(Q_000001010*PR_010010000000+Q_000001110*PR_010010000001+Q_000101010*PR_010010000010+Q_000101110*PR_010010000011);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(Q_000000011*PR_010010000000+Q_000000111*PR_010010000001+Q_000000211*PR_010010000002);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(Q_011000000*PR_000020000000+Q_111000000*PR_000020000100+Q_211000000*PR_000020000200);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(Q_010001000*PR_000020000000+Q_010101000*PR_000020000010+Q_110001000*PR_000020000100+Q_110101000*PR_000020000110);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(Q_010000001*PR_000020000000+Q_010000101*PR_000020000001+Q_110000001*PR_000020000100+Q_110000101*PR_000020000101);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(Q_001010000*PR_000020000000+Q_001110000*PR_000020000010+Q_101010000*PR_000020000100+Q_101110000*PR_000020000110);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(Q_000011000*PR_000020000000+Q_000111000*PR_000020000010+Q_000211000*PR_000020000020);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(Q_000010001*PR_000020000000+Q_000010101*PR_000020000001+Q_000110001*PR_000020000010+Q_000110101*PR_000020000011);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(Q_001000010*PR_000020000000+Q_001000110*PR_000020000001+Q_101000010*PR_000020000100+Q_101000110*PR_000020000101);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(Q_000001010*PR_000020000000+Q_000001110*PR_000020000001+Q_000101010*PR_000020000010+Q_000101110*PR_000020000011);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(Q_000000011*PR_000020000000+Q_000000111*PR_000020000001+Q_000000211*PR_000020000002);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(Q_011000000*PR_010000010000+Q_111000000*PR_010000010100+Q_211000000*PR_010000010200);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(Q_010001000*PR_010000010000+Q_010101000*PR_010000010010+Q_110001000*PR_010000010100+Q_110101000*PR_010000010110);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(Q_010000001*PR_010000010000+Q_010000101*PR_010000010001+Q_110000001*PR_010000010100+Q_110000101*PR_010000010101);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(Q_001010000*PR_010000010000+Q_001110000*PR_010000010010+Q_101010000*PR_010000010100+Q_101110000*PR_010000010110);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(Q_000011000*PR_010000010000+Q_000111000*PR_010000010010+Q_000211000*PR_010000010020);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(Q_000010001*PR_010000010000+Q_000010101*PR_010000010001+Q_000110001*PR_010000010010+Q_000110101*PR_010000010011);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(Q_001000010*PR_010000010000+Q_001000110*PR_010000010001+Q_101000010*PR_010000010100+Q_101000110*PR_010000010101);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(Q_000001010*PR_010000010000+Q_000001110*PR_010000010001+Q_000101010*PR_010000010010+Q_000101110*PR_010000010011);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(Q_000000011*PR_010000010000+Q_000000111*PR_010000010001+Q_000000211*PR_010000010002);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(Q_011000000*PR_000010010000+Q_111000000*PR_000010010100+Q_211000000*PR_000010010200);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(Q_010001000*PR_000010010000+Q_010101000*PR_000010010010+Q_110001000*PR_000010010100+Q_110101000*PR_000010010110);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(Q_010000001*PR_000010010000+Q_010000101*PR_000010010001+Q_110000001*PR_000010010100+Q_110000101*PR_000010010101);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(Q_001010000*PR_000010010000+Q_001110000*PR_000010010010+Q_101010000*PR_000010010100+Q_101110000*PR_000010010110);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(Q_000011000*PR_000010010000+Q_000111000*PR_000010010010+Q_000211000*PR_000010010020);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(Q_000010001*PR_000010010000+Q_000010101*PR_000010010001+Q_000110001*PR_000010010010+Q_000110101*PR_000010010011);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(Q_001000010*PR_000010010000+Q_001000110*PR_000010010001+Q_101000010*PR_000010010100+Q_101000110*PR_000010010101);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(Q_000001010*PR_000010010000+Q_000001110*PR_000010010001+Q_000101010*PR_000010010010+Q_000101110*PR_000010010011);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(Q_000000011*PR_000010010000+Q_000000111*PR_000010010001+Q_000000211*PR_000010010002);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(Q_011000000*PR_000000020000+Q_111000000*PR_000000020100+Q_211000000*PR_000000020200);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(Q_010001000*PR_000000020000+Q_010101000*PR_000000020010+Q_110001000*PR_000000020100+Q_110101000*PR_000000020110);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(Q_010000001*PR_000000020000+Q_010000101*PR_000000020001+Q_110000001*PR_000000020100+Q_110000101*PR_000000020101);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(Q_001010000*PR_000000020000+Q_001110000*PR_000000020010+Q_101010000*PR_000000020100+Q_101110000*PR_000000020110);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(Q_000011000*PR_000000020000+Q_000111000*PR_000000020010+Q_000211000*PR_000000020020);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(Q_000010001*PR_000000020000+Q_000010101*PR_000000020001+Q_000110001*PR_000000020010+Q_000110101*PR_000000020011);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(Q_001000010*PR_000000020000+Q_001000110*PR_000000020001+Q_101000010*PR_000000020100+Q_101000110*PR_000000020101);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(Q_000001010*PR_000000020000+Q_000001110*PR_000000020001+Q_000101010*PR_000000020010+Q_000101110*PR_000000020011);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(Q_000000011*PR_000000020000+Q_000000111*PR_000000020001+Q_000000211*PR_000000020002);
}
}
// Block-wide tree reduction of the per-thread partials into thread 0's
// 18 slots. NOTE(review): the halving loop assumes blockDim.x == NTHREAD
// and a power-of-two thread count.
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
// Thread 0 writes the 18 reduced components for this contracted pair.
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kq_dspp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[3]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<1;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double Pd_110[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
double P_020000000=Pd_020[0];
double P_120000000=Pd_120[0];
double P_220000000=Pd_220[0];
double P_010010000=Pd_010[0]*Pd_010[1];
double P_010110000=Pd_010[0]*Pd_110[1];
double P_110010000=Pd_110[0]*Pd_010[1];
double P_110110000=Pd_110[0]*Pd_110[1];
double P_000020000=Pd_020[1];
double P_000120000=Pd_120[1];
double P_000220000=Pd_220[1];
double P_010000010=Pd_010[0]*Pd_010[2];
double P_010000110=Pd_010[0]*Pd_110[2];
double P_110000010=Pd_110[0]*Pd_010[2];
double P_110000110=Pd_110[0]*Pd_110[2];
double P_000010010=Pd_010[1]*Pd_010[2];
double P_000010110=Pd_010[1]*Pd_110[2];
double P_000110010=Pd_110[1]*Pd_010[2];
double P_000110110=Pd_110[1]*Pd_110[2];
double P_000000020=Pd_020[2];
double P_000000120=Pd_120[2];
double P_000000220=Pd_220[2];
ans_temp[ans_id*18+0]+=Pmtrx[0]*(P_020000000*QR_011000000000+P_120000000*QR_011000000100+P_220000000*QR_011000000200);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(P_020000000*QR_010001000000+P_120000000*QR_010001000100+P_220000000*QR_010001000200);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(P_020000000*QR_010000001000+P_120000000*QR_010000001100+P_220000000*QR_010000001200);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(P_020000000*QR_001010000000+P_120000000*QR_001010000100+P_220000000*QR_001010000200);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(P_020000000*QR_000011000000+P_120000000*QR_000011000100+P_220000000*QR_000011000200);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(P_020000000*QR_000010001000+P_120000000*QR_000010001100+P_220000000*QR_000010001200);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(P_020000000*QR_001000010000+P_120000000*QR_001000010100+P_220000000*QR_001000010200);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(P_020000000*QR_000001010000+P_120000000*QR_000001010100+P_220000000*QR_000001010200);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(P_020000000*QR_000000011000+P_120000000*QR_000000011100+P_220000000*QR_000000011200);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(P_010010000*QR_011000000000+P_010110000*QR_011000000010+P_110010000*QR_011000000100+P_110110000*QR_011000000110);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(P_010010000*QR_010001000000+P_010110000*QR_010001000010+P_110010000*QR_010001000100+P_110110000*QR_010001000110);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(P_010010000*QR_010000001000+P_010110000*QR_010000001010+P_110010000*QR_010000001100+P_110110000*QR_010000001110);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(P_010010000*QR_001010000000+P_010110000*QR_001010000010+P_110010000*QR_001010000100+P_110110000*QR_001010000110);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(P_010010000*QR_000011000000+P_010110000*QR_000011000010+P_110010000*QR_000011000100+P_110110000*QR_000011000110);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(P_010010000*QR_000010001000+P_010110000*QR_000010001010+P_110010000*QR_000010001100+P_110110000*QR_000010001110);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(P_010010000*QR_001000010000+P_010110000*QR_001000010010+P_110010000*QR_001000010100+P_110110000*QR_001000010110);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(P_010010000*QR_000001010000+P_010110000*QR_000001010010+P_110010000*QR_000001010100+P_110110000*QR_000001010110);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(P_010010000*QR_000000011000+P_010110000*QR_000000011010+P_110010000*QR_000000011100+P_110110000*QR_000000011110);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(P_000020000*QR_011000000000+P_000120000*QR_011000000010+P_000220000*QR_011000000020);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(P_000020000*QR_010001000000+P_000120000*QR_010001000010+P_000220000*QR_010001000020);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(P_000020000*QR_010000001000+P_000120000*QR_010000001010+P_000220000*QR_010000001020);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(P_000020000*QR_001010000000+P_000120000*QR_001010000010+P_000220000*QR_001010000020);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(P_000020000*QR_000011000000+P_000120000*QR_000011000010+P_000220000*QR_000011000020);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(P_000020000*QR_000010001000+P_000120000*QR_000010001010+P_000220000*QR_000010001020);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(P_000020000*QR_001000010000+P_000120000*QR_001000010010+P_000220000*QR_001000010020);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(P_000020000*QR_000001010000+P_000120000*QR_000001010010+P_000220000*QR_000001010020);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(P_000020000*QR_000000011000+P_000120000*QR_000000011010+P_000220000*QR_000000011020);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(P_010000010*QR_011000000000+P_010000110*QR_011000000001+P_110000010*QR_011000000100+P_110000110*QR_011000000101);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(P_010000010*QR_010001000000+P_010000110*QR_010001000001+P_110000010*QR_010001000100+P_110000110*QR_010001000101);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(P_010000010*QR_010000001000+P_010000110*QR_010000001001+P_110000010*QR_010000001100+P_110000110*QR_010000001101);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(P_010000010*QR_001010000000+P_010000110*QR_001010000001+P_110000010*QR_001010000100+P_110000110*QR_001010000101);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(P_010000010*QR_000011000000+P_010000110*QR_000011000001+P_110000010*QR_000011000100+P_110000110*QR_000011000101);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(P_010000010*QR_000010001000+P_010000110*QR_000010001001+P_110000010*QR_000010001100+P_110000110*QR_000010001101);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(P_010000010*QR_001000010000+P_010000110*QR_001000010001+P_110000010*QR_001000010100+P_110000110*QR_001000010101);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(P_010000010*QR_000001010000+P_010000110*QR_000001010001+P_110000010*QR_000001010100+P_110000110*QR_000001010101);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(P_010000010*QR_000000011000+P_010000110*QR_000000011001+P_110000010*QR_000000011100+P_110000110*QR_000000011101);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(P_000010010*QR_011000000000+P_000010110*QR_011000000001+P_000110010*QR_011000000010+P_000110110*QR_011000000011);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(P_000010010*QR_010001000000+P_000010110*QR_010001000001+P_000110010*QR_010001000010+P_000110110*QR_010001000011);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(P_000010010*QR_010000001000+P_000010110*QR_010000001001+P_000110010*QR_010000001010+P_000110110*QR_010000001011);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(P_000010010*QR_001010000000+P_000010110*QR_001010000001+P_000110010*QR_001010000010+P_000110110*QR_001010000011);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(P_000010010*QR_000011000000+P_000010110*QR_000011000001+P_000110010*QR_000011000010+P_000110110*QR_000011000011);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(P_000010010*QR_000010001000+P_000010110*QR_000010001001+P_000110010*QR_000010001010+P_000110110*QR_000010001011);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(P_000010010*QR_001000010000+P_000010110*QR_001000010001+P_000110010*QR_001000010010+P_000110110*QR_001000010011);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(P_000010010*QR_000001010000+P_000010110*QR_000001010001+P_000110010*QR_000001010010+P_000110110*QR_000001010011);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(P_000010010*QR_000000011000+P_000010110*QR_000000011001+P_000110010*QR_000000011010+P_000110110*QR_000000011011);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(P_000000020*QR_011000000000+P_000000120*QR_011000000001+P_000000220*QR_011000000002);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(P_000000020*QR_010001000000+P_000000120*QR_010001000001+P_000000220*QR_010001000002);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(P_000000020*QR_010000001000+P_000000120*QR_010000001001+P_000000220*QR_010000001002);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(P_000000020*QR_001010000000+P_000000120*QR_001010000001+P_000000220*QR_001010000002);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(P_000000020*QR_000011000000+P_000000120*QR_000011000001+P_000000220*QR_000011000002);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(P_000000020*QR_000010001000+P_000000120*QR_000010001001+P_000000220*QR_000010001002);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(P_000000020*QR_001000010000+P_000000120*QR_001000010001+P_000000220*QR_001000010002);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(P_000000020*QR_000001010000+P_000000120*QR_000001010001+P_000000220*QR_000001010002);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(P_000000020*QR_000000011000+P_000000120*QR_000000011001+P_000000220*QR_000000011002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kp_dppp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[9]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<3;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_001[3];
temp_int2=tex1Dfetch(tex_PB,jj*3+0);
Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+1);
Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+2);
Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[6];
Ft_fs_5(5,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
double R_100[5];
double R_200[4];
double R_300[3];
double R_400[2];
double R_500[1];
double R_010[5];
double R_110[4];
double R_210[3];
double R_310[2];
double R_410[1];
double R_020[4];
double R_120[3];
double R_220[2];
double R_320[1];
double R_030[3];
double R_130[2];
double R_230[1];
double R_040[2];
double R_140[1];
double R_050[1];
double R_001[5];
double R_101[4];
double R_201[3];
double R_301[2];
double R_401[1];
double R_011[4];
double R_111[3];
double R_211[2];
double R_311[1];
double R_021[3];
double R_121[2];
double R_221[1];
double R_031[2];
double R_131[1];
double R_041[1];
double R_002[4];
double R_102[3];
double R_202[2];
double R_302[1];
double R_012[3];
double R_112[2];
double R_212[1];
double R_022[2];
double R_122[1];
double R_032[1];
double R_003[3];
double R_103[2];
double R_203[1];
double R_013[2];
double R_113[1];
double R_023[1];
double R_004[2];
double R_104[1];
double R_014[1];
double R_005[1];
for(int i=0;i<5;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<5;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<5;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<4;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<4;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<4;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<4;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<3;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<3;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<3;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<3;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<3;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<3;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<3;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<3;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<3;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<2;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<2;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<2;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<2;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<2;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<2;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<2;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<2;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<2;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<2;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<2;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<2;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<2;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<1;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<1;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<1;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<1;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<1;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<1;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<1;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<1;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<1;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<1;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<1;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<1;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<1;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<1;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<1;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<1;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<1;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<1;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<1;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<1;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<1;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
double Pd_101[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
double Pd_021[3];
double Pd_121[3];
double Pd_221[3];
double Pd_321[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
for(int i=0;i<3;i++){
Pd_021[i]=Pd_111[i]+Pd_010[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_121[i]=2*Pd_211[i]+Pd_010[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_221[i]=Pd_010[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_321[i]=aPin1*Pd_211[i];
}
double P_021000000=Pd_021[0];
double P_121000000=Pd_121[0];
double P_221000000=Pd_221[0];
double P_321000000=Pd_321[0];
double P_020001000=Pd_020[0]*Pd_001[1];
double P_020101000=Pd_020[0]*Pd_101[1];
double P_120001000=Pd_120[0]*Pd_001[1];
double P_120101000=Pd_120[0]*Pd_101[1];
double P_220001000=Pd_220[0]*Pd_001[1];
double P_220101000=Pd_220[0]*Pd_101[1];
double P_020000001=Pd_020[0]*Pd_001[2];
double P_020000101=Pd_020[0]*Pd_101[2];
double P_120000001=Pd_120[0]*Pd_001[2];
double P_120000101=Pd_120[0]*Pd_101[2];
double P_220000001=Pd_220[0]*Pd_001[2];
double P_220000101=Pd_220[0]*Pd_101[2];
double P_011010000=Pd_011[0]*Pd_010[1];
double P_011110000=Pd_011[0]*Pd_110[1];
double P_111010000=Pd_111[0]*Pd_010[1];
double P_111110000=Pd_111[0]*Pd_110[1];
double P_211010000=Pd_211[0]*Pd_010[1];
double P_211110000=Pd_211[0]*Pd_110[1];
double P_010011000=Pd_010[0]*Pd_011[1];
double P_010111000=Pd_010[0]*Pd_111[1];
double P_010211000=Pd_010[0]*Pd_211[1];
double P_110011000=Pd_110[0]*Pd_011[1];
double P_110111000=Pd_110[0]*Pd_111[1];
double P_110211000=Pd_110[0]*Pd_211[1];
double P_010010001=Pd_010[0]*Pd_010[1]*Pd_001[2];
double P_010010101=Pd_010[0]*Pd_010[1]*Pd_101[2];
double P_010110001=Pd_010[0]*Pd_110[1]*Pd_001[2];
double P_010110101=Pd_010[0]*Pd_110[1]*Pd_101[2];
double P_110010001=Pd_110[0]*Pd_010[1]*Pd_001[2];
double P_110010101=Pd_110[0]*Pd_010[1]*Pd_101[2];
double P_110110001=Pd_110[0]*Pd_110[1]*Pd_001[2];
double P_110110101=Pd_110[0]*Pd_110[1]*Pd_101[2];
double P_001020000=Pd_001[0]*Pd_020[1];
double P_001120000=Pd_001[0]*Pd_120[1];
double P_001220000=Pd_001[0]*Pd_220[1];
double P_101020000=Pd_101[0]*Pd_020[1];
double P_101120000=Pd_101[0]*Pd_120[1];
double P_101220000=Pd_101[0]*Pd_220[1];
double P_000021000=Pd_021[1];
double P_000121000=Pd_121[1];
double P_000221000=Pd_221[1];
double P_000321000=Pd_321[1];
double P_000020001=Pd_020[1]*Pd_001[2];
double P_000020101=Pd_020[1]*Pd_101[2];
double P_000120001=Pd_120[1]*Pd_001[2];
double P_000120101=Pd_120[1]*Pd_101[2];
double P_000220001=Pd_220[1]*Pd_001[2];
double P_000220101=Pd_220[1]*Pd_101[2];
double P_011000010=Pd_011[0]*Pd_010[2];
double P_011000110=Pd_011[0]*Pd_110[2];
double P_111000010=Pd_111[0]*Pd_010[2];
double P_111000110=Pd_111[0]*Pd_110[2];
double P_211000010=Pd_211[0]*Pd_010[2];
double P_211000110=Pd_211[0]*Pd_110[2];
double P_010001010=Pd_010[0]*Pd_001[1]*Pd_010[2];
double P_010001110=Pd_010[0]*Pd_001[1]*Pd_110[2];
double P_010101010=Pd_010[0]*Pd_101[1]*Pd_010[2];
double P_010101110=Pd_010[0]*Pd_101[1]*Pd_110[2];
double P_110001010=Pd_110[0]*Pd_001[1]*Pd_010[2];
double P_110001110=Pd_110[0]*Pd_001[1]*Pd_110[2];
double P_110101010=Pd_110[0]*Pd_101[1]*Pd_010[2];
double P_110101110=Pd_110[0]*Pd_101[1]*Pd_110[2];
double P_010000011=Pd_010[0]*Pd_011[2];
double P_010000111=Pd_010[0]*Pd_111[2];
double P_010000211=Pd_010[0]*Pd_211[2];
double P_110000011=Pd_110[0]*Pd_011[2];
double P_110000111=Pd_110[0]*Pd_111[2];
double P_110000211=Pd_110[0]*Pd_211[2];
double P_001010010=Pd_001[0]*Pd_010[1]*Pd_010[2];
double P_001010110=Pd_001[0]*Pd_010[1]*Pd_110[2];
double P_001110010=Pd_001[0]*Pd_110[1]*Pd_010[2];
double P_001110110=Pd_001[0]*Pd_110[1]*Pd_110[2];
double P_101010010=Pd_101[0]*Pd_010[1]*Pd_010[2];
double P_101010110=Pd_101[0]*Pd_010[1]*Pd_110[2];
double P_101110010=Pd_101[0]*Pd_110[1]*Pd_010[2];
double P_101110110=Pd_101[0]*Pd_110[1]*Pd_110[2];
double P_000011010=Pd_011[1]*Pd_010[2];
double P_000011110=Pd_011[1]*Pd_110[2];
double P_000111010=Pd_111[1]*Pd_010[2];
double P_000111110=Pd_111[1]*Pd_110[2];
double P_000211010=Pd_211[1]*Pd_010[2];
double P_000211110=Pd_211[1]*Pd_110[2];
double P_000010011=Pd_010[1]*Pd_011[2];
double P_000010111=Pd_010[1]*Pd_111[2];
double P_000010211=Pd_010[1]*Pd_211[2];
double P_000110011=Pd_110[1]*Pd_011[2];
double P_000110111=Pd_110[1]*Pd_111[2];
double P_000110211=Pd_110[1]*Pd_211[2];
double P_001000020=Pd_001[0]*Pd_020[2];
double P_001000120=Pd_001[0]*Pd_120[2];
double P_001000220=Pd_001[0]*Pd_220[2];
double P_101000020=Pd_101[0]*Pd_020[2];
double P_101000120=Pd_101[0]*Pd_120[2];
double P_101000220=Pd_101[0]*Pd_220[2];
double P_000001020=Pd_001[1]*Pd_020[2];
double P_000001120=Pd_001[1]*Pd_120[2];
double P_000001220=Pd_001[1]*Pd_220[2];
double P_000101020=Pd_101[1]*Pd_020[2];
double P_000101120=Pd_101[1]*Pd_120[2];
double P_000101220=Pd_101[1]*Pd_220[2];
double P_000000021=Pd_021[2];
double P_000000121=Pd_121[2];
double P_000000221=Pd_221[2];
double P_000000321=Pd_321[2];
double PR_021000000000=P_021000000*R_000[0]+-1*P_121000000*R_100[0]+P_221000000*R_200[0]+-1*P_321000000*R_300[0];
double PR_020001000000=P_020001000*R_000[0]+-1*P_020101000*R_010[0]+-1*P_120001000*R_100[0]+P_120101000*R_110[0]+P_220001000*R_200[0]+-1*P_220101000*R_210[0];
double PR_020000001000=P_020000001*R_000[0]+-1*P_020000101*R_001[0]+-1*P_120000001*R_100[0]+P_120000101*R_101[0]+P_220000001*R_200[0]+-1*P_220000101*R_201[0];
double PR_011010000000=P_011010000*R_000[0]+-1*P_011110000*R_010[0]+-1*P_111010000*R_100[0]+P_111110000*R_110[0]+P_211010000*R_200[0]+-1*P_211110000*R_210[0];
double PR_010011000000=P_010011000*R_000[0]+-1*P_010111000*R_010[0]+P_010211000*R_020[0]+-1*P_110011000*R_100[0]+P_110111000*R_110[0]+-1*P_110211000*R_120[0];
double PR_010010001000=P_010010001*R_000[0]+-1*P_010010101*R_001[0]+-1*P_010110001*R_010[0]+P_010110101*R_011[0]+-1*P_110010001*R_100[0]+P_110010101*R_101[0]+P_110110001*R_110[0]+-1*P_110110101*R_111[0];
double PR_001020000000=P_001020000*R_000[0]+-1*P_001120000*R_010[0]+P_001220000*R_020[0]+-1*P_101020000*R_100[0]+P_101120000*R_110[0]+-1*P_101220000*R_120[0];
double PR_000021000000=P_000021000*R_000[0]+-1*P_000121000*R_010[0]+P_000221000*R_020[0]+-1*P_000321000*R_030[0];
double PR_000020001000=P_000020001*R_000[0]+-1*P_000020101*R_001[0]+-1*P_000120001*R_010[0]+P_000120101*R_011[0]+P_000220001*R_020[0]+-1*P_000220101*R_021[0];
double PR_011000010000=P_011000010*R_000[0]+-1*P_011000110*R_001[0]+-1*P_111000010*R_100[0]+P_111000110*R_101[0]+P_211000010*R_200[0]+-1*P_211000110*R_201[0];
double PR_010001010000=P_010001010*R_000[0]+-1*P_010001110*R_001[0]+-1*P_010101010*R_010[0]+P_010101110*R_011[0]+-1*P_110001010*R_100[0]+P_110001110*R_101[0]+P_110101010*R_110[0]+-1*P_110101110*R_111[0];
double PR_010000011000=P_010000011*R_000[0]+-1*P_010000111*R_001[0]+P_010000211*R_002[0]+-1*P_110000011*R_100[0]+P_110000111*R_101[0]+-1*P_110000211*R_102[0];
double PR_001010010000=P_001010010*R_000[0]+-1*P_001010110*R_001[0]+-1*P_001110010*R_010[0]+P_001110110*R_011[0]+-1*P_101010010*R_100[0]+P_101010110*R_101[0]+P_101110010*R_110[0]+-1*P_101110110*R_111[0];
double PR_000011010000=P_000011010*R_000[0]+-1*P_000011110*R_001[0]+-1*P_000111010*R_010[0]+P_000111110*R_011[0]+P_000211010*R_020[0]+-1*P_000211110*R_021[0];
double PR_000010011000=P_000010011*R_000[0]+-1*P_000010111*R_001[0]+P_000010211*R_002[0]+-1*P_000110011*R_010[0]+P_000110111*R_011[0]+-1*P_000110211*R_012[0];
double PR_001000020000=P_001000020*R_000[0]+-1*P_001000120*R_001[0]+P_001000220*R_002[0]+-1*P_101000020*R_100[0]+P_101000120*R_101[0]+-1*P_101000220*R_102[0];
double PR_000001020000=P_000001020*R_000[0]+-1*P_000001120*R_001[0]+P_000001220*R_002[0]+-1*P_000101020*R_010[0]+P_000101120*R_011[0]+-1*P_000101220*R_012[0];
double PR_000000021000=P_000000021*R_000[0]+-1*P_000000121*R_001[0]+P_000000221*R_002[0]+-1*P_000000321*R_003[0];
double PR_021000000001=P_021000000*R_001[0]+-1*P_121000000*R_101[0]+P_221000000*R_201[0]+-1*P_321000000*R_301[0];
double PR_020001000001=P_020001000*R_001[0]+-1*P_020101000*R_011[0]+-1*P_120001000*R_101[0]+P_120101000*R_111[0]+P_220001000*R_201[0]+-1*P_220101000*R_211[0];
double PR_020000001001=P_020000001*R_001[0]+-1*P_020000101*R_002[0]+-1*P_120000001*R_101[0]+P_120000101*R_102[0]+P_220000001*R_201[0]+-1*P_220000101*R_202[0];
double PR_011010000001=P_011010000*R_001[0]+-1*P_011110000*R_011[0]+-1*P_111010000*R_101[0]+P_111110000*R_111[0]+P_211010000*R_201[0]+-1*P_211110000*R_211[0];
double PR_010011000001=P_010011000*R_001[0]+-1*P_010111000*R_011[0]+P_010211000*R_021[0]+-1*P_110011000*R_101[0]+P_110111000*R_111[0]+-1*P_110211000*R_121[0];
double PR_010010001001=P_010010001*R_001[0]+-1*P_010010101*R_002[0]+-1*P_010110001*R_011[0]+P_010110101*R_012[0]+-1*P_110010001*R_101[0]+P_110010101*R_102[0]+P_110110001*R_111[0]+-1*P_110110101*R_112[0];
double PR_001020000001=P_001020000*R_001[0]+-1*P_001120000*R_011[0]+P_001220000*R_021[0]+-1*P_101020000*R_101[0]+P_101120000*R_111[0]+-1*P_101220000*R_121[0];
double PR_000021000001=P_000021000*R_001[0]+-1*P_000121000*R_011[0]+P_000221000*R_021[0]+-1*P_000321000*R_031[0];
double PR_000020001001=P_000020001*R_001[0]+-1*P_000020101*R_002[0]+-1*P_000120001*R_011[0]+P_000120101*R_012[0]+P_000220001*R_021[0]+-1*P_000220101*R_022[0];
double PR_011000010001=P_011000010*R_001[0]+-1*P_011000110*R_002[0]+-1*P_111000010*R_101[0]+P_111000110*R_102[0]+P_211000010*R_201[0]+-1*P_211000110*R_202[0];
double PR_010001010001=P_010001010*R_001[0]+-1*P_010001110*R_002[0]+-1*P_010101010*R_011[0]+P_010101110*R_012[0]+-1*P_110001010*R_101[0]+P_110001110*R_102[0]+P_110101010*R_111[0]+-1*P_110101110*R_112[0];
double PR_010000011001=P_010000011*R_001[0]+-1*P_010000111*R_002[0]+P_010000211*R_003[0]+-1*P_110000011*R_101[0]+P_110000111*R_102[0]+-1*P_110000211*R_103[0];
double PR_001010010001=P_001010010*R_001[0]+-1*P_001010110*R_002[0]+-1*P_001110010*R_011[0]+P_001110110*R_012[0]+-1*P_101010010*R_101[0]+P_101010110*R_102[0]+P_101110010*R_111[0]+-1*P_101110110*R_112[0];
double PR_000011010001=P_000011010*R_001[0]+-1*P_000011110*R_002[0]+-1*P_000111010*R_011[0]+P_000111110*R_012[0]+P_000211010*R_021[0]+-1*P_000211110*R_022[0];
double PR_000010011001=P_000010011*R_001[0]+-1*P_000010111*R_002[0]+P_000010211*R_003[0]+-1*P_000110011*R_011[0]+P_000110111*R_012[0]+-1*P_000110211*R_013[0];
double PR_001000020001=P_001000020*R_001[0]+-1*P_001000120*R_002[0]+P_001000220*R_003[0]+-1*P_101000020*R_101[0]+P_101000120*R_102[0]+-1*P_101000220*R_103[0];
double PR_000001020001=P_000001020*R_001[0]+-1*P_000001120*R_002[0]+P_000001220*R_003[0]+-1*P_000101020*R_011[0]+P_000101120*R_012[0]+-1*P_000101220*R_013[0];
double PR_000000021001=P_000000021*R_001[0]+-1*P_000000121*R_002[0]+P_000000221*R_003[0]+-1*P_000000321*R_004[0];
double PR_021000000010=P_021000000*R_010[0]+-1*P_121000000*R_110[0]+P_221000000*R_210[0]+-1*P_321000000*R_310[0];
double PR_020001000010=P_020001000*R_010[0]+-1*P_020101000*R_020[0]+-1*P_120001000*R_110[0]+P_120101000*R_120[0]+P_220001000*R_210[0]+-1*P_220101000*R_220[0];
double PR_020000001010=P_020000001*R_010[0]+-1*P_020000101*R_011[0]+-1*P_120000001*R_110[0]+P_120000101*R_111[0]+P_220000001*R_210[0]+-1*P_220000101*R_211[0];
double PR_011010000010=P_011010000*R_010[0]+-1*P_011110000*R_020[0]+-1*P_111010000*R_110[0]+P_111110000*R_120[0]+P_211010000*R_210[0]+-1*P_211110000*R_220[0];
double PR_010011000010=P_010011000*R_010[0]+-1*P_010111000*R_020[0]+P_010211000*R_030[0]+-1*P_110011000*R_110[0]+P_110111000*R_120[0]+-1*P_110211000*R_130[0];
double PR_010010001010=P_010010001*R_010[0]+-1*P_010010101*R_011[0]+-1*P_010110001*R_020[0]+P_010110101*R_021[0]+-1*P_110010001*R_110[0]+P_110010101*R_111[0]+P_110110001*R_120[0]+-1*P_110110101*R_121[0];
double PR_001020000010=P_001020000*R_010[0]+-1*P_001120000*R_020[0]+P_001220000*R_030[0]+-1*P_101020000*R_110[0]+P_101120000*R_120[0]+-1*P_101220000*R_130[0];
double PR_000021000010=P_000021000*R_010[0]+-1*P_000121000*R_020[0]+P_000221000*R_030[0]+-1*P_000321000*R_040[0];
double PR_000020001010=P_000020001*R_010[0]+-1*P_000020101*R_011[0]+-1*P_000120001*R_020[0]+P_000120101*R_021[0]+P_000220001*R_030[0]+-1*P_000220101*R_031[0];
double PR_011000010010=P_011000010*R_010[0]+-1*P_011000110*R_011[0]+-1*P_111000010*R_110[0]+P_111000110*R_111[0]+P_211000010*R_210[0]+-1*P_211000110*R_211[0];
double PR_010001010010=P_010001010*R_010[0]+-1*P_010001110*R_011[0]+-1*P_010101010*R_020[0]+P_010101110*R_021[0]+-1*P_110001010*R_110[0]+P_110001110*R_111[0]+P_110101010*R_120[0]+-1*P_110101110*R_121[0];
double PR_010000011010=P_010000011*R_010[0]+-1*P_010000111*R_011[0]+P_010000211*R_012[0]+-1*P_110000011*R_110[0]+P_110000111*R_111[0]+-1*P_110000211*R_112[0];
double PR_001010010010=P_001010010*R_010[0]+-1*P_001010110*R_011[0]+-1*P_001110010*R_020[0]+P_001110110*R_021[0]+-1*P_101010010*R_110[0]+P_101010110*R_111[0]+P_101110010*R_120[0]+-1*P_101110110*R_121[0];
double PR_000011010010=P_000011010*R_010[0]+-1*P_000011110*R_011[0]+-1*P_000111010*R_020[0]+P_000111110*R_021[0]+P_000211010*R_030[0]+-1*P_000211110*R_031[0];
double PR_000010011010=P_000010011*R_010[0]+-1*P_000010111*R_011[0]+P_000010211*R_012[0]+-1*P_000110011*R_020[0]+P_000110111*R_021[0]+-1*P_000110211*R_022[0];
double PR_001000020010=P_001000020*R_010[0]+-1*P_001000120*R_011[0]+P_001000220*R_012[0]+-1*P_101000020*R_110[0]+P_101000120*R_111[0]+-1*P_101000220*R_112[0];
double PR_000001020010=P_000001020*R_010[0]+-1*P_000001120*R_011[0]+P_000001220*R_012[0]+-1*P_000101020*R_020[0]+P_000101120*R_021[0]+-1*P_000101220*R_022[0];
double PR_000000021010=P_000000021*R_010[0]+-1*P_000000121*R_011[0]+P_000000221*R_012[0]+-1*P_000000321*R_013[0];
double PR_021000000100=P_021000000*R_100[0]+-1*P_121000000*R_200[0]+P_221000000*R_300[0]+-1*P_321000000*R_400[0];
double PR_020001000100=P_020001000*R_100[0]+-1*P_020101000*R_110[0]+-1*P_120001000*R_200[0]+P_120101000*R_210[0]+P_220001000*R_300[0]+-1*P_220101000*R_310[0];
double PR_020000001100=P_020000001*R_100[0]+-1*P_020000101*R_101[0]+-1*P_120000001*R_200[0]+P_120000101*R_201[0]+P_220000001*R_300[0]+-1*P_220000101*R_301[0];
double PR_011010000100=P_011010000*R_100[0]+-1*P_011110000*R_110[0]+-1*P_111010000*R_200[0]+P_111110000*R_210[0]+P_211010000*R_300[0]+-1*P_211110000*R_310[0];
double PR_010011000100=P_010011000*R_100[0]+-1*P_010111000*R_110[0]+P_010211000*R_120[0]+-1*P_110011000*R_200[0]+P_110111000*R_210[0]+-1*P_110211000*R_220[0];
double PR_010010001100=P_010010001*R_100[0]+-1*P_010010101*R_101[0]+-1*P_010110001*R_110[0]+P_010110101*R_111[0]+-1*P_110010001*R_200[0]+P_110010101*R_201[0]+P_110110001*R_210[0]+-1*P_110110101*R_211[0];
double PR_001020000100=P_001020000*R_100[0]+-1*P_001120000*R_110[0]+P_001220000*R_120[0]+-1*P_101020000*R_200[0]+P_101120000*R_210[0]+-1*P_101220000*R_220[0];
double PR_000021000100=P_000021000*R_100[0]+-1*P_000121000*R_110[0]+P_000221000*R_120[0]+-1*P_000321000*R_130[0];
double PR_000020001100=P_000020001*R_100[0]+-1*P_000020101*R_101[0]+-1*P_000120001*R_110[0]+P_000120101*R_111[0]+P_000220001*R_120[0]+-1*P_000220101*R_121[0];
double PR_011000010100=P_011000010*R_100[0]+-1*P_011000110*R_101[0]+-1*P_111000010*R_200[0]+P_111000110*R_201[0]+P_211000010*R_300[0]+-1*P_211000110*R_301[0];
double PR_010001010100=P_010001010*R_100[0]+-1*P_010001110*R_101[0]+-1*P_010101010*R_110[0]+P_010101110*R_111[0]+-1*P_110001010*R_200[0]+P_110001110*R_201[0]+P_110101010*R_210[0]+-1*P_110101110*R_211[0];
double PR_010000011100=P_010000011*R_100[0]+-1*P_010000111*R_101[0]+P_010000211*R_102[0]+-1*P_110000011*R_200[0]+P_110000111*R_201[0]+-1*P_110000211*R_202[0];
double PR_001010010100=P_001010010*R_100[0]+-1*P_001010110*R_101[0]+-1*P_001110010*R_110[0]+P_001110110*R_111[0]+-1*P_101010010*R_200[0]+P_101010110*R_201[0]+P_101110010*R_210[0]+-1*P_101110110*R_211[0];
double PR_000011010100=P_000011010*R_100[0]+-1*P_000011110*R_101[0]+-1*P_000111010*R_110[0]+P_000111110*R_111[0]+P_000211010*R_120[0]+-1*P_000211110*R_121[0];
double PR_000010011100=P_000010011*R_100[0]+-1*P_000010111*R_101[0]+P_000010211*R_102[0]+-1*P_000110011*R_110[0]+P_000110111*R_111[0]+-1*P_000110211*R_112[0];
double PR_001000020100=P_001000020*R_100[0]+-1*P_001000120*R_101[0]+P_001000220*R_102[0]+-1*P_101000020*R_200[0]+P_101000120*R_201[0]+-1*P_101000220*R_202[0];
double PR_000001020100=P_000001020*R_100[0]+-1*P_000001120*R_101[0]+P_000001220*R_102[0]+-1*P_000101020*R_110[0]+P_000101120*R_111[0]+-1*P_000101220*R_112[0];
double PR_000000021100=P_000000021*R_100[0]+-1*P_000000121*R_101[0]+P_000000221*R_102[0]+-1*P_000000321*R_103[0];
double PR_021000000002=P_021000000*R_002[0]+-1*P_121000000*R_102[0]+P_221000000*R_202[0]+-1*P_321000000*R_302[0];
double PR_020001000002=P_020001000*R_002[0]+-1*P_020101000*R_012[0]+-1*P_120001000*R_102[0]+P_120101000*R_112[0]+P_220001000*R_202[0]+-1*P_220101000*R_212[0];
double PR_020000001002=P_020000001*R_002[0]+-1*P_020000101*R_003[0]+-1*P_120000001*R_102[0]+P_120000101*R_103[0]+P_220000001*R_202[0]+-1*P_220000101*R_203[0];
double PR_011010000002=P_011010000*R_002[0]+-1*P_011110000*R_012[0]+-1*P_111010000*R_102[0]+P_111110000*R_112[0]+P_211010000*R_202[0]+-1*P_211110000*R_212[0];
double PR_010011000002=P_010011000*R_002[0]+-1*P_010111000*R_012[0]+P_010211000*R_022[0]+-1*P_110011000*R_102[0]+P_110111000*R_112[0]+-1*P_110211000*R_122[0];
double PR_010010001002=P_010010001*R_002[0]+-1*P_010010101*R_003[0]+-1*P_010110001*R_012[0]+P_010110101*R_013[0]+-1*P_110010001*R_102[0]+P_110010101*R_103[0]+P_110110001*R_112[0]+-1*P_110110101*R_113[0];
double PR_001020000002=P_001020000*R_002[0]+-1*P_001120000*R_012[0]+P_001220000*R_022[0]+-1*P_101020000*R_102[0]+P_101120000*R_112[0]+-1*P_101220000*R_122[0];
double PR_000021000002=P_000021000*R_002[0]+-1*P_000121000*R_012[0]+P_000221000*R_022[0]+-1*P_000321000*R_032[0];
double PR_000020001002=P_000020001*R_002[0]+-1*P_000020101*R_003[0]+-1*P_000120001*R_012[0]+P_000120101*R_013[0]+P_000220001*R_022[0]+-1*P_000220101*R_023[0];
double PR_011000010002=P_011000010*R_002[0]+-1*P_011000110*R_003[0]+-1*P_111000010*R_102[0]+P_111000110*R_103[0]+P_211000010*R_202[0]+-1*P_211000110*R_203[0];
double PR_010001010002=P_010001010*R_002[0]+-1*P_010001110*R_003[0]+-1*P_010101010*R_012[0]+P_010101110*R_013[0]+-1*P_110001010*R_102[0]+P_110001110*R_103[0]+P_110101010*R_112[0]+-1*P_110101110*R_113[0];
double PR_010000011002=P_010000011*R_002[0]+-1*P_010000111*R_003[0]+P_010000211*R_004[0]+-1*P_110000011*R_102[0]+P_110000111*R_103[0]+-1*P_110000211*R_104[0];
double PR_001010010002=P_001010010*R_002[0]+-1*P_001010110*R_003[0]+-1*P_001110010*R_012[0]+P_001110110*R_013[0]+-1*P_101010010*R_102[0]+P_101010110*R_103[0]+P_101110010*R_112[0]+-1*P_101110110*R_113[0];
double PR_000011010002=P_000011010*R_002[0]+-1*P_000011110*R_003[0]+-1*P_000111010*R_012[0]+P_000111110*R_013[0]+P_000211010*R_022[0]+-1*P_000211110*R_023[0];
double PR_000010011002=P_000010011*R_002[0]+-1*P_000010111*R_003[0]+P_000010211*R_004[0]+-1*P_000110011*R_012[0]+P_000110111*R_013[0]+-1*P_000110211*R_014[0];
double PR_001000020002=P_001000020*R_002[0]+-1*P_001000120*R_003[0]+P_001000220*R_004[0]+-1*P_101000020*R_102[0]+P_101000120*R_103[0]+-1*P_101000220*R_104[0];
double PR_000001020002=P_000001020*R_002[0]+-1*P_000001120*R_003[0]+P_000001220*R_004[0]+-1*P_000101020*R_012[0]+P_000101120*R_013[0]+-1*P_000101220*R_014[0];
double PR_000000021002=P_000000021*R_002[0]+-1*P_000000121*R_003[0]+P_000000221*R_004[0]+-1*P_000000321*R_005[0];
double PR_021000000011=P_021000000*R_011[0]+-1*P_121000000*R_111[0]+P_221000000*R_211[0]+-1*P_321000000*R_311[0];
double PR_020001000011=P_020001000*R_011[0]+-1*P_020101000*R_021[0]+-1*P_120001000*R_111[0]+P_120101000*R_121[0]+P_220001000*R_211[0]+-1*P_220101000*R_221[0];
double PR_020000001011=P_020000001*R_011[0]+-1*P_020000101*R_012[0]+-1*P_120000001*R_111[0]+P_120000101*R_112[0]+P_220000001*R_211[0]+-1*P_220000101*R_212[0];
double PR_011010000011=P_011010000*R_011[0]+-1*P_011110000*R_021[0]+-1*P_111010000*R_111[0]+P_111110000*R_121[0]+P_211010000*R_211[0]+-1*P_211110000*R_221[0];
double PR_010011000011=P_010011000*R_011[0]+-1*P_010111000*R_021[0]+P_010211000*R_031[0]+-1*P_110011000*R_111[0]+P_110111000*R_121[0]+-1*P_110211000*R_131[0];
double PR_010010001011=P_010010001*R_011[0]+-1*P_010010101*R_012[0]+-1*P_010110001*R_021[0]+P_010110101*R_022[0]+-1*P_110010001*R_111[0]+P_110010101*R_112[0]+P_110110001*R_121[0]+-1*P_110110101*R_122[0];
double PR_001020000011=P_001020000*R_011[0]+-1*P_001120000*R_021[0]+P_001220000*R_031[0]+-1*P_101020000*R_111[0]+P_101120000*R_121[0]+-1*P_101220000*R_131[0];
double PR_000021000011=P_000021000*R_011[0]+-1*P_000121000*R_021[0]+P_000221000*R_031[0]+-1*P_000321000*R_041[0];
double PR_000020001011=P_000020001*R_011[0]+-1*P_000020101*R_012[0]+-1*P_000120001*R_021[0]+P_000120101*R_022[0]+P_000220001*R_031[0]+-1*P_000220101*R_032[0];
double PR_011000010011=P_011000010*R_011[0]+-1*P_011000110*R_012[0]+-1*P_111000010*R_111[0]+P_111000110*R_112[0]+P_211000010*R_211[0]+-1*P_211000110*R_212[0];
double PR_010001010011=P_010001010*R_011[0]+-1*P_010001110*R_012[0]+-1*P_010101010*R_021[0]+P_010101110*R_022[0]+-1*P_110001010*R_111[0]+P_110001110*R_112[0]+P_110101010*R_121[0]+-1*P_110101110*R_122[0];
double PR_010000011011=P_010000011*R_011[0]+-1*P_010000111*R_012[0]+P_010000211*R_013[0]+-1*P_110000011*R_111[0]+P_110000111*R_112[0]+-1*P_110000211*R_113[0];
double PR_001010010011=P_001010010*R_011[0]+-1*P_001010110*R_012[0]+-1*P_001110010*R_021[0]+P_001110110*R_022[0]+-1*P_101010010*R_111[0]+P_101010110*R_112[0]+P_101110010*R_121[0]+-1*P_101110110*R_122[0];
double PR_000011010011=P_000011010*R_011[0]+-1*P_000011110*R_012[0]+-1*P_000111010*R_021[0]+P_000111110*R_022[0]+P_000211010*R_031[0]+-1*P_000211110*R_032[0];
double PR_000010011011=P_000010011*R_011[0]+-1*P_000010111*R_012[0]+P_000010211*R_013[0]+-1*P_000110011*R_021[0]+P_000110111*R_022[0]+-1*P_000110211*R_023[0];
double PR_001000020011=P_001000020*R_011[0]+-1*P_001000120*R_012[0]+P_001000220*R_013[0]+-1*P_101000020*R_111[0]+P_101000120*R_112[0]+-1*P_101000220*R_113[0];
double PR_000001020011=P_000001020*R_011[0]+-1*P_000001120*R_012[0]+P_000001220*R_013[0]+-1*P_000101020*R_021[0]+P_000101120*R_022[0]+-1*P_000101220*R_023[0];
double PR_000000021011=P_000000021*R_011[0]+-1*P_000000121*R_012[0]+P_000000221*R_013[0]+-1*P_000000321*R_014[0];
double PR_021000000020=P_021000000*R_020[0]+-1*P_121000000*R_120[0]+P_221000000*R_220[0]+-1*P_321000000*R_320[0];
double PR_020001000020=P_020001000*R_020[0]+-1*P_020101000*R_030[0]+-1*P_120001000*R_120[0]+P_120101000*R_130[0]+P_220001000*R_220[0]+-1*P_220101000*R_230[0];
double PR_020000001020=P_020000001*R_020[0]+-1*P_020000101*R_021[0]+-1*P_120000001*R_120[0]+P_120000101*R_121[0]+P_220000001*R_220[0]+-1*P_220000101*R_221[0];
double PR_011010000020=P_011010000*R_020[0]+-1*P_011110000*R_030[0]+-1*P_111010000*R_120[0]+P_111110000*R_130[0]+P_211010000*R_220[0]+-1*P_211110000*R_230[0];
double PR_010011000020=P_010011000*R_020[0]+-1*P_010111000*R_030[0]+P_010211000*R_040[0]+-1*P_110011000*R_120[0]+P_110111000*R_130[0]+-1*P_110211000*R_140[0];
double PR_010010001020=P_010010001*R_020[0]+-1*P_010010101*R_021[0]+-1*P_010110001*R_030[0]+P_010110101*R_031[0]+-1*P_110010001*R_120[0]+P_110010101*R_121[0]+P_110110001*R_130[0]+-1*P_110110101*R_131[0];
double PR_001020000020=P_001020000*R_020[0]+-1*P_001120000*R_030[0]+P_001220000*R_040[0]+-1*P_101020000*R_120[0]+P_101120000*R_130[0]+-1*P_101220000*R_140[0];
double PR_000021000020=P_000021000*R_020[0]+-1*P_000121000*R_030[0]+P_000221000*R_040[0]+-1*P_000321000*R_050[0];
double PR_000020001020=P_000020001*R_020[0]+-1*P_000020101*R_021[0]+-1*P_000120001*R_030[0]+P_000120101*R_031[0]+P_000220001*R_040[0]+-1*P_000220101*R_041[0];
double PR_011000010020=P_011000010*R_020[0]+-1*P_011000110*R_021[0]+-1*P_111000010*R_120[0]+P_111000110*R_121[0]+P_211000010*R_220[0]+-1*P_211000110*R_221[0];
double PR_010001010020=P_010001010*R_020[0]+-1*P_010001110*R_021[0]+-1*P_010101010*R_030[0]+P_010101110*R_031[0]+-1*P_110001010*R_120[0]+P_110001110*R_121[0]+P_110101010*R_130[0]+-1*P_110101110*R_131[0];
double PR_010000011020=P_010000011*R_020[0]+-1*P_010000111*R_021[0]+P_010000211*R_022[0]+-1*P_110000011*R_120[0]+P_110000111*R_121[0]+-1*P_110000211*R_122[0];
double PR_001010010020=P_001010010*R_020[0]+-1*P_001010110*R_021[0]+-1*P_001110010*R_030[0]+P_001110110*R_031[0]+-1*P_101010010*R_120[0]+P_101010110*R_121[0]+P_101110010*R_130[0]+-1*P_101110110*R_131[0];
double PR_000011010020=P_000011010*R_020[0]+-1*P_000011110*R_021[0]+-1*P_000111010*R_030[0]+P_000111110*R_031[0]+P_000211010*R_040[0]+-1*P_000211110*R_041[0];
double PR_000010011020=P_000010011*R_020[0]+-1*P_000010111*R_021[0]+P_000010211*R_022[0]+-1*P_000110011*R_030[0]+P_000110111*R_031[0]+-1*P_000110211*R_032[0];
double PR_001000020020=P_001000020*R_020[0]+-1*P_001000120*R_021[0]+P_001000220*R_022[0]+-1*P_101000020*R_120[0]+P_101000120*R_121[0]+-1*P_101000220*R_122[0];
double PR_000001020020=P_000001020*R_020[0]+-1*P_000001120*R_021[0]+P_000001220*R_022[0]+-1*P_000101020*R_030[0]+P_000101120*R_031[0]+-1*P_000101220*R_032[0];
double PR_000000021020=P_000000021*R_020[0]+-1*P_000000121*R_021[0]+P_000000221*R_022[0]+-1*P_000000321*R_023[0];
double PR_021000000101=P_021000000*R_101[0]+-1*P_121000000*R_201[0]+P_221000000*R_301[0]+-1*P_321000000*R_401[0];
double PR_020001000101=P_020001000*R_101[0]+-1*P_020101000*R_111[0]+-1*P_120001000*R_201[0]+P_120101000*R_211[0]+P_220001000*R_301[0]+-1*P_220101000*R_311[0];
double PR_020000001101=P_020000001*R_101[0]+-1*P_020000101*R_102[0]+-1*P_120000001*R_201[0]+P_120000101*R_202[0]+P_220000001*R_301[0]+-1*P_220000101*R_302[0];
double PR_011010000101=P_011010000*R_101[0]+-1*P_011110000*R_111[0]+-1*P_111010000*R_201[0]+P_111110000*R_211[0]+P_211010000*R_301[0]+-1*P_211110000*R_311[0];
double PR_010011000101=P_010011000*R_101[0]+-1*P_010111000*R_111[0]+P_010211000*R_121[0]+-1*P_110011000*R_201[0]+P_110111000*R_211[0]+-1*P_110211000*R_221[0];
double PR_010010001101=P_010010001*R_101[0]+-1*P_010010101*R_102[0]+-1*P_010110001*R_111[0]+P_010110101*R_112[0]+-1*P_110010001*R_201[0]+P_110010101*R_202[0]+P_110110001*R_211[0]+-1*P_110110101*R_212[0];
double PR_001020000101=P_001020000*R_101[0]+-1*P_001120000*R_111[0]+P_001220000*R_121[0]+-1*P_101020000*R_201[0]+P_101120000*R_211[0]+-1*P_101220000*R_221[0];
double PR_000021000101=P_000021000*R_101[0]+-1*P_000121000*R_111[0]+P_000221000*R_121[0]+-1*P_000321000*R_131[0];
double PR_000020001101=P_000020001*R_101[0]+-1*P_000020101*R_102[0]+-1*P_000120001*R_111[0]+P_000120101*R_112[0]+P_000220001*R_121[0]+-1*P_000220101*R_122[0];
double PR_011000010101=P_011000010*R_101[0]+-1*P_011000110*R_102[0]+-1*P_111000010*R_201[0]+P_111000110*R_202[0]+P_211000010*R_301[0]+-1*P_211000110*R_302[0];
double PR_010001010101=P_010001010*R_101[0]+-1*P_010001110*R_102[0]+-1*P_010101010*R_111[0]+P_010101110*R_112[0]+-1*P_110001010*R_201[0]+P_110001110*R_202[0]+P_110101010*R_211[0]+-1*P_110101110*R_212[0];
double PR_010000011101=P_010000011*R_101[0]+-1*P_010000111*R_102[0]+P_010000211*R_103[0]+-1*P_110000011*R_201[0]+P_110000111*R_202[0]+-1*P_110000211*R_203[0];
double PR_001010010101=P_001010010*R_101[0]+-1*P_001010110*R_102[0]+-1*P_001110010*R_111[0]+P_001110110*R_112[0]+-1*P_101010010*R_201[0]+P_101010110*R_202[0]+P_101110010*R_211[0]+-1*P_101110110*R_212[0];
double PR_000011010101=P_000011010*R_101[0]+-1*P_000011110*R_102[0]+-1*P_000111010*R_111[0]+P_000111110*R_112[0]+P_000211010*R_121[0]+-1*P_000211110*R_122[0];
double PR_000010011101=P_000010011*R_101[0]+-1*P_000010111*R_102[0]+P_000010211*R_103[0]+-1*P_000110011*R_111[0]+P_000110111*R_112[0]+-1*P_000110211*R_113[0];
double PR_001000020101=P_001000020*R_101[0]+-1*P_001000120*R_102[0]+P_001000220*R_103[0]+-1*P_101000020*R_201[0]+P_101000120*R_202[0]+-1*P_101000220*R_203[0];
double PR_000001020101=P_000001020*R_101[0]+-1*P_000001120*R_102[0]+P_000001220*R_103[0]+-1*P_000101020*R_111[0]+P_000101120*R_112[0]+-1*P_000101220*R_113[0];
double PR_000000021101=P_000000021*R_101[0]+-1*P_000000121*R_102[0]+P_000000221*R_103[0]+-1*P_000000321*R_104[0];
double PR_021000000110=P_021000000*R_110[0]+-1*P_121000000*R_210[0]+P_221000000*R_310[0]+-1*P_321000000*R_410[0];
double PR_020001000110=P_020001000*R_110[0]+-1*P_020101000*R_120[0]+-1*P_120001000*R_210[0]+P_120101000*R_220[0]+P_220001000*R_310[0]+-1*P_220101000*R_320[0];
double PR_020000001110=P_020000001*R_110[0]+-1*P_020000101*R_111[0]+-1*P_120000001*R_210[0]+P_120000101*R_211[0]+P_220000001*R_310[0]+-1*P_220000101*R_311[0];
double PR_011010000110=P_011010000*R_110[0]+-1*P_011110000*R_120[0]+-1*P_111010000*R_210[0]+P_111110000*R_220[0]+P_211010000*R_310[0]+-1*P_211110000*R_320[0];
double PR_010011000110=P_010011000*R_110[0]+-1*P_010111000*R_120[0]+P_010211000*R_130[0]+-1*P_110011000*R_210[0]+P_110111000*R_220[0]+-1*P_110211000*R_230[0];
double PR_010010001110=P_010010001*R_110[0]+-1*P_010010101*R_111[0]+-1*P_010110001*R_120[0]+P_010110101*R_121[0]+-1*P_110010001*R_210[0]+P_110010101*R_211[0]+P_110110001*R_220[0]+-1*P_110110101*R_221[0];
double PR_001020000110=P_001020000*R_110[0]+-1*P_001120000*R_120[0]+P_001220000*R_130[0]+-1*P_101020000*R_210[0]+P_101120000*R_220[0]+-1*P_101220000*R_230[0];
double PR_000021000110=P_000021000*R_110[0]+-1*P_000121000*R_120[0]+P_000221000*R_130[0]+-1*P_000321000*R_140[0];
double PR_000020001110=P_000020001*R_110[0]+-1*P_000020101*R_111[0]+-1*P_000120001*R_120[0]+P_000120101*R_121[0]+P_000220001*R_130[0]+-1*P_000220101*R_131[0];
double PR_011000010110=P_011000010*R_110[0]+-1*P_011000110*R_111[0]+-1*P_111000010*R_210[0]+P_111000110*R_211[0]+P_211000010*R_310[0]+-1*P_211000110*R_311[0];
double PR_010001010110=P_010001010*R_110[0]+-1*P_010001110*R_111[0]+-1*P_010101010*R_120[0]+P_010101110*R_121[0]+-1*P_110001010*R_210[0]+P_110001110*R_211[0]+P_110101010*R_220[0]+-1*P_110101110*R_221[0];
double PR_010000011110=P_010000011*R_110[0]+-1*P_010000111*R_111[0]+P_010000211*R_112[0]+-1*P_110000011*R_210[0]+P_110000111*R_211[0]+-1*P_110000211*R_212[0];
double PR_001010010110=P_001010010*R_110[0]+-1*P_001010110*R_111[0]+-1*P_001110010*R_120[0]+P_001110110*R_121[0]+-1*P_101010010*R_210[0]+P_101010110*R_211[0]+P_101110010*R_220[0]+-1*P_101110110*R_221[0];
double PR_000011010110=P_000011010*R_110[0]+-1*P_000011110*R_111[0]+-1*P_000111010*R_120[0]+P_000111110*R_121[0]+P_000211010*R_130[0]+-1*P_000211110*R_131[0];
double PR_000010011110=P_000010011*R_110[0]+-1*P_000010111*R_111[0]+P_000010211*R_112[0]+-1*P_000110011*R_120[0]+P_000110111*R_121[0]+-1*P_000110211*R_122[0];
double PR_001000020110=P_001000020*R_110[0]+-1*P_001000120*R_111[0]+P_001000220*R_112[0]+-1*P_101000020*R_210[0]+P_101000120*R_211[0]+-1*P_101000220*R_212[0];
double PR_000001020110=P_000001020*R_110[0]+-1*P_000001120*R_111[0]+P_000001220*R_112[0]+-1*P_000101020*R_120[0]+P_000101120*R_121[0]+-1*P_000101220*R_122[0];
double PR_000000021110=P_000000021*R_110[0]+-1*P_000000121*R_111[0]+P_000000221*R_112[0]+-1*P_000000321*R_113[0];
double PR_021000000200=P_021000000*R_200[0]+-1*P_121000000*R_300[0]+P_221000000*R_400[0]+-1*P_321000000*R_500[0];
double PR_020001000200=P_020001000*R_200[0]+-1*P_020101000*R_210[0]+-1*P_120001000*R_300[0]+P_120101000*R_310[0]+P_220001000*R_400[0]+-1*P_220101000*R_410[0];
double PR_020000001200=P_020000001*R_200[0]+-1*P_020000101*R_201[0]+-1*P_120000001*R_300[0]+P_120000101*R_301[0]+P_220000001*R_400[0]+-1*P_220000101*R_401[0];
double PR_011010000200=P_011010000*R_200[0]+-1*P_011110000*R_210[0]+-1*P_111010000*R_300[0]+P_111110000*R_310[0]+P_211010000*R_400[0]+-1*P_211110000*R_410[0];
double PR_010011000200=P_010011000*R_200[0]+-1*P_010111000*R_210[0]+P_010211000*R_220[0]+-1*P_110011000*R_300[0]+P_110111000*R_310[0]+-1*P_110211000*R_320[0];
double PR_010010001200=P_010010001*R_200[0]+-1*P_010010101*R_201[0]+-1*P_010110001*R_210[0]+P_010110101*R_211[0]+-1*P_110010001*R_300[0]+P_110010101*R_301[0]+P_110110001*R_310[0]+-1*P_110110101*R_311[0];
double PR_001020000200=P_001020000*R_200[0]+-1*P_001120000*R_210[0]+P_001220000*R_220[0]+-1*P_101020000*R_300[0]+P_101120000*R_310[0]+-1*P_101220000*R_320[0];
double PR_000021000200=P_000021000*R_200[0]+-1*P_000121000*R_210[0]+P_000221000*R_220[0]+-1*P_000321000*R_230[0];
double PR_000020001200=P_000020001*R_200[0]+-1*P_000020101*R_201[0]+-1*P_000120001*R_210[0]+P_000120101*R_211[0]+P_000220001*R_220[0]+-1*P_000220101*R_221[0];
double PR_011000010200=P_011000010*R_200[0]+-1*P_011000110*R_201[0]+-1*P_111000010*R_300[0]+P_111000110*R_301[0]+P_211000010*R_400[0]+-1*P_211000110*R_401[0];
double PR_010001010200=P_010001010*R_200[0]+-1*P_010001110*R_201[0]+-1*P_010101010*R_210[0]+P_010101110*R_211[0]+-1*P_110001010*R_300[0]+P_110001110*R_301[0]+P_110101010*R_310[0]+-1*P_110101110*R_311[0];
double PR_010000011200=P_010000011*R_200[0]+-1*P_010000111*R_201[0]+P_010000211*R_202[0]+-1*P_110000011*R_300[0]+P_110000111*R_301[0]+-1*P_110000211*R_302[0];
double PR_001010010200=P_001010010*R_200[0]+-1*P_001010110*R_201[0]+-1*P_001110010*R_210[0]+P_001110110*R_211[0]+-1*P_101010010*R_300[0]+P_101010110*R_301[0]+P_101110010*R_310[0]+-1*P_101110110*R_311[0];
double PR_000011010200=P_000011010*R_200[0]+-1*P_000011110*R_201[0]+-1*P_000111010*R_210[0]+P_000111110*R_211[0]+P_000211010*R_220[0]+-1*P_000211110*R_221[0];
double PR_000010011200=P_000010011*R_200[0]+-1*P_000010111*R_201[0]+P_000010211*R_202[0]+-1*P_000110011*R_210[0]+P_000110111*R_211[0]+-1*P_000110211*R_212[0];
double PR_001000020200=P_001000020*R_200[0]+-1*P_001000120*R_201[0]+P_001000220*R_202[0]+-1*P_101000020*R_300[0]+P_101000120*R_301[0]+-1*P_101000220*R_302[0];
double PR_000001020200=P_000001020*R_200[0]+-1*P_000001120*R_201[0]+P_000001220*R_202[0]+-1*P_000101020*R_210[0]+P_000101120*R_211[0]+-1*P_000101220*R_212[0];
double PR_000000021200=P_000000021*R_200[0]+-1*P_000000121*R_201[0]+P_000000221*R_202[0]+-1*P_000000321*R_203[0];
/* --- Ket-side coefficient recurrences -------------------------------------
 * One entry per Cartesian direction (index 0..2 = x,y,z).  Qd_010 and Qd_001
 * are defined earlier in this kernel (outside this chunk); aQin1 is a scalar
 * (presumably 1/(2*eta) of the ket pair -- TODO confirm against the kernel
 * preamble).  Qd_011/111/211 are the second-order Hermite expansion
 * coefficients built from the first-order ones via the products below. */
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
/* Base case: both first-derivative helper coefficients equal aQin1. */
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
/* Qd_011 = Qd_101 + Qd_010*Qd_001  (generated recurrence; do not reorder). */
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
/* Qd_111 = Qd_010*Qd_101 + aQin1*Qd_001 */
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
/* Qd_211 = aQin1*Qd_101 */
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
/* --- Ket-side Cartesian coefficient products ------------------------------
 * The 9-digit suffix encodes three 3-digit groups, one per Cartesian
 * direction; each Q_* is the product of the corresponding per-direction
 * Qd_* entries (e.g. Q_010001000 = Qd_010[0]*Qd_001[1], and a group of
 * zeros contributes no factor).  Purely generated bookkeeping. */
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
ans_temp[ans_id*18+0]+=Pmtrx[0]*(Q_011000000*PR_021000000000+Q_111000000*PR_021000000100+Q_211000000*PR_021000000200);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(Q_010001000*PR_021000000000+Q_010101000*PR_021000000010+Q_110001000*PR_021000000100+Q_110101000*PR_021000000110);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(Q_010000001*PR_021000000000+Q_010000101*PR_021000000001+Q_110000001*PR_021000000100+Q_110000101*PR_021000000101);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(Q_001010000*PR_021000000000+Q_001110000*PR_021000000010+Q_101010000*PR_021000000100+Q_101110000*PR_021000000110);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(Q_000011000*PR_021000000000+Q_000111000*PR_021000000010+Q_000211000*PR_021000000020);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(Q_000010001*PR_021000000000+Q_000010101*PR_021000000001+Q_000110001*PR_021000000010+Q_000110101*PR_021000000011);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(Q_001000010*PR_021000000000+Q_001000110*PR_021000000001+Q_101000010*PR_021000000100+Q_101000110*PR_021000000101);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(Q_000001010*PR_021000000000+Q_000001110*PR_021000000001+Q_000101010*PR_021000000010+Q_000101110*PR_021000000011);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(Q_000000011*PR_021000000000+Q_000000111*PR_021000000001+Q_000000211*PR_021000000002);
ans_temp[ans_id*18+0]+=Pmtrx[3]*(Q_011000000*PR_020001000000+Q_111000000*PR_020001000100+Q_211000000*PR_020001000200);
ans_temp[ans_id*18+0]+=Pmtrx[4]*(Q_010001000*PR_020001000000+Q_010101000*PR_020001000010+Q_110001000*PR_020001000100+Q_110101000*PR_020001000110);
ans_temp[ans_id*18+0]+=Pmtrx[5]*(Q_010000001*PR_020001000000+Q_010000101*PR_020001000001+Q_110000001*PR_020001000100+Q_110000101*PR_020001000101);
ans_temp[ans_id*18+1]+=Pmtrx[3]*(Q_001010000*PR_020001000000+Q_001110000*PR_020001000010+Q_101010000*PR_020001000100+Q_101110000*PR_020001000110);
ans_temp[ans_id*18+1]+=Pmtrx[4]*(Q_000011000*PR_020001000000+Q_000111000*PR_020001000010+Q_000211000*PR_020001000020);
ans_temp[ans_id*18+1]+=Pmtrx[5]*(Q_000010001*PR_020001000000+Q_000010101*PR_020001000001+Q_000110001*PR_020001000010+Q_000110101*PR_020001000011);
ans_temp[ans_id*18+2]+=Pmtrx[3]*(Q_001000010*PR_020001000000+Q_001000110*PR_020001000001+Q_101000010*PR_020001000100+Q_101000110*PR_020001000101);
ans_temp[ans_id*18+2]+=Pmtrx[4]*(Q_000001010*PR_020001000000+Q_000001110*PR_020001000001+Q_000101010*PR_020001000010+Q_000101110*PR_020001000011);
ans_temp[ans_id*18+2]+=Pmtrx[5]*(Q_000000011*PR_020001000000+Q_000000111*PR_020001000001+Q_000000211*PR_020001000002);
ans_temp[ans_id*18+0]+=Pmtrx[6]*(Q_011000000*PR_020000001000+Q_111000000*PR_020000001100+Q_211000000*PR_020000001200);
ans_temp[ans_id*18+0]+=Pmtrx[7]*(Q_010001000*PR_020000001000+Q_010101000*PR_020000001010+Q_110001000*PR_020000001100+Q_110101000*PR_020000001110);
ans_temp[ans_id*18+0]+=Pmtrx[8]*(Q_010000001*PR_020000001000+Q_010000101*PR_020000001001+Q_110000001*PR_020000001100+Q_110000101*PR_020000001101);
ans_temp[ans_id*18+1]+=Pmtrx[6]*(Q_001010000*PR_020000001000+Q_001110000*PR_020000001010+Q_101010000*PR_020000001100+Q_101110000*PR_020000001110);
ans_temp[ans_id*18+1]+=Pmtrx[7]*(Q_000011000*PR_020000001000+Q_000111000*PR_020000001010+Q_000211000*PR_020000001020);
ans_temp[ans_id*18+1]+=Pmtrx[8]*(Q_000010001*PR_020000001000+Q_000010101*PR_020000001001+Q_000110001*PR_020000001010+Q_000110101*PR_020000001011);
ans_temp[ans_id*18+2]+=Pmtrx[6]*(Q_001000010*PR_020000001000+Q_001000110*PR_020000001001+Q_101000010*PR_020000001100+Q_101000110*PR_020000001101);
ans_temp[ans_id*18+2]+=Pmtrx[7]*(Q_000001010*PR_020000001000+Q_000001110*PR_020000001001+Q_000101010*PR_020000001010+Q_000101110*PR_020000001011);
ans_temp[ans_id*18+2]+=Pmtrx[8]*(Q_000000011*PR_020000001000+Q_000000111*PR_020000001001+Q_000000211*PR_020000001002);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(Q_011000000*PR_011010000000+Q_111000000*PR_011010000100+Q_211000000*PR_011010000200);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(Q_010001000*PR_011010000000+Q_010101000*PR_011010000010+Q_110001000*PR_011010000100+Q_110101000*PR_011010000110);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(Q_010000001*PR_011010000000+Q_010000101*PR_011010000001+Q_110000001*PR_011010000100+Q_110000101*PR_011010000101);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(Q_001010000*PR_011010000000+Q_001110000*PR_011010000010+Q_101010000*PR_011010000100+Q_101110000*PR_011010000110);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(Q_000011000*PR_011010000000+Q_000111000*PR_011010000010+Q_000211000*PR_011010000020);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(Q_000010001*PR_011010000000+Q_000010101*PR_011010000001+Q_000110001*PR_011010000010+Q_000110101*PR_011010000011);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(Q_001000010*PR_011010000000+Q_001000110*PR_011010000001+Q_101000010*PR_011010000100+Q_101000110*PR_011010000101);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(Q_000001010*PR_011010000000+Q_000001110*PR_011010000001+Q_000101010*PR_011010000010+Q_000101110*PR_011010000011);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(Q_000000011*PR_011010000000+Q_000000111*PR_011010000001+Q_000000211*PR_011010000002);
ans_temp[ans_id*18+3]+=Pmtrx[3]*(Q_011000000*PR_010011000000+Q_111000000*PR_010011000100+Q_211000000*PR_010011000200);
ans_temp[ans_id*18+3]+=Pmtrx[4]*(Q_010001000*PR_010011000000+Q_010101000*PR_010011000010+Q_110001000*PR_010011000100+Q_110101000*PR_010011000110);
ans_temp[ans_id*18+3]+=Pmtrx[5]*(Q_010000001*PR_010011000000+Q_010000101*PR_010011000001+Q_110000001*PR_010011000100+Q_110000101*PR_010011000101);
ans_temp[ans_id*18+4]+=Pmtrx[3]*(Q_001010000*PR_010011000000+Q_001110000*PR_010011000010+Q_101010000*PR_010011000100+Q_101110000*PR_010011000110);
ans_temp[ans_id*18+4]+=Pmtrx[4]*(Q_000011000*PR_010011000000+Q_000111000*PR_010011000010+Q_000211000*PR_010011000020);
ans_temp[ans_id*18+4]+=Pmtrx[5]*(Q_000010001*PR_010011000000+Q_000010101*PR_010011000001+Q_000110001*PR_010011000010+Q_000110101*PR_010011000011);
ans_temp[ans_id*18+5]+=Pmtrx[3]*(Q_001000010*PR_010011000000+Q_001000110*PR_010011000001+Q_101000010*PR_010011000100+Q_101000110*PR_010011000101);
ans_temp[ans_id*18+5]+=Pmtrx[4]*(Q_000001010*PR_010011000000+Q_000001110*PR_010011000001+Q_000101010*PR_010011000010+Q_000101110*PR_010011000011);
ans_temp[ans_id*18+5]+=Pmtrx[5]*(Q_000000011*PR_010011000000+Q_000000111*PR_010011000001+Q_000000211*PR_010011000002);
ans_temp[ans_id*18+3]+=Pmtrx[6]*(Q_011000000*PR_010010001000+Q_111000000*PR_010010001100+Q_211000000*PR_010010001200);
ans_temp[ans_id*18+3]+=Pmtrx[7]*(Q_010001000*PR_010010001000+Q_010101000*PR_010010001010+Q_110001000*PR_010010001100+Q_110101000*PR_010010001110);
ans_temp[ans_id*18+3]+=Pmtrx[8]*(Q_010000001*PR_010010001000+Q_010000101*PR_010010001001+Q_110000001*PR_010010001100+Q_110000101*PR_010010001101);
ans_temp[ans_id*18+4]+=Pmtrx[6]*(Q_001010000*PR_010010001000+Q_001110000*PR_010010001010+Q_101010000*PR_010010001100+Q_101110000*PR_010010001110);
ans_temp[ans_id*18+4]+=Pmtrx[7]*(Q_000011000*PR_010010001000+Q_000111000*PR_010010001010+Q_000211000*PR_010010001020);
ans_temp[ans_id*18+4]+=Pmtrx[8]*(Q_000010001*PR_010010001000+Q_000010101*PR_010010001001+Q_000110001*PR_010010001010+Q_000110101*PR_010010001011);
ans_temp[ans_id*18+5]+=Pmtrx[6]*(Q_001000010*PR_010010001000+Q_001000110*PR_010010001001+Q_101000010*PR_010010001100+Q_101000110*PR_010010001101);
ans_temp[ans_id*18+5]+=Pmtrx[7]*(Q_000001010*PR_010010001000+Q_000001110*PR_010010001001+Q_000101010*PR_010010001010+Q_000101110*PR_010010001011);
ans_temp[ans_id*18+5]+=Pmtrx[8]*(Q_000000011*PR_010010001000+Q_000000111*PR_010010001001+Q_000000211*PR_010010001002);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(Q_011000000*PR_001020000000+Q_111000000*PR_001020000100+Q_211000000*PR_001020000200);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(Q_010001000*PR_001020000000+Q_010101000*PR_001020000010+Q_110001000*PR_001020000100+Q_110101000*PR_001020000110);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(Q_010000001*PR_001020000000+Q_010000101*PR_001020000001+Q_110000001*PR_001020000100+Q_110000101*PR_001020000101);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(Q_001010000*PR_001020000000+Q_001110000*PR_001020000010+Q_101010000*PR_001020000100+Q_101110000*PR_001020000110);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(Q_000011000*PR_001020000000+Q_000111000*PR_001020000010+Q_000211000*PR_001020000020);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(Q_000010001*PR_001020000000+Q_000010101*PR_001020000001+Q_000110001*PR_001020000010+Q_000110101*PR_001020000011);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(Q_001000010*PR_001020000000+Q_001000110*PR_001020000001+Q_101000010*PR_001020000100+Q_101000110*PR_001020000101);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(Q_000001010*PR_001020000000+Q_000001110*PR_001020000001+Q_000101010*PR_001020000010+Q_000101110*PR_001020000011);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(Q_000000011*PR_001020000000+Q_000000111*PR_001020000001+Q_000000211*PR_001020000002);
ans_temp[ans_id*18+6]+=Pmtrx[3]*(Q_011000000*PR_000021000000+Q_111000000*PR_000021000100+Q_211000000*PR_000021000200);
ans_temp[ans_id*18+6]+=Pmtrx[4]*(Q_010001000*PR_000021000000+Q_010101000*PR_000021000010+Q_110001000*PR_000021000100+Q_110101000*PR_000021000110);
ans_temp[ans_id*18+6]+=Pmtrx[5]*(Q_010000001*PR_000021000000+Q_010000101*PR_000021000001+Q_110000001*PR_000021000100+Q_110000101*PR_000021000101);
ans_temp[ans_id*18+7]+=Pmtrx[3]*(Q_001010000*PR_000021000000+Q_001110000*PR_000021000010+Q_101010000*PR_000021000100+Q_101110000*PR_000021000110);
ans_temp[ans_id*18+7]+=Pmtrx[4]*(Q_000011000*PR_000021000000+Q_000111000*PR_000021000010+Q_000211000*PR_000021000020);
ans_temp[ans_id*18+7]+=Pmtrx[5]*(Q_000010001*PR_000021000000+Q_000010101*PR_000021000001+Q_000110001*PR_000021000010+Q_000110101*PR_000021000011);
ans_temp[ans_id*18+8]+=Pmtrx[3]*(Q_001000010*PR_000021000000+Q_001000110*PR_000021000001+Q_101000010*PR_000021000100+Q_101000110*PR_000021000101);
ans_temp[ans_id*18+8]+=Pmtrx[4]*(Q_000001010*PR_000021000000+Q_000001110*PR_000021000001+Q_000101010*PR_000021000010+Q_000101110*PR_000021000011);
ans_temp[ans_id*18+8]+=Pmtrx[5]*(Q_000000011*PR_000021000000+Q_000000111*PR_000021000001+Q_000000211*PR_000021000002);
ans_temp[ans_id*18+6]+=Pmtrx[6]*(Q_011000000*PR_000020001000+Q_111000000*PR_000020001100+Q_211000000*PR_000020001200);
ans_temp[ans_id*18+6]+=Pmtrx[7]*(Q_010001000*PR_000020001000+Q_010101000*PR_000020001010+Q_110001000*PR_000020001100+Q_110101000*PR_000020001110);
ans_temp[ans_id*18+6]+=Pmtrx[8]*(Q_010000001*PR_000020001000+Q_010000101*PR_000020001001+Q_110000001*PR_000020001100+Q_110000101*PR_000020001101);
ans_temp[ans_id*18+7]+=Pmtrx[6]*(Q_001010000*PR_000020001000+Q_001110000*PR_000020001010+Q_101010000*PR_000020001100+Q_101110000*PR_000020001110);
ans_temp[ans_id*18+7]+=Pmtrx[7]*(Q_000011000*PR_000020001000+Q_000111000*PR_000020001010+Q_000211000*PR_000020001020);
ans_temp[ans_id*18+7]+=Pmtrx[8]*(Q_000010001*PR_000020001000+Q_000010101*PR_000020001001+Q_000110001*PR_000020001010+Q_000110101*PR_000020001011);
ans_temp[ans_id*18+8]+=Pmtrx[6]*(Q_001000010*PR_000020001000+Q_001000110*PR_000020001001+Q_101000010*PR_000020001100+Q_101000110*PR_000020001101);
ans_temp[ans_id*18+8]+=Pmtrx[7]*(Q_000001010*PR_000020001000+Q_000001110*PR_000020001001+Q_000101010*PR_000020001010+Q_000101110*PR_000020001011);
ans_temp[ans_id*18+8]+=Pmtrx[8]*(Q_000000011*PR_000020001000+Q_000000111*PR_000020001001+Q_000000211*PR_000020001002);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(Q_011000000*PR_011000010000+Q_111000000*PR_011000010100+Q_211000000*PR_011000010200);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(Q_010001000*PR_011000010000+Q_010101000*PR_011000010010+Q_110001000*PR_011000010100+Q_110101000*PR_011000010110);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(Q_010000001*PR_011000010000+Q_010000101*PR_011000010001+Q_110000001*PR_011000010100+Q_110000101*PR_011000010101);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(Q_001010000*PR_011000010000+Q_001110000*PR_011000010010+Q_101010000*PR_011000010100+Q_101110000*PR_011000010110);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(Q_000011000*PR_011000010000+Q_000111000*PR_011000010010+Q_000211000*PR_011000010020);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(Q_000010001*PR_011000010000+Q_000010101*PR_011000010001+Q_000110001*PR_011000010010+Q_000110101*PR_011000010011);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(Q_001000010*PR_011000010000+Q_001000110*PR_011000010001+Q_101000010*PR_011000010100+Q_101000110*PR_011000010101);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(Q_000001010*PR_011000010000+Q_000001110*PR_011000010001+Q_000101010*PR_011000010010+Q_000101110*PR_011000010011);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(Q_000000011*PR_011000010000+Q_000000111*PR_011000010001+Q_000000211*PR_011000010002);
ans_temp[ans_id*18+9]+=Pmtrx[3]*(Q_011000000*PR_010001010000+Q_111000000*PR_010001010100+Q_211000000*PR_010001010200);
ans_temp[ans_id*18+9]+=Pmtrx[4]*(Q_010001000*PR_010001010000+Q_010101000*PR_010001010010+Q_110001000*PR_010001010100+Q_110101000*PR_010001010110);
ans_temp[ans_id*18+9]+=Pmtrx[5]*(Q_010000001*PR_010001010000+Q_010000101*PR_010001010001+Q_110000001*PR_010001010100+Q_110000101*PR_010001010101);
ans_temp[ans_id*18+10]+=Pmtrx[3]*(Q_001010000*PR_010001010000+Q_001110000*PR_010001010010+Q_101010000*PR_010001010100+Q_101110000*PR_010001010110);
ans_temp[ans_id*18+10]+=Pmtrx[4]*(Q_000011000*PR_010001010000+Q_000111000*PR_010001010010+Q_000211000*PR_010001010020);
ans_temp[ans_id*18+10]+=Pmtrx[5]*(Q_000010001*PR_010001010000+Q_000010101*PR_010001010001+Q_000110001*PR_010001010010+Q_000110101*PR_010001010011);
ans_temp[ans_id*18+11]+=Pmtrx[3]*(Q_001000010*PR_010001010000+Q_001000110*PR_010001010001+Q_101000010*PR_010001010100+Q_101000110*PR_010001010101);
ans_temp[ans_id*18+11]+=Pmtrx[4]*(Q_000001010*PR_010001010000+Q_000001110*PR_010001010001+Q_000101010*PR_010001010010+Q_000101110*PR_010001010011);
ans_temp[ans_id*18+11]+=Pmtrx[5]*(Q_000000011*PR_010001010000+Q_000000111*PR_010001010001+Q_000000211*PR_010001010002);
ans_temp[ans_id*18+9]+=Pmtrx[6]*(Q_011000000*PR_010000011000+Q_111000000*PR_010000011100+Q_211000000*PR_010000011200);
ans_temp[ans_id*18+9]+=Pmtrx[7]*(Q_010001000*PR_010000011000+Q_010101000*PR_010000011010+Q_110001000*PR_010000011100+Q_110101000*PR_010000011110);
ans_temp[ans_id*18+9]+=Pmtrx[8]*(Q_010000001*PR_010000011000+Q_010000101*PR_010000011001+Q_110000001*PR_010000011100+Q_110000101*PR_010000011101);
ans_temp[ans_id*18+10]+=Pmtrx[6]*(Q_001010000*PR_010000011000+Q_001110000*PR_010000011010+Q_101010000*PR_010000011100+Q_101110000*PR_010000011110);
ans_temp[ans_id*18+10]+=Pmtrx[7]*(Q_000011000*PR_010000011000+Q_000111000*PR_010000011010+Q_000211000*PR_010000011020);
ans_temp[ans_id*18+10]+=Pmtrx[8]*(Q_000010001*PR_010000011000+Q_000010101*PR_010000011001+Q_000110001*PR_010000011010+Q_000110101*PR_010000011011);
ans_temp[ans_id*18+11]+=Pmtrx[6]*(Q_001000010*PR_010000011000+Q_001000110*PR_010000011001+Q_101000010*PR_010000011100+Q_101000110*PR_010000011101);
ans_temp[ans_id*18+11]+=Pmtrx[7]*(Q_000001010*PR_010000011000+Q_000001110*PR_010000011001+Q_000101010*PR_010000011010+Q_000101110*PR_010000011011);
ans_temp[ans_id*18+11]+=Pmtrx[8]*(Q_000000011*PR_010000011000+Q_000000111*PR_010000011001+Q_000000211*PR_010000011002);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(Q_011000000*PR_001010010000+Q_111000000*PR_001010010100+Q_211000000*PR_001010010200);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(Q_010001000*PR_001010010000+Q_010101000*PR_001010010010+Q_110001000*PR_001010010100+Q_110101000*PR_001010010110);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(Q_010000001*PR_001010010000+Q_010000101*PR_001010010001+Q_110000001*PR_001010010100+Q_110000101*PR_001010010101);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(Q_001010000*PR_001010010000+Q_001110000*PR_001010010010+Q_101010000*PR_001010010100+Q_101110000*PR_001010010110);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(Q_000011000*PR_001010010000+Q_000111000*PR_001010010010+Q_000211000*PR_001010010020);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(Q_000010001*PR_001010010000+Q_000010101*PR_001010010001+Q_000110001*PR_001010010010+Q_000110101*PR_001010010011);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(Q_001000010*PR_001010010000+Q_001000110*PR_001010010001+Q_101000010*PR_001010010100+Q_101000110*PR_001010010101);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(Q_000001010*PR_001010010000+Q_000001110*PR_001010010001+Q_000101010*PR_001010010010+Q_000101110*PR_001010010011);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(Q_000000011*PR_001010010000+Q_000000111*PR_001010010001+Q_000000211*PR_001010010002);
ans_temp[ans_id*18+12]+=Pmtrx[3]*(Q_011000000*PR_000011010000+Q_111000000*PR_000011010100+Q_211000000*PR_000011010200);
ans_temp[ans_id*18+12]+=Pmtrx[4]*(Q_010001000*PR_000011010000+Q_010101000*PR_000011010010+Q_110001000*PR_000011010100+Q_110101000*PR_000011010110);
ans_temp[ans_id*18+12]+=Pmtrx[5]*(Q_010000001*PR_000011010000+Q_010000101*PR_000011010001+Q_110000001*PR_000011010100+Q_110000101*PR_000011010101);
ans_temp[ans_id*18+13]+=Pmtrx[3]*(Q_001010000*PR_000011010000+Q_001110000*PR_000011010010+Q_101010000*PR_000011010100+Q_101110000*PR_000011010110);
ans_temp[ans_id*18+13]+=Pmtrx[4]*(Q_000011000*PR_000011010000+Q_000111000*PR_000011010010+Q_000211000*PR_000011010020);
ans_temp[ans_id*18+13]+=Pmtrx[5]*(Q_000010001*PR_000011010000+Q_000010101*PR_000011010001+Q_000110001*PR_000011010010+Q_000110101*PR_000011010011);
ans_temp[ans_id*18+14]+=Pmtrx[3]*(Q_001000010*PR_000011010000+Q_001000110*PR_000011010001+Q_101000010*PR_000011010100+Q_101000110*PR_000011010101);
ans_temp[ans_id*18+14]+=Pmtrx[4]*(Q_000001010*PR_000011010000+Q_000001110*PR_000011010001+Q_000101010*PR_000011010010+Q_000101110*PR_000011010011);
ans_temp[ans_id*18+14]+=Pmtrx[5]*(Q_000000011*PR_000011010000+Q_000000111*PR_000011010001+Q_000000211*PR_000011010002);
ans_temp[ans_id*18+12]+=Pmtrx[6]*(Q_011000000*PR_000010011000+Q_111000000*PR_000010011100+Q_211000000*PR_000010011200);
ans_temp[ans_id*18+12]+=Pmtrx[7]*(Q_010001000*PR_000010011000+Q_010101000*PR_000010011010+Q_110001000*PR_000010011100+Q_110101000*PR_000010011110);
ans_temp[ans_id*18+12]+=Pmtrx[8]*(Q_010000001*PR_000010011000+Q_010000101*PR_000010011001+Q_110000001*PR_000010011100+Q_110000101*PR_000010011101);
ans_temp[ans_id*18+13]+=Pmtrx[6]*(Q_001010000*PR_000010011000+Q_001110000*PR_000010011010+Q_101010000*PR_000010011100+Q_101110000*PR_000010011110);
ans_temp[ans_id*18+13]+=Pmtrx[7]*(Q_000011000*PR_000010011000+Q_000111000*PR_000010011010+Q_000211000*PR_000010011020);
ans_temp[ans_id*18+13]+=Pmtrx[8]*(Q_000010001*PR_000010011000+Q_000010101*PR_000010011001+Q_000110001*PR_000010011010+Q_000110101*PR_000010011011);
ans_temp[ans_id*18+14]+=Pmtrx[6]*(Q_001000010*PR_000010011000+Q_001000110*PR_000010011001+Q_101000010*PR_000010011100+Q_101000110*PR_000010011101);
ans_temp[ans_id*18+14]+=Pmtrx[7]*(Q_000001010*PR_000010011000+Q_000001110*PR_000010011001+Q_000101010*PR_000010011010+Q_000101110*PR_000010011011);
ans_temp[ans_id*18+14]+=Pmtrx[8]*(Q_000000011*PR_000010011000+Q_000000111*PR_000010011001+Q_000000211*PR_000010011002);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(Q_011000000*PR_001000020000+Q_111000000*PR_001000020100+Q_211000000*PR_001000020200);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(Q_010001000*PR_001000020000+Q_010101000*PR_001000020010+Q_110001000*PR_001000020100+Q_110101000*PR_001000020110);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(Q_010000001*PR_001000020000+Q_010000101*PR_001000020001+Q_110000001*PR_001000020100+Q_110000101*PR_001000020101);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(Q_001010000*PR_001000020000+Q_001110000*PR_001000020010+Q_101010000*PR_001000020100+Q_101110000*PR_001000020110);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(Q_000011000*PR_001000020000+Q_000111000*PR_001000020010+Q_000211000*PR_001000020020);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(Q_000010001*PR_001000020000+Q_000010101*PR_001000020001+Q_000110001*PR_001000020010+Q_000110101*PR_001000020011);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(Q_001000010*PR_001000020000+Q_001000110*PR_001000020001+Q_101000010*PR_001000020100+Q_101000110*PR_001000020101);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(Q_000001010*PR_001000020000+Q_000001110*PR_001000020001+Q_000101010*PR_001000020010+Q_000101110*PR_001000020011);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(Q_000000011*PR_001000020000+Q_000000111*PR_001000020001+Q_000000211*PR_001000020002);
ans_temp[ans_id*18+15]+=Pmtrx[3]*(Q_011000000*PR_000001020000+Q_111000000*PR_000001020100+Q_211000000*PR_000001020200);
ans_temp[ans_id*18+15]+=Pmtrx[4]*(Q_010001000*PR_000001020000+Q_010101000*PR_000001020010+Q_110001000*PR_000001020100+Q_110101000*PR_000001020110);
ans_temp[ans_id*18+15]+=Pmtrx[5]*(Q_010000001*PR_000001020000+Q_010000101*PR_000001020001+Q_110000001*PR_000001020100+Q_110000101*PR_000001020101);
ans_temp[ans_id*18+16]+=Pmtrx[3]*(Q_001010000*PR_000001020000+Q_001110000*PR_000001020010+Q_101010000*PR_000001020100+Q_101110000*PR_000001020110);
ans_temp[ans_id*18+16]+=Pmtrx[4]*(Q_000011000*PR_000001020000+Q_000111000*PR_000001020010+Q_000211000*PR_000001020020);
ans_temp[ans_id*18+16]+=Pmtrx[5]*(Q_000010001*PR_000001020000+Q_000010101*PR_000001020001+Q_000110001*PR_000001020010+Q_000110101*PR_000001020011);
ans_temp[ans_id*18+17]+=Pmtrx[3]*(Q_001000010*PR_000001020000+Q_001000110*PR_000001020001+Q_101000010*PR_000001020100+Q_101000110*PR_000001020101);
ans_temp[ans_id*18+17]+=Pmtrx[4]*(Q_000001010*PR_000001020000+Q_000001110*PR_000001020001+Q_000101010*PR_000001020010+Q_000101110*PR_000001020011);
ans_temp[ans_id*18+17]+=Pmtrx[5]*(Q_000000011*PR_000001020000+Q_000000111*PR_000001020001+Q_000000211*PR_000001020002);
ans_temp[ans_id*18+15]+=Pmtrx[6]*(Q_011000000*PR_000000021000+Q_111000000*PR_000000021100+Q_211000000*PR_000000021200);
ans_temp[ans_id*18+15]+=Pmtrx[7]*(Q_010001000*PR_000000021000+Q_010101000*PR_000000021010+Q_110001000*PR_000000021100+Q_110101000*PR_000000021110);
ans_temp[ans_id*18+15]+=Pmtrx[8]*(Q_010000001*PR_000000021000+Q_010000101*PR_000000021001+Q_110000001*PR_000000021100+Q_110000101*PR_000000021101);
ans_temp[ans_id*18+16]+=Pmtrx[6]*(Q_001010000*PR_000000021000+Q_001110000*PR_000000021010+Q_101010000*PR_000000021100+Q_101110000*PR_000000021110);
ans_temp[ans_id*18+16]+=Pmtrx[7]*(Q_000011000*PR_000000021000+Q_000111000*PR_000000021010+Q_000211000*PR_000000021020);
ans_temp[ans_id*18+16]+=Pmtrx[8]*(Q_000010001*PR_000000021000+Q_000010101*PR_000000021001+Q_000110001*PR_000000021010+Q_000110101*PR_000000021011);
ans_temp[ans_id*18+17]+=Pmtrx[6]*(Q_001000010*PR_000000021000+Q_001000110*PR_000000021001+Q_101000010*PR_000000021100+Q_101000110*PR_000000021101);
ans_temp[ans_id*18+17]+=Pmtrx[7]*(Q_000001010*PR_000000021000+Q_000001110*PR_000000021001+Q_000101010*PR_000000021010+Q_000101110*PR_000000021011);
ans_temp[ans_id*18+17]+=Pmtrx[8]*(Q_000000011*PR_000000021000+Q_000000111*PR_000000021001+Q_000000211*PR_000000021002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kq_dppp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[9]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<3;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[6];
Ft_fs_5(5,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[5];
double R_200[4];
double R_300[3];
double R_400[2];
double R_500[1];
double R_010[5];
double R_110[4];
double R_210[3];
double R_310[2];
double R_410[1];
double R_020[4];
double R_120[3];
double R_220[2];
double R_320[1];
double R_030[3];
double R_130[2];
double R_230[1];
double R_040[2];
double R_140[1];
double R_050[1];
double R_001[5];
double R_101[4];
double R_201[3];
double R_301[2];
double R_401[1];
double R_011[4];
double R_111[3];
double R_211[2];
double R_311[1];
double R_021[3];
double R_121[2];
double R_221[1];
double R_031[2];
double R_131[1];
double R_041[1];
double R_002[4];
double R_102[3];
double R_202[2];
double R_302[1];
double R_012[3];
double R_112[2];
double R_212[1];
double R_022[2];
double R_122[1];
double R_032[1];
double R_003[3];
double R_103[2];
double R_203[1];
double R_013[2];
double R_113[1];
double R_023[1];
double R_004[2];
double R_104[1];
double R_014[1];
double R_005[1];
for(int i=0;i<5;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<5;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<5;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<4;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<4;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<4;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<4;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<3;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<3;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<3;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<3;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<3;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<3;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<3;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<3;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<3;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<2;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<2;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<2;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<2;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<2;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<2;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<2;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<2;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<2;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<2;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<2;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<2;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<2;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<1;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<1;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<1;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<1;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<1;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<1;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<1;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<1;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<1;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<1;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<1;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<1;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<1;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<1;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<1;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<1;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<1;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<1;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<1;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<1;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<1;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double QR_011000000003=Q_011000000*R_003[0]+-1*Q_111000000*R_103[0]+Q_211000000*R_203[0];
double QR_010001000003=Q_010001000*R_003[0]+-1*Q_010101000*R_013[0]+-1*Q_110001000*R_103[0]+Q_110101000*R_113[0];
double QR_010000001003=Q_010000001*R_003[0]+-1*Q_010000101*R_004[0]+-1*Q_110000001*R_103[0]+Q_110000101*R_104[0];
double QR_001010000003=Q_001010000*R_003[0]+-1*Q_001110000*R_013[0]+-1*Q_101010000*R_103[0]+Q_101110000*R_113[0];
double QR_000011000003=Q_000011000*R_003[0]+-1*Q_000111000*R_013[0]+Q_000211000*R_023[0];
double QR_000010001003=Q_000010001*R_003[0]+-1*Q_000010101*R_004[0]+-1*Q_000110001*R_013[0]+Q_000110101*R_014[0];
double QR_001000010003=Q_001000010*R_003[0]+-1*Q_001000110*R_004[0]+-1*Q_101000010*R_103[0]+Q_101000110*R_104[0];
double QR_000001010003=Q_000001010*R_003[0]+-1*Q_000001110*R_004[0]+-1*Q_000101010*R_013[0]+Q_000101110*R_014[0];
double QR_000000011003=Q_000000011*R_003[0]+-1*Q_000000111*R_004[0]+Q_000000211*R_005[0];
double QR_011000000012=Q_011000000*R_012[0]+-1*Q_111000000*R_112[0]+Q_211000000*R_212[0];
double QR_010001000012=Q_010001000*R_012[0]+-1*Q_010101000*R_022[0]+-1*Q_110001000*R_112[0]+Q_110101000*R_122[0];
double QR_010000001012=Q_010000001*R_012[0]+-1*Q_010000101*R_013[0]+-1*Q_110000001*R_112[0]+Q_110000101*R_113[0];
double QR_001010000012=Q_001010000*R_012[0]+-1*Q_001110000*R_022[0]+-1*Q_101010000*R_112[0]+Q_101110000*R_122[0];
double QR_000011000012=Q_000011000*R_012[0]+-1*Q_000111000*R_022[0]+Q_000211000*R_032[0];
double QR_000010001012=Q_000010001*R_012[0]+-1*Q_000010101*R_013[0]+-1*Q_000110001*R_022[0]+Q_000110101*R_023[0];
double QR_001000010012=Q_001000010*R_012[0]+-1*Q_001000110*R_013[0]+-1*Q_101000010*R_112[0]+Q_101000110*R_113[0];
double QR_000001010012=Q_000001010*R_012[0]+-1*Q_000001110*R_013[0]+-1*Q_000101010*R_022[0]+Q_000101110*R_023[0];
double QR_000000011012=Q_000000011*R_012[0]+-1*Q_000000111*R_013[0]+Q_000000211*R_014[0];
double QR_011000000021=Q_011000000*R_021[0]+-1*Q_111000000*R_121[0]+Q_211000000*R_221[0];
double QR_010001000021=Q_010001000*R_021[0]+-1*Q_010101000*R_031[0]+-1*Q_110001000*R_121[0]+Q_110101000*R_131[0];
double QR_010000001021=Q_010000001*R_021[0]+-1*Q_010000101*R_022[0]+-1*Q_110000001*R_121[0]+Q_110000101*R_122[0];
double QR_001010000021=Q_001010000*R_021[0]+-1*Q_001110000*R_031[0]+-1*Q_101010000*R_121[0]+Q_101110000*R_131[0];
double QR_000011000021=Q_000011000*R_021[0]+-1*Q_000111000*R_031[0]+Q_000211000*R_041[0];
double QR_000010001021=Q_000010001*R_021[0]+-1*Q_000010101*R_022[0]+-1*Q_000110001*R_031[0]+Q_000110101*R_032[0];
double QR_001000010021=Q_001000010*R_021[0]+-1*Q_001000110*R_022[0]+-1*Q_101000010*R_121[0]+Q_101000110*R_122[0];
double QR_000001010021=Q_000001010*R_021[0]+-1*Q_000001110*R_022[0]+-1*Q_000101010*R_031[0]+Q_000101110*R_032[0];
double QR_000000011021=Q_000000011*R_021[0]+-1*Q_000000111*R_022[0]+Q_000000211*R_023[0];
double QR_011000000030=Q_011000000*R_030[0]+-1*Q_111000000*R_130[0]+Q_211000000*R_230[0];
double QR_010001000030=Q_010001000*R_030[0]+-1*Q_010101000*R_040[0]+-1*Q_110001000*R_130[0]+Q_110101000*R_140[0];
double QR_010000001030=Q_010000001*R_030[0]+-1*Q_010000101*R_031[0]+-1*Q_110000001*R_130[0]+Q_110000101*R_131[0];
double QR_001010000030=Q_001010000*R_030[0]+-1*Q_001110000*R_040[0]+-1*Q_101010000*R_130[0]+Q_101110000*R_140[0];
double QR_000011000030=Q_000011000*R_030[0]+-1*Q_000111000*R_040[0]+Q_000211000*R_050[0];
double QR_000010001030=Q_000010001*R_030[0]+-1*Q_000010101*R_031[0]+-1*Q_000110001*R_040[0]+Q_000110101*R_041[0];
double QR_001000010030=Q_001000010*R_030[0]+-1*Q_001000110*R_031[0]+-1*Q_101000010*R_130[0]+Q_101000110*R_131[0];
double QR_000001010030=Q_000001010*R_030[0]+-1*Q_000001110*R_031[0]+-1*Q_000101010*R_040[0]+Q_000101110*R_041[0];
double QR_000000011030=Q_000000011*R_030[0]+-1*Q_000000111*R_031[0]+Q_000000211*R_032[0];
double QR_011000000102=Q_011000000*R_102[0]+-1*Q_111000000*R_202[0]+Q_211000000*R_302[0];
double QR_010001000102=Q_010001000*R_102[0]+-1*Q_010101000*R_112[0]+-1*Q_110001000*R_202[0]+Q_110101000*R_212[0];
double QR_010000001102=Q_010000001*R_102[0]+-1*Q_010000101*R_103[0]+-1*Q_110000001*R_202[0]+Q_110000101*R_203[0];
double QR_001010000102=Q_001010000*R_102[0]+-1*Q_001110000*R_112[0]+-1*Q_101010000*R_202[0]+Q_101110000*R_212[0];
double QR_000011000102=Q_000011000*R_102[0]+-1*Q_000111000*R_112[0]+Q_000211000*R_122[0];
double QR_000010001102=Q_000010001*R_102[0]+-1*Q_000010101*R_103[0]+-1*Q_000110001*R_112[0]+Q_000110101*R_113[0];
double QR_001000010102=Q_001000010*R_102[0]+-1*Q_001000110*R_103[0]+-1*Q_101000010*R_202[0]+Q_101000110*R_203[0];
double QR_000001010102=Q_000001010*R_102[0]+-1*Q_000001110*R_103[0]+-1*Q_000101010*R_112[0]+Q_000101110*R_113[0];
double QR_000000011102=Q_000000011*R_102[0]+-1*Q_000000111*R_103[0]+Q_000000211*R_104[0];
double QR_011000000111=Q_011000000*R_111[0]+-1*Q_111000000*R_211[0]+Q_211000000*R_311[0];
double QR_010001000111=Q_010001000*R_111[0]+-1*Q_010101000*R_121[0]+-1*Q_110001000*R_211[0]+Q_110101000*R_221[0];
double QR_010000001111=Q_010000001*R_111[0]+-1*Q_010000101*R_112[0]+-1*Q_110000001*R_211[0]+Q_110000101*R_212[0];
double QR_001010000111=Q_001010000*R_111[0]+-1*Q_001110000*R_121[0]+-1*Q_101010000*R_211[0]+Q_101110000*R_221[0];
double QR_000011000111=Q_000011000*R_111[0]+-1*Q_000111000*R_121[0]+Q_000211000*R_131[0];
double QR_000010001111=Q_000010001*R_111[0]+-1*Q_000010101*R_112[0]+-1*Q_000110001*R_121[0]+Q_000110101*R_122[0];
double QR_001000010111=Q_001000010*R_111[0]+-1*Q_001000110*R_112[0]+-1*Q_101000010*R_211[0]+Q_101000110*R_212[0];
double QR_000001010111=Q_000001010*R_111[0]+-1*Q_000001110*R_112[0]+-1*Q_000101010*R_121[0]+Q_000101110*R_122[0];
double QR_000000011111=Q_000000011*R_111[0]+-1*Q_000000111*R_112[0]+Q_000000211*R_113[0];
double QR_011000000120=Q_011000000*R_120[0]+-1*Q_111000000*R_220[0]+Q_211000000*R_320[0];
double QR_010001000120=Q_010001000*R_120[0]+-1*Q_010101000*R_130[0]+-1*Q_110001000*R_220[0]+Q_110101000*R_230[0];
double QR_010000001120=Q_010000001*R_120[0]+-1*Q_010000101*R_121[0]+-1*Q_110000001*R_220[0]+Q_110000101*R_221[0];
double QR_001010000120=Q_001010000*R_120[0]+-1*Q_001110000*R_130[0]+-1*Q_101010000*R_220[0]+Q_101110000*R_230[0];
double QR_000011000120=Q_000011000*R_120[0]+-1*Q_000111000*R_130[0]+Q_000211000*R_140[0];
double QR_000010001120=Q_000010001*R_120[0]+-1*Q_000010101*R_121[0]+-1*Q_000110001*R_130[0]+Q_000110101*R_131[0];
double QR_001000010120=Q_001000010*R_120[0]+-1*Q_001000110*R_121[0]+-1*Q_101000010*R_220[0]+Q_101000110*R_221[0];
double QR_000001010120=Q_000001010*R_120[0]+-1*Q_000001110*R_121[0]+-1*Q_000101010*R_130[0]+Q_000101110*R_131[0];
double QR_000000011120=Q_000000011*R_120[0]+-1*Q_000000111*R_121[0]+Q_000000211*R_122[0];
double QR_011000000201=Q_011000000*R_201[0]+-1*Q_111000000*R_301[0]+Q_211000000*R_401[0];
double QR_010001000201=Q_010001000*R_201[0]+-1*Q_010101000*R_211[0]+-1*Q_110001000*R_301[0]+Q_110101000*R_311[0];
double QR_010000001201=Q_010000001*R_201[0]+-1*Q_010000101*R_202[0]+-1*Q_110000001*R_301[0]+Q_110000101*R_302[0];
double QR_001010000201=Q_001010000*R_201[0]+-1*Q_001110000*R_211[0]+-1*Q_101010000*R_301[0]+Q_101110000*R_311[0];
double QR_000011000201=Q_000011000*R_201[0]+-1*Q_000111000*R_211[0]+Q_000211000*R_221[0];
double QR_000010001201=Q_000010001*R_201[0]+-1*Q_000010101*R_202[0]+-1*Q_000110001*R_211[0]+Q_000110101*R_212[0];
double QR_001000010201=Q_001000010*R_201[0]+-1*Q_001000110*R_202[0]+-1*Q_101000010*R_301[0]+Q_101000110*R_302[0];
double QR_000001010201=Q_000001010*R_201[0]+-1*Q_000001110*R_202[0]+-1*Q_000101010*R_211[0]+Q_000101110*R_212[0];
double QR_000000011201=Q_000000011*R_201[0]+-1*Q_000000111*R_202[0]+Q_000000211*R_203[0];
double QR_011000000210=Q_011000000*R_210[0]+-1*Q_111000000*R_310[0]+Q_211000000*R_410[0];
double QR_010001000210=Q_010001000*R_210[0]+-1*Q_010101000*R_220[0]+-1*Q_110001000*R_310[0]+Q_110101000*R_320[0];
double QR_010000001210=Q_010000001*R_210[0]+-1*Q_010000101*R_211[0]+-1*Q_110000001*R_310[0]+Q_110000101*R_311[0];
double QR_001010000210=Q_001010000*R_210[0]+-1*Q_001110000*R_220[0]+-1*Q_101010000*R_310[0]+Q_101110000*R_320[0];
double QR_000011000210=Q_000011000*R_210[0]+-1*Q_000111000*R_220[0]+Q_000211000*R_230[0];
double QR_000010001210=Q_000010001*R_210[0]+-1*Q_000010101*R_211[0]+-1*Q_000110001*R_220[0]+Q_000110101*R_221[0];
double QR_001000010210=Q_001000010*R_210[0]+-1*Q_001000110*R_211[0]+-1*Q_101000010*R_310[0]+Q_101000110*R_311[0];
double QR_000001010210=Q_000001010*R_210[0]+-1*Q_000001110*R_211[0]+-1*Q_000101010*R_220[0]+Q_000101110*R_221[0];
double QR_000000011210=Q_000000011*R_210[0]+-1*Q_000000111*R_211[0]+Q_000000211*R_212[0];
double QR_011000000300=Q_011000000*R_300[0]+-1*Q_111000000*R_400[0]+Q_211000000*R_500[0];
double QR_010001000300=Q_010001000*R_300[0]+-1*Q_010101000*R_310[0]+-1*Q_110001000*R_400[0]+Q_110101000*R_410[0];
double QR_010000001300=Q_010000001*R_300[0]+-1*Q_010000101*R_301[0]+-1*Q_110000001*R_400[0]+Q_110000101*R_401[0];
double QR_001010000300=Q_001010000*R_300[0]+-1*Q_001110000*R_310[0]+-1*Q_101010000*R_400[0]+Q_101110000*R_410[0];
double QR_000011000300=Q_000011000*R_300[0]+-1*Q_000111000*R_310[0]+Q_000211000*R_320[0];
double QR_000010001300=Q_000010001*R_300[0]+-1*Q_000010101*R_301[0]+-1*Q_000110001*R_310[0]+Q_000110101*R_311[0];
double QR_001000010300=Q_001000010*R_300[0]+-1*Q_001000110*R_301[0]+-1*Q_101000010*R_400[0]+Q_101000110*R_401[0];
double QR_000001010300=Q_000001010*R_300[0]+-1*Q_000001110*R_301[0]+-1*Q_000101010*R_310[0]+Q_000101110*R_311[0];
double QR_000000011300=Q_000000011*R_300[0]+-1*Q_000000111*R_301[0]+Q_000000211*R_302[0];
// Bra-side McMurchie-Davidson expansion coefficients Pd_<abt>, mirroring
// the ket-side Qd_* recurrences, one entry per direction (0:x, 1:y, 2:z).
// Here the (2,0) and (2,1) families are also needed (d-type bra angular
// momentum), so the recursion is carried one order higher than on the ket.
// NOTE(review): aPin1 is presumably 1/(2*Zta); Pd_010/Pd_001 (PA/PB
// factors) are defined before this chunk -- confirm there.
double Pd_101[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
double Pd_021[3];
double Pd_121[3];
double Pd_221[3];
double Pd_321[3];
// Base cases: t=1 coefficients of the (1,0)/(0,1) expansions.
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
// (1,1) family.
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
// (2,0) family, built from the (1,0) coefficients.
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
// (2,1) family, built from the (1,1) coefficients.
for(int i=0;i<3;i++){
Pd_021[i]=Pd_111[i]+Pd_010[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_121[i]=2*Pd_211[i]+Pd_010[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_221[i]=Pd_010[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_321[i]=aPin1*Pd_211[i];
}
// Contracted bra coefficients P_<xxx><yyy><zzz>, same naming convention as
// the Q_* block: the 9-digit suffix splits into three 3-digit codes naming
// the Pd_* factor used for x ([0]), y ([1]) and z ([2]); 000 = no factor
// in that direction (e.g. P_010010001 = Pd_010[0]*Pd_010[1]*Pd_001[2]).
double P_021000000=Pd_021[0];
double P_121000000=Pd_121[0];
double P_221000000=Pd_221[0];
double P_321000000=Pd_321[0];
double P_020001000=Pd_020[0]*Pd_001[1];
double P_020101000=Pd_020[0]*Pd_101[1];
double P_120001000=Pd_120[0]*Pd_001[1];
double P_120101000=Pd_120[0]*Pd_101[1];
double P_220001000=Pd_220[0]*Pd_001[1];
double P_220101000=Pd_220[0]*Pd_101[1];
double P_020000001=Pd_020[0]*Pd_001[2];
double P_020000101=Pd_020[0]*Pd_101[2];
double P_120000001=Pd_120[0]*Pd_001[2];
double P_120000101=Pd_120[0]*Pd_101[2];
double P_220000001=Pd_220[0]*Pd_001[2];
double P_220000101=Pd_220[0]*Pd_101[2];
double P_011010000=Pd_011[0]*Pd_010[1];
double P_011110000=Pd_011[0]*Pd_110[1];
double P_111010000=Pd_111[0]*Pd_010[1];
double P_111110000=Pd_111[0]*Pd_110[1];
double P_211010000=Pd_211[0]*Pd_010[1];
double P_211110000=Pd_211[0]*Pd_110[1];
double P_010011000=Pd_010[0]*Pd_011[1];
double P_010111000=Pd_010[0]*Pd_111[1];
double P_010211000=Pd_010[0]*Pd_211[1];
double P_110011000=Pd_110[0]*Pd_011[1];
double P_110111000=Pd_110[0]*Pd_111[1];
double P_110211000=Pd_110[0]*Pd_211[1];
double P_010010001=Pd_010[0]*Pd_010[1]*Pd_001[2];
double P_010010101=Pd_010[0]*Pd_010[1]*Pd_101[2];
double P_010110001=Pd_010[0]*Pd_110[1]*Pd_001[2];
double P_010110101=Pd_010[0]*Pd_110[1]*Pd_101[2];
double P_110010001=Pd_110[0]*Pd_010[1]*Pd_001[2];
double P_110010101=Pd_110[0]*Pd_010[1]*Pd_101[2];
double P_110110001=Pd_110[0]*Pd_110[1]*Pd_001[2];
double P_110110101=Pd_110[0]*Pd_110[1]*Pd_101[2];
double P_001020000=Pd_001[0]*Pd_020[1];
double P_001120000=Pd_001[0]*Pd_120[1];
double P_001220000=Pd_001[0]*Pd_220[1];
double P_101020000=Pd_101[0]*Pd_020[1];
double P_101120000=Pd_101[0]*Pd_120[1];
double P_101220000=Pd_101[0]*Pd_220[1];
double P_000021000=Pd_021[1];
double P_000121000=Pd_121[1];
double P_000221000=Pd_221[1];
double P_000321000=Pd_321[1];
double P_000020001=Pd_020[1]*Pd_001[2];
double P_000020101=Pd_020[1]*Pd_101[2];
double P_000120001=Pd_120[1]*Pd_001[2];
double P_000120101=Pd_120[1]*Pd_101[2];
double P_000220001=Pd_220[1]*Pd_001[2];
double P_000220101=Pd_220[1]*Pd_101[2];
double P_011000010=Pd_011[0]*Pd_010[2];
double P_011000110=Pd_011[0]*Pd_110[2];
double P_111000010=Pd_111[0]*Pd_010[2];
double P_111000110=Pd_111[0]*Pd_110[2];
double P_211000010=Pd_211[0]*Pd_010[2];
double P_211000110=Pd_211[0]*Pd_110[2];
double P_010001010=Pd_010[0]*Pd_001[1]*Pd_010[2];
double P_010001110=Pd_010[0]*Pd_001[1]*Pd_110[2];
double P_010101010=Pd_010[0]*Pd_101[1]*Pd_010[2];
double P_010101110=Pd_010[0]*Pd_101[1]*Pd_110[2];
double P_110001010=Pd_110[0]*Pd_001[1]*Pd_010[2];
double P_110001110=Pd_110[0]*Pd_001[1]*Pd_110[2];
double P_110101010=Pd_110[0]*Pd_101[1]*Pd_010[2];
double P_110101110=Pd_110[0]*Pd_101[1]*Pd_110[2];
double P_010000011=Pd_010[0]*Pd_011[2];
double P_010000111=Pd_010[0]*Pd_111[2];
double P_010000211=Pd_010[0]*Pd_211[2];
double P_110000011=Pd_110[0]*Pd_011[2];
double P_110000111=Pd_110[0]*Pd_111[2];
double P_110000211=Pd_110[0]*Pd_211[2];
double P_001010010=Pd_001[0]*Pd_010[1]*Pd_010[2];
double P_001010110=Pd_001[0]*Pd_010[1]*Pd_110[2];
double P_001110010=Pd_001[0]*Pd_110[1]*Pd_010[2];
double P_001110110=Pd_001[0]*Pd_110[1]*Pd_110[2];
double P_101010010=Pd_101[0]*Pd_010[1]*Pd_010[2];
double P_101010110=Pd_101[0]*Pd_010[1]*Pd_110[2];
double P_101110010=Pd_101[0]*Pd_110[1]*Pd_010[2];
double P_101110110=Pd_101[0]*Pd_110[1]*Pd_110[2];
double P_000011010=Pd_011[1]*Pd_010[2];
double P_000011110=Pd_011[1]*Pd_110[2];
double P_000111010=Pd_111[1]*Pd_010[2];
double P_000111110=Pd_111[1]*Pd_110[2];
double P_000211010=Pd_211[1]*Pd_010[2];
double P_000211110=Pd_211[1]*Pd_110[2];
double P_000010011=Pd_010[1]*Pd_011[2];
double P_000010111=Pd_010[1]*Pd_111[2];
double P_000010211=Pd_010[1]*Pd_211[2];
double P_000110011=Pd_110[1]*Pd_011[2];
double P_000110111=Pd_110[1]*Pd_111[2];
double P_000110211=Pd_110[1]*Pd_211[2];
double P_001000020=Pd_001[0]*Pd_020[2];
double P_001000120=Pd_001[0]*Pd_120[2];
double P_001000220=Pd_001[0]*Pd_220[2];
double P_101000020=Pd_101[0]*Pd_020[2];
double P_101000120=Pd_101[0]*Pd_120[2];
double P_101000220=Pd_101[0]*Pd_220[2];
double P_000001020=Pd_001[1]*Pd_020[2];
double P_000001120=Pd_001[1]*Pd_120[2];
double P_000001220=Pd_001[1]*Pd_220[2];
double P_000101020=Pd_101[1]*Pd_020[2];
double P_000101120=Pd_101[1]*Pd_120[2];
double P_000101220=Pd_101[1]*Pd_220[2];
double P_000000021=Pd_021[2];
double P_000000121=Pd_121[2];
double P_000000221=Pd_221[2];
double P_000000321=Pd_321[2];
ans_temp[ans_id*18+0]+=Pmtrx[0]*(P_021000000*QR_011000000000+P_121000000*QR_011000000100+P_221000000*QR_011000000200+P_321000000*QR_011000000300);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(P_021000000*QR_010001000000+P_121000000*QR_010001000100+P_221000000*QR_010001000200+P_321000000*QR_010001000300);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(P_021000000*QR_010000001000+P_121000000*QR_010000001100+P_221000000*QR_010000001200+P_321000000*QR_010000001300);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(P_021000000*QR_001010000000+P_121000000*QR_001010000100+P_221000000*QR_001010000200+P_321000000*QR_001010000300);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(P_021000000*QR_000011000000+P_121000000*QR_000011000100+P_221000000*QR_000011000200+P_321000000*QR_000011000300);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(P_021000000*QR_000010001000+P_121000000*QR_000010001100+P_221000000*QR_000010001200+P_321000000*QR_000010001300);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(P_021000000*QR_001000010000+P_121000000*QR_001000010100+P_221000000*QR_001000010200+P_321000000*QR_001000010300);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(P_021000000*QR_000001010000+P_121000000*QR_000001010100+P_221000000*QR_000001010200+P_321000000*QR_000001010300);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(P_021000000*QR_000000011000+P_121000000*QR_000000011100+P_221000000*QR_000000011200+P_321000000*QR_000000011300);
ans_temp[ans_id*18+0]+=Pmtrx[3]*(P_020001000*QR_011000000000+P_020101000*QR_011000000010+P_120001000*QR_011000000100+P_120101000*QR_011000000110+P_220001000*QR_011000000200+P_220101000*QR_011000000210);
ans_temp[ans_id*18+0]+=Pmtrx[4]*(P_020001000*QR_010001000000+P_020101000*QR_010001000010+P_120001000*QR_010001000100+P_120101000*QR_010001000110+P_220001000*QR_010001000200+P_220101000*QR_010001000210);
ans_temp[ans_id*18+0]+=Pmtrx[5]*(P_020001000*QR_010000001000+P_020101000*QR_010000001010+P_120001000*QR_010000001100+P_120101000*QR_010000001110+P_220001000*QR_010000001200+P_220101000*QR_010000001210);
ans_temp[ans_id*18+1]+=Pmtrx[3]*(P_020001000*QR_001010000000+P_020101000*QR_001010000010+P_120001000*QR_001010000100+P_120101000*QR_001010000110+P_220001000*QR_001010000200+P_220101000*QR_001010000210);
ans_temp[ans_id*18+1]+=Pmtrx[4]*(P_020001000*QR_000011000000+P_020101000*QR_000011000010+P_120001000*QR_000011000100+P_120101000*QR_000011000110+P_220001000*QR_000011000200+P_220101000*QR_000011000210);
ans_temp[ans_id*18+1]+=Pmtrx[5]*(P_020001000*QR_000010001000+P_020101000*QR_000010001010+P_120001000*QR_000010001100+P_120101000*QR_000010001110+P_220001000*QR_000010001200+P_220101000*QR_000010001210);
ans_temp[ans_id*18+2]+=Pmtrx[3]*(P_020001000*QR_001000010000+P_020101000*QR_001000010010+P_120001000*QR_001000010100+P_120101000*QR_001000010110+P_220001000*QR_001000010200+P_220101000*QR_001000010210);
ans_temp[ans_id*18+2]+=Pmtrx[4]*(P_020001000*QR_000001010000+P_020101000*QR_000001010010+P_120001000*QR_000001010100+P_120101000*QR_000001010110+P_220001000*QR_000001010200+P_220101000*QR_000001010210);
ans_temp[ans_id*18+2]+=Pmtrx[5]*(P_020001000*QR_000000011000+P_020101000*QR_000000011010+P_120001000*QR_000000011100+P_120101000*QR_000000011110+P_220001000*QR_000000011200+P_220101000*QR_000000011210);
ans_temp[ans_id*18+0]+=Pmtrx[6]*(P_020000001*QR_011000000000+P_020000101*QR_011000000001+P_120000001*QR_011000000100+P_120000101*QR_011000000101+P_220000001*QR_011000000200+P_220000101*QR_011000000201);
ans_temp[ans_id*18+0]+=Pmtrx[7]*(P_020000001*QR_010001000000+P_020000101*QR_010001000001+P_120000001*QR_010001000100+P_120000101*QR_010001000101+P_220000001*QR_010001000200+P_220000101*QR_010001000201);
ans_temp[ans_id*18+0]+=Pmtrx[8]*(P_020000001*QR_010000001000+P_020000101*QR_010000001001+P_120000001*QR_010000001100+P_120000101*QR_010000001101+P_220000001*QR_010000001200+P_220000101*QR_010000001201);
ans_temp[ans_id*18+1]+=Pmtrx[6]*(P_020000001*QR_001010000000+P_020000101*QR_001010000001+P_120000001*QR_001010000100+P_120000101*QR_001010000101+P_220000001*QR_001010000200+P_220000101*QR_001010000201);
ans_temp[ans_id*18+1]+=Pmtrx[7]*(P_020000001*QR_000011000000+P_020000101*QR_000011000001+P_120000001*QR_000011000100+P_120000101*QR_000011000101+P_220000001*QR_000011000200+P_220000101*QR_000011000201);
ans_temp[ans_id*18+1]+=Pmtrx[8]*(P_020000001*QR_000010001000+P_020000101*QR_000010001001+P_120000001*QR_000010001100+P_120000101*QR_000010001101+P_220000001*QR_000010001200+P_220000101*QR_000010001201);
ans_temp[ans_id*18+2]+=Pmtrx[6]*(P_020000001*QR_001000010000+P_020000101*QR_001000010001+P_120000001*QR_001000010100+P_120000101*QR_001000010101+P_220000001*QR_001000010200+P_220000101*QR_001000010201);
ans_temp[ans_id*18+2]+=Pmtrx[7]*(P_020000001*QR_000001010000+P_020000101*QR_000001010001+P_120000001*QR_000001010100+P_120000101*QR_000001010101+P_220000001*QR_000001010200+P_220000101*QR_000001010201);
ans_temp[ans_id*18+2]+=Pmtrx[8]*(P_020000001*QR_000000011000+P_020000101*QR_000000011001+P_120000001*QR_000000011100+P_120000101*QR_000000011101+P_220000001*QR_000000011200+P_220000101*QR_000000011201);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(P_011010000*QR_011000000000+P_011110000*QR_011000000010+P_111010000*QR_011000000100+P_111110000*QR_011000000110+P_211010000*QR_011000000200+P_211110000*QR_011000000210);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(P_011010000*QR_010001000000+P_011110000*QR_010001000010+P_111010000*QR_010001000100+P_111110000*QR_010001000110+P_211010000*QR_010001000200+P_211110000*QR_010001000210);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(P_011010000*QR_010000001000+P_011110000*QR_010000001010+P_111010000*QR_010000001100+P_111110000*QR_010000001110+P_211010000*QR_010000001200+P_211110000*QR_010000001210);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(P_011010000*QR_001010000000+P_011110000*QR_001010000010+P_111010000*QR_001010000100+P_111110000*QR_001010000110+P_211010000*QR_001010000200+P_211110000*QR_001010000210);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(P_011010000*QR_000011000000+P_011110000*QR_000011000010+P_111010000*QR_000011000100+P_111110000*QR_000011000110+P_211010000*QR_000011000200+P_211110000*QR_000011000210);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(P_011010000*QR_000010001000+P_011110000*QR_000010001010+P_111010000*QR_000010001100+P_111110000*QR_000010001110+P_211010000*QR_000010001200+P_211110000*QR_000010001210);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(P_011010000*QR_001000010000+P_011110000*QR_001000010010+P_111010000*QR_001000010100+P_111110000*QR_001000010110+P_211010000*QR_001000010200+P_211110000*QR_001000010210);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(P_011010000*QR_000001010000+P_011110000*QR_000001010010+P_111010000*QR_000001010100+P_111110000*QR_000001010110+P_211010000*QR_000001010200+P_211110000*QR_000001010210);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(P_011010000*QR_000000011000+P_011110000*QR_000000011010+P_111010000*QR_000000011100+P_111110000*QR_000000011110+P_211010000*QR_000000011200+P_211110000*QR_000000011210);
ans_temp[ans_id*18+3]+=Pmtrx[3]*(P_010011000*QR_011000000000+P_010111000*QR_011000000010+P_010211000*QR_011000000020+P_110011000*QR_011000000100+P_110111000*QR_011000000110+P_110211000*QR_011000000120);
ans_temp[ans_id*18+3]+=Pmtrx[4]*(P_010011000*QR_010001000000+P_010111000*QR_010001000010+P_010211000*QR_010001000020+P_110011000*QR_010001000100+P_110111000*QR_010001000110+P_110211000*QR_010001000120);
ans_temp[ans_id*18+3]+=Pmtrx[5]*(P_010011000*QR_010000001000+P_010111000*QR_010000001010+P_010211000*QR_010000001020+P_110011000*QR_010000001100+P_110111000*QR_010000001110+P_110211000*QR_010000001120);
ans_temp[ans_id*18+4]+=Pmtrx[3]*(P_010011000*QR_001010000000+P_010111000*QR_001010000010+P_010211000*QR_001010000020+P_110011000*QR_001010000100+P_110111000*QR_001010000110+P_110211000*QR_001010000120);
ans_temp[ans_id*18+4]+=Pmtrx[4]*(P_010011000*QR_000011000000+P_010111000*QR_000011000010+P_010211000*QR_000011000020+P_110011000*QR_000011000100+P_110111000*QR_000011000110+P_110211000*QR_000011000120);
ans_temp[ans_id*18+4]+=Pmtrx[5]*(P_010011000*QR_000010001000+P_010111000*QR_000010001010+P_010211000*QR_000010001020+P_110011000*QR_000010001100+P_110111000*QR_000010001110+P_110211000*QR_000010001120);
ans_temp[ans_id*18+5]+=Pmtrx[3]*(P_010011000*QR_001000010000+P_010111000*QR_001000010010+P_010211000*QR_001000010020+P_110011000*QR_001000010100+P_110111000*QR_001000010110+P_110211000*QR_001000010120);
ans_temp[ans_id*18+5]+=Pmtrx[4]*(P_010011000*QR_000001010000+P_010111000*QR_000001010010+P_010211000*QR_000001010020+P_110011000*QR_000001010100+P_110111000*QR_000001010110+P_110211000*QR_000001010120);
ans_temp[ans_id*18+5]+=Pmtrx[5]*(P_010011000*QR_000000011000+P_010111000*QR_000000011010+P_010211000*QR_000000011020+P_110011000*QR_000000011100+P_110111000*QR_000000011110+P_110211000*QR_000000011120);
ans_temp[ans_id*18+3]+=Pmtrx[6]*(P_010010001*QR_011000000000+P_010010101*QR_011000000001+P_010110001*QR_011000000010+P_010110101*QR_011000000011+P_110010001*QR_011000000100+P_110010101*QR_011000000101+P_110110001*QR_011000000110+P_110110101*QR_011000000111);
ans_temp[ans_id*18+3]+=Pmtrx[7]*(P_010010001*QR_010001000000+P_010010101*QR_010001000001+P_010110001*QR_010001000010+P_010110101*QR_010001000011+P_110010001*QR_010001000100+P_110010101*QR_010001000101+P_110110001*QR_010001000110+P_110110101*QR_010001000111);
ans_temp[ans_id*18+3]+=Pmtrx[8]*(P_010010001*QR_010000001000+P_010010101*QR_010000001001+P_010110001*QR_010000001010+P_010110101*QR_010000001011+P_110010001*QR_010000001100+P_110010101*QR_010000001101+P_110110001*QR_010000001110+P_110110101*QR_010000001111);
ans_temp[ans_id*18+4]+=Pmtrx[6]*(P_010010001*QR_001010000000+P_010010101*QR_001010000001+P_010110001*QR_001010000010+P_010110101*QR_001010000011+P_110010001*QR_001010000100+P_110010101*QR_001010000101+P_110110001*QR_001010000110+P_110110101*QR_001010000111);
ans_temp[ans_id*18+4]+=Pmtrx[7]*(P_010010001*QR_000011000000+P_010010101*QR_000011000001+P_010110001*QR_000011000010+P_010110101*QR_000011000011+P_110010001*QR_000011000100+P_110010101*QR_000011000101+P_110110001*QR_000011000110+P_110110101*QR_000011000111);
ans_temp[ans_id*18+4]+=Pmtrx[8]*(P_010010001*QR_000010001000+P_010010101*QR_000010001001+P_010110001*QR_000010001010+P_010110101*QR_000010001011+P_110010001*QR_000010001100+P_110010101*QR_000010001101+P_110110001*QR_000010001110+P_110110101*QR_000010001111);
ans_temp[ans_id*18+5]+=Pmtrx[6]*(P_010010001*QR_001000010000+P_010010101*QR_001000010001+P_010110001*QR_001000010010+P_010110101*QR_001000010011+P_110010001*QR_001000010100+P_110010101*QR_001000010101+P_110110001*QR_001000010110+P_110110101*QR_001000010111);
ans_temp[ans_id*18+5]+=Pmtrx[7]*(P_010010001*QR_000001010000+P_010010101*QR_000001010001+P_010110001*QR_000001010010+P_010110101*QR_000001010011+P_110010001*QR_000001010100+P_110010101*QR_000001010101+P_110110001*QR_000001010110+P_110110101*QR_000001010111);
ans_temp[ans_id*18+5]+=Pmtrx[8]*(P_010010001*QR_000000011000+P_010010101*QR_000000011001+P_010110001*QR_000000011010+P_010110101*QR_000000011011+P_110010001*QR_000000011100+P_110010101*QR_000000011101+P_110110001*QR_000000011110+P_110110101*QR_000000011111);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(P_001020000*QR_011000000000+P_001120000*QR_011000000010+P_001220000*QR_011000000020+P_101020000*QR_011000000100+P_101120000*QR_011000000110+P_101220000*QR_011000000120);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(P_001020000*QR_010001000000+P_001120000*QR_010001000010+P_001220000*QR_010001000020+P_101020000*QR_010001000100+P_101120000*QR_010001000110+P_101220000*QR_010001000120);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(P_001020000*QR_010000001000+P_001120000*QR_010000001010+P_001220000*QR_010000001020+P_101020000*QR_010000001100+P_101120000*QR_010000001110+P_101220000*QR_010000001120);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(P_001020000*QR_001010000000+P_001120000*QR_001010000010+P_001220000*QR_001010000020+P_101020000*QR_001010000100+P_101120000*QR_001010000110+P_101220000*QR_001010000120);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(P_001020000*QR_000011000000+P_001120000*QR_000011000010+P_001220000*QR_000011000020+P_101020000*QR_000011000100+P_101120000*QR_000011000110+P_101220000*QR_000011000120);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(P_001020000*QR_000010001000+P_001120000*QR_000010001010+P_001220000*QR_000010001020+P_101020000*QR_000010001100+P_101120000*QR_000010001110+P_101220000*QR_000010001120);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(P_001020000*QR_001000010000+P_001120000*QR_001000010010+P_001220000*QR_001000010020+P_101020000*QR_001000010100+P_101120000*QR_001000010110+P_101220000*QR_001000010120);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(P_001020000*QR_000001010000+P_001120000*QR_000001010010+P_001220000*QR_000001010020+P_101020000*QR_000001010100+P_101120000*QR_000001010110+P_101220000*QR_000001010120);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(P_001020000*QR_000000011000+P_001120000*QR_000000011010+P_001220000*QR_000000011020+P_101020000*QR_000000011100+P_101120000*QR_000000011110+P_101220000*QR_000000011120);
ans_temp[ans_id*18+6]+=Pmtrx[3]*(P_000021000*QR_011000000000+P_000121000*QR_011000000010+P_000221000*QR_011000000020+P_000321000*QR_011000000030);
ans_temp[ans_id*18+6]+=Pmtrx[4]*(P_000021000*QR_010001000000+P_000121000*QR_010001000010+P_000221000*QR_010001000020+P_000321000*QR_010001000030);
ans_temp[ans_id*18+6]+=Pmtrx[5]*(P_000021000*QR_010000001000+P_000121000*QR_010000001010+P_000221000*QR_010000001020+P_000321000*QR_010000001030);
ans_temp[ans_id*18+7]+=Pmtrx[3]*(P_000021000*QR_001010000000+P_000121000*QR_001010000010+P_000221000*QR_001010000020+P_000321000*QR_001010000030);
ans_temp[ans_id*18+7]+=Pmtrx[4]*(P_000021000*QR_000011000000+P_000121000*QR_000011000010+P_000221000*QR_000011000020+P_000321000*QR_000011000030);
ans_temp[ans_id*18+7]+=Pmtrx[5]*(P_000021000*QR_000010001000+P_000121000*QR_000010001010+P_000221000*QR_000010001020+P_000321000*QR_000010001030);
ans_temp[ans_id*18+8]+=Pmtrx[3]*(P_000021000*QR_001000010000+P_000121000*QR_001000010010+P_000221000*QR_001000010020+P_000321000*QR_001000010030);
ans_temp[ans_id*18+8]+=Pmtrx[4]*(P_000021000*QR_000001010000+P_000121000*QR_000001010010+P_000221000*QR_000001010020+P_000321000*QR_000001010030);
ans_temp[ans_id*18+8]+=Pmtrx[5]*(P_000021000*QR_000000011000+P_000121000*QR_000000011010+P_000221000*QR_000000011020+P_000321000*QR_000000011030);
ans_temp[ans_id*18+6]+=Pmtrx[6]*(P_000020001*QR_011000000000+P_000020101*QR_011000000001+P_000120001*QR_011000000010+P_000120101*QR_011000000011+P_000220001*QR_011000000020+P_000220101*QR_011000000021);
ans_temp[ans_id*18+6]+=Pmtrx[7]*(P_000020001*QR_010001000000+P_000020101*QR_010001000001+P_000120001*QR_010001000010+P_000120101*QR_010001000011+P_000220001*QR_010001000020+P_000220101*QR_010001000021);
ans_temp[ans_id*18+6]+=Pmtrx[8]*(P_000020001*QR_010000001000+P_000020101*QR_010000001001+P_000120001*QR_010000001010+P_000120101*QR_010000001011+P_000220001*QR_010000001020+P_000220101*QR_010000001021);
ans_temp[ans_id*18+7]+=Pmtrx[6]*(P_000020001*QR_001010000000+P_000020101*QR_001010000001+P_000120001*QR_001010000010+P_000120101*QR_001010000011+P_000220001*QR_001010000020+P_000220101*QR_001010000021);
ans_temp[ans_id*18+7]+=Pmtrx[7]*(P_000020001*QR_000011000000+P_000020101*QR_000011000001+P_000120001*QR_000011000010+P_000120101*QR_000011000011+P_000220001*QR_000011000020+P_000220101*QR_000011000021);
ans_temp[ans_id*18+7]+=Pmtrx[8]*(P_000020001*QR_000010001000+P_000020101*QR_000010001001+P_000120001*QR_000010001010+P_000120101*QR_000010001011+P_000220001*QR_000010001020+P_000220101*QR_000010001021);
ans_temp[ans_id*18+8]+=Pmtrx[6]*(P_000020001*QR_001000010000+P_000020101*QR_001000010001+P_000120001*QR_001000010010+P_000120101*QR_001000010011+P_000220001*QR_001000010020+P_000220101*QR_001000010021);
ans_temp[ans_id*18+8]+=Pmtrx[7]*(P_000020001*QR_000001010000+P_000020101*QR_000001010001+P_000120001*QR_000001010010+P_000120101*QR_000001010011+P_000220001*QR_000001010020+P_000220101*QR_000001010021);
ans_temp[ans_id*18+8]+=Pmtrx[8]*(P_000020001*QR_000000011000+P_000020101*QR_000000011001+P_000120001*QR_000000011010+P_000120101*QR_000000011011+P_000220001*QR_000000011020+P_000220101*QR_000000011021);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(P_011000010*QR_011000000000+P_011000110*QR_011000000001+P_111000010*QR_011000000100+P_111000110*QR_011000000101+P_211000010*QR_011000000200+P_211000110*QR_011000000201);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(P_011000010*QR_010001000000+P_011000110*QR_010001000001+P_111000010*QR_010001000100+P_111000110*QR_010001000101+P_211000010*QR_010001000200+P_211000110*QR_010001000201);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(P_011000010*QR_010000001000+P_011000110*QR_010000001001+P_111000010*QR_010000001100+P_111000110*QR_010000001101+P_211000010*QR_010000001200+P_211000110*QR_010000001201);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(P_011000010*QR_001010000000+P_011000110*QR_001010000001+P_111000010*QR_001010000100+P_111000110*QR_001010000101+P_211000010*QR_001010000200+P_211000110*QR_001010000201);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(P_011000010*QR_000011000000+P_011000110*QR_000011000001+P_111000010*QR_000011000100+P_111000110*QR_000011000101+P_211000010*QR_000011000200+P_211000110*QR_000011000201);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(P_011000010*QR_000010001000+P_011000110*QR_000010001001+P_111000010*QR_000010001100+P_111000110*QR_000010001101+P_211000010*QR_000010001200+P_211000110*QR_000010001201);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(P_011000010*QR_001000010000+P_011000110*QR_001000010001+P_111000010*QR_001000010100+P_111000110*QR_001000010101+P_211000010*QR_001000010200+P_211000110*QR_001000010201);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(P_011000010*QR_000001010000+P_011000110*QR_000001010001+P_111000010*QR_000001010100+P_111000110*QR_000001010101+P_211000010*QR_000001010200+P_211000110*QR_000001010201);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(P_011000010*QR_000000011000+P_011000110*QR_000000011001+P_111000010*QR_000000011100+P_111000110*QR_000000011101+P_211000010*QR_000000011200+P_211000110*QR_000000011201);
ans_temp[ans_id*18+9]+=Pmtrx[3]*(P_010001010*QR_011000000000+P_010001110*QR_011000000001+P_010101010*QR_011000000010+P_010101110*QR_011000000011+P_110001010*QR_011000000100+P_110001110*QR_011000000101+P_110101010*QR_011000000110+P_110101110*QR_011000000111);
ans_temp[ans_id*18+9]+=Pmtrx[4]*(P_010001010*QR_010001000000+P_010001110*QR_010001000001+P_010101010*QR_010001000010+P_010101110*QR_010001000011+P_110001010*QR_010001000100+P_110001110*QR_010001000101+P_110101010*QR_010001000110+P_110101110*QR_010001000111);
ans_temp[ans_id*18+9]+=Pmtrx[5]*(P_010001010*QR_010000001000+P_010001110*QR_010000001001+P_010101010*QR_010000001010+P_010101110*QR_010000001011+P_110001010*QR_010000001100+P_110001110*QR_010000001101+P_110101010*QR_010000001110+P_110101110*QR_010000001111);
ans_temp[ans_id*18+10]+=Pmtrx[3]*(P_010001010*QR_001010000000+P_010001110*QR_001010000001+P_010101010*QR_001010000010+P_010101110*QR_001010000011+P_110001010*QR_001010000100+P_110001110*QR_001010000101+P_110101010*QR_001010000110+P_110101110*QR_001010000111);
ans_temp[ans_id*18+10]+=Pmtrx[4]*(P_010001010*QR_000011000000+P_010001110*QR_000011000001+P_010101010*QR_000011000010+P_010101110*QR_000011000011+P_110001010*QR_000011000100+P_110001110*QR_000011000101+P_110101010*QR_000011000110+P_110101110*QR_000011000111);
ans_temp[ans_id*18+10]+=Pmtrx[5]*(P_010001010*QR_000010001000+P_010001110*QR_000010001001+P_010101010*QR_000010001010+P_010101110*QR_000010001011+P_110001010*QR_000010001100+P_110001110*QR_000010001101+P_110101010*QR_000010001110+P_110101110*QR_000010001111);
ans_temp[ans_id*18+11]+=Pmtrx[3]*(P_010001010*QR_001000010000+P_010001110*QR_001000010001+P_010101010*QR_001000010010+P_010101110*QR_001000010011+P_110001010*QR_001000010100+P_110001110*QR_001000010101+P_110101010*QR_001000010110+P_110101110*QR_001000010111);
ans_temp[ans_id*18+11]+=Pmtrx[4]*(P_010001010*QR_000001010000+P_010001110*QR_000001010001+P_010101010*QR_000001010010+P_010101110*QR_000001010011+P_110001010*QR_000001010100+P_110001110*QR_000001010101+P_110101010*QR_000001010110+P_110101110*QR_000001010111);
ans_temp[ans_id*18+11]+=Pmtrx[5]*(P_010001010*QR_000000011000+P_010001110*QR_000000011001+P_010101010*QR_000000011010+P_010101110*QR_000000011011+P_110001010*QR_000000011100+P_110001110*QR_000000011101+P_110101010*QR_000000011110+P_110101110*QR_000000011111);
ans_temp[ans_id*18+9]+=Pmtrx[6]*(P_010000011*QR_011000000000+P_010000111*QR_011000000001+P_010000211*QR_011000000002+P_110000011*QR_011000000100+P_110000111*QR_011000000101+P_110000211*QR_011000000102);
ans_temp[ans_id*18+9]+=Pmtrx[7]*(P_010000011*QR_010001000000+P_010000111*QR_010001000001+P_010000211*QR_010001000002+P_110000011*QR_010001000100+P_110000111*QR_010001000101+P_110000211*QR_010001000102);
ans_temp[ans_id*18+9]+=Pmtrx[8]*(P_010000011*QR_010000001000+P_010000111*QR_010000001001+P_010000211*QR_010000001002+P_110000011*QR_010000001100+P_110000111*QR_010000001101+P_110000211*QR_010000001102);
ans_temp[ans_id*18+10]+=Pmtrx[6]*(P_010000011*QR_001010000000+P_010000111*QR_001010000001+P_010000211*QR_001010000002+P_110000011*QR_001010000100+P_110000111*QR_001010000101+P_110000211*QR_001010000102);
ans_temp[ans_id*18+10]+=Pmtrx[7]*(P_010000011*QR_000011000000+P_010000111*QR_000011000001+P_010000211*QR_000011000002+P_110000011*QR_000011000100+P_110000111*QR_000011000101+P_110000211*QR_000011000102);
ans_temp[ans_id*18+10]+=Pmtrx[8]*(P_010000011*QR_000010001000+P_010000111*QR_000010001001+P_010000211*QR_000010001002+P_110000011*QR_000010001100+P_110000111*QR_000010001101+P_110000211*QR_000010001102);
ans_temp[ans_id*18+11]+=Pmtrx[6]*(P_010000011*QR_001000010000+P_010000111*QR_001000010001+P_010000211*QR_001000010002+P_110000011*QR_001000010100+P_110000111*QR_001000010101+P_110000211*QR_001000010102);
ans_temp[ans_id*18+11]+=Pmtrx[7]*(P_010000011*QR_000001010000+P_010000111*QR_000001010001+P_010000211*QR_000001010002+P_110000011*QR_000001010100+P_110000111*QR_000001010101+P_110000211*QR_000001010102);
ans_temp[ans_id*18+11]+=Pmtrx[8]*(P_010000011*QR_000000011000+P_010000111*QR_000000011001+P_010000211*QR_000000011002+P_110000011*QR_000000011100+P_110000111*QR_000000011101+P_110000211*QR_000000011102);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(P_001010010*QR_011000000000+P_001010110*QR_011000000001+P_001110010*QR_011000000010+P_001110110*QR_011000000011+P_101010010*QR_011000000100+P_101010110*QR_011000000101+P_101110010*QR_011000000110+P_101110110*QR_011000000111);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(P_001010010*QR_010001000000+P_001010110*QR_010001000001+P_001110010*QR_010001000010+P_001110110*QR_010001000011+P_101010010*QR_010001000100+P_101010110*QR_010001000101+P_101110010*QR_010001000110+P_101110110*QR_010001000111);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(P_001010010*QR_010000001000+P_001010110*QR_010000001001+P_001110010*QR_010000001010+P_001110110*QR_010000001011+P_101010010*QR_010000001100+P_101010110*QR_010000001101+P_101110010*QR_010000001110+P_101110110*QR_010000001111);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(P_001010010*QR_001010000000+P_001010110*QR_001010000001+P_001110010*QR_001010000010+P_001110110*QR_001010000011+P_101010010*QR_001010000100+P_101010110*QR_001010000101+P_101110010*QR_001010000110+P_101110110*QR_001010000111);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(P_001010010*QR_000011000000+P_001010110*QR_000011000001+P_001110010*QR_000011000010+P_001110110*QR_000011000011+P_101010010*QR_000011000100+P_101010110*QR_000011000101+P_101110010*QR_000011000110+P_101110110*QR_000011000111);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(P_001010010*QR_000010001000+P_001010110*QR_000010001001+P_001110010*QR_000010001010+P_001110110*QR_000010001011+P_101010010*QR_000010001100+P_101010110*QR_000010001101+P_101110010*QR_000010001110+P_101110110*QR_000010001111);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(P_001010010*QR_001000010000+P_001010110*QR_001000010001+P_001110010*QR_001000010010+P_001110110*QR_001000010011+P_101010010*QR_001000010100+P_101010110*QR_001000010101+P_101110010*QR_001000010110+P_101110110*QR_001000010111);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(P_001010010*QR_000001010000+P_001010110*QR_000001010001+P_001110010*QR_000001010010+P_001110110*QR_000001010011+P_101010010*QR_000001010100+P_101010110*QR_000001010101+P_101110010*QR_000001010110+P_101110110*QR_000001010111);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(P_001010010*QR_000000011000+P_001010110*QR_000000011001+P_001110010*QR_000000011010+P_001110110*QR_000000011011+P_101010010*QR_000000011100+P_101010110*QR_000000011101+P_101110010*QR_000000011110+P_101110110*QR_000000011111);
ans_temp[ans_id*18+12]+=Pmtrx[3]*(P_000011010*QR_011000000000+P_000011110*QR_011000000001+P_000111010*QR_011000000010+P_000111110*QR_011000000011+P_000211010*QR_011000000020+P_000211110*QR_011000000021);
ans_temp[ans_id*18+12]+=Pmtrx[4]*(P_000011010*QR_010001000000+P_000011110*QR_010001000001+P_000111010*QR_010001000010+P_000111110*QR_010001000011+P_000211010*QR_010001000020+P_000211110*QR_010001000021);
ans_temp[ans_id*18+12]+=Pmtrx[5]*(P_000011010*QR_010000001000+P_000011110*QR_010000001001+P_000111010*QR_010000001010+P_000111110*QR_010000001011+P_000211010*QR_010000001020+P_000211110*QR_010000001021);
ans_temp[ans_id*18+13]+=Pmtrx[3]*(P_000011010*QR_001010000000+P_000011110*QR_001010000001+P_000111010*QR_001010000010+P_000111110*QR_001010000011+P_000211010*QR_001010000020+P_000211110*QR_001010000021);
ans_temp[ans_id*18+13]+=Pmtrx[4]*(P_000011010*QR_000011000000+P_000011110*QR_000011000001+P_000111010*QR_000011000010+P_000111110*QR_000011000011+P_000211010*QR_000011000020+P_000211110*QR_000011000021);
ans_temp[ans_id*18+13]+=Pmtrx[5]*(P_000011010*QR_000010001000+P_000011110*QR_000010001001+P_000111010*QR_000010001010+P_000111110*QR_000010001011+P_000211010*QR_000010001020+P_000211110*QR_000010001021);
ans_temp[ans_id*18+14]+=Pmtrx[3]*(P_000011010*QR_001000010000+P_000011110*QR_001000010001+P_000111010*QR_001000010010+P_000111110*QR_001000010011+P_000211010*QR_001000010020+P_000211110*QR_001000010021);
ans_temp[ans_id*18+14]+=Pmtrx[4]*(P_000011010*QR_000001010000+P_000011110*QR_000001010001+P_000111010*QR_000001010010+P_000111110*QR_000001010011+P_000211010*QR_000001010020+P_000211110*QR_000001010021);
ans_temp[ans_id*18+14]+=Pmtrx[5]*(P_000011010*QR_000000011000+P_000011110*QR_000000011001+P_000111010*QR_000000011010+P_000111110*QR_000000011011+P_000211010*QR_000000011020+P_000211110*QR_000000011021);
ans_temp[ans_id*18+12]+=Pmtrx[6]*(P_000010011*QR_011000000000+P_000010111*QR_011000000001+P_000010211*QR_011000000002+P_000110011*QR_011000000010+P_000110111*QR_011000000011+P_000110211*QR_011000000012);
ans_temp[ans_id*18+12]+=Pmtrx[7]*(P_000010011*QR_010001000000+P_000010111*QR_010001000001+P_000010211*QR_010001000002+P_000110011*QR_010001000010+P_000110111*QR_010001000011+P_000110211*QR_010001000012);
ans_temp[ans_id*18+12]+=Pmtrx[8]*(P_000010011*QR_010000001000+P_000010111*QR_010000001001+P_000010211*QR_010000001002+P_000110011*QR_010000001010+P_000110111*QR_010000001011+P_000110211*QR_010000001012);
ans_temp[ans_id*18+13]+=Pmtrx[6]*(P_000010011*QR_001010000000+P_000010111*QR_001010000001+P_000010211*QR_001010000002+P_000110011*QR_001010000010+P_000110111*QR_001010000011+P_000110211*QR_001010000012);
ans_temp[ans_id*18+13]+=Pmtrx[7]*(P_000010011*QR_000011000000+P_000010111*QR_000011000001+P_000010211*QR_000011000002+P_000110011*QR_000011000010+P_000110111*QR_000011000011+P_000110211*QR_000011000012);
ans_temp[ans_id*18+13]+=Pmtrx[8]*(P_000010011*QR_000010001000+P_000010111*QR_000010001001+P_000010211*QR_000010001002+P_000110011*QR_000010001010+P_000110111*QR_000010001011+P_000110211*QR_000010001012);
ans_temp[ans_id*18+14]+=Pmtrx[6]*(P_000010011*QR_001000010000+P_000010111*QR_001000010001+P_000010211*QR_001000010002+P_000110011*QR_001000010010+P_000110111*QR_001000010011+P_000110211*QR_001000010012);
ans_temp[ans_id*18+14]+=Pmtrx[7]*(P_000010011*QR_000001010000+P_000010111*QR_000001010001+P_000010211*QR_000001010002+P_000110011*QR_000001010010+P_000110111*QR_000001010011+P_000110211*QR_000001010012);
ans_temp[ans_id*18+14]+=Pmtrx[8]*(P_000010011*QR_000000011000+P_000010111*QR_000000011001+P_000010211*QR_000000011002+P_000110011*QR_000000011010+P_000110111*QR_000000011011+P_000110211*QR_000000011012);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(P_001000020*QR_011000000000+P_001000120*QR_011000000001+P_001000220*QR_011000000002+P_101000020*QR_011000000100+P_101000120*QR_011000000101+P_101000220*QR_011000000102);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(P_001000020*QR_010001000000+P_001000120*QR_010001000001+P_001000220*QR_010001000002+P_101000020*QR_010001000100+P_101000120*QR_010001000101+P_101000220*QR_010001000102);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(P_001000020*QR_010000001000+P_001000120*QR_010000001001+P_001000220*QR_010000001002+P_101000020*QR_010000001100+P_101000120*QR_010000001101+P_101000220*QR_010000001102);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(P_001000020*QR_001010000000+P_001000120*QR_001010000001+P_001000220*QR_001010000002+P_101000020*QR_001010000100+P_101000120*QR_001010000101+P_101000220*QR_001010000102);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(P_001000020*QR_000011000000+P_001000120*QR_000011000001+P_001000220*QR_000011000002+P_101000020*QR_000011000100+P_101000120*QR_000011000101+P_101000220*QR_000011000102);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(P_001000020*QR_000010001000+P_001000120*QR_000010001001+P_001000220*QR_000010001002+P_101000020*QR_000010001100+P_101000120*QR_000010001101+P_101000220*QR_000010001102);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(P_001000020*QR_001000010000+P_001000120*QR_001000010001+P_001000220*QR_001000010002+P_101000020*QR_001000010100+P_101000120*QR_001000010101+P_101000220*QR_001000010102);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(P_001000020*QR_000001010000+P_001000120*QR_000001010001+P_001000220*QR_000001010002+P_101000020*QR_000001010100+P_101000120*QR_000001010101+P_101000220*QR_000001010102);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(P_001000020*QR_000000011000+P_001000120*QR_000000011001+P_001000220*QR_000000011002+P_101000020*QR_000000011100+P_101000120*QR_000000011101+P_101000220*QR_000000011102);
ans_temp[ans_id*18+15]+=Pmtrx[3]*(P_000001020*QR_011000000000+P_000001120*QR_011000000001+P_000001220*QR_011000000002+P_000101020*QR_011000000010+P_000101120*QR_011000000011+P_000101220*QR_011000000012);
ans_temp[ans_id*18+15]+=Pmtrx[4]*(P_000001020*QR_010001000000+P_000001120*QR_010001000001+P_000001220*QR_010001000002+P_000101020*QR_010001000010+P_000101120*QR_010001000011+P_000101220*QR_010001000012);
ans_temp[ans_id*18+15]+=Pmtrx[5]*(P_000001020*QR_010000001000+P_000001120*QR_010000001001+P_000001220*QR_010000001002+P_000101020*QR_010000001010+P_000101120*QR_010000001011+P_000101220*QR_010000001012);
ans_temp[ans_id*18+16]+=Pmtrx[3]*(P_000001020*QR_001010000000+P_000001120*QR_001010000001+P_000001220*QR_001010000002+P_000101020*QR_001010000010+P_000101120*QR_001010000011+P_000101220*QR_001010000012);
ans_temp[ans_id*18+16]+=Pmtrx[4]*(P_000001020*QR_000011000000+P_000001120*QR_000011000001+P_000001220*QR_000011000002+P_000101020*QR_000011000010+P_000101120*QR_000011000011+P_000101220*QR_000011000012);
ans_temp[ans_id*18+16]+=Pmtrx[5]*(P_000001020*QR_000010001000+P_000001120*QR_000010001001+P_000001220*QR_000010001002+P_000101020*QR_000010001010+P_000101120*QR_000010001011+P_000101220*QR_000010001012);
ans_temp[ans_id*18+17]+=Pmtrx[3]*(P_000001020*QR_001000010000+P_000001120*QR_001000010001+P_000001220*QR_001000010002+P_000101020*QR_001000010010+P_000101120*QR_001000010011+P_000101220*QR_001000010012);
ans_temp[ans_id*18+17]+=Pmtrx[4]*(P_000001020*QR_000001010000+P_000001120*QR_000001010001+P_000001220*QR_000001010002+P_000101020*QR_000001010010+P_000101120*QR_000001010011+P_000101220*QR_000001010012);
ans_temp[ans_id*18+17]+=Pmtrx[5]*(P_000001020*QR_000000011000+P_000001120*QR_000000011001+P_000001220*QR_000000011002+P_000101020*QR_000000011010+P_000101120*QR_000000011011+P_000101220*QR_000000011012);
ans_temp[ans_id*18+15]+=Pmtrx[6]*(P_000000021*QR_011000000000+P_000000121*QR_011000000001+P_000000221*QR_011000000002+P_000000321*QR_011000000003);
ans_temp[ans_id*18+15]+=Pmtrx[7]*(P_000000021*QR_010001000000+P_000000121*QR_010001000001+P_000000221*QR_010001000002+P_000000321*QR_010001000003);
ans_temp[ans_id*18+15]+=Pmtrx[8]*(P_000000021*QR_010000001000+P_000000121*QR_010000001001+P_000000221*QR_010000001002+P_000000321*QR_010000001003);
ans_temp[ans_id*18+16]+=Pmtrx[6]*(P_000000021*QR_001010000000+P_000000121*QR_001010000001+P_000000221*QR_001010000002+P_000000321*QR_001010000003);
ans_temp[ans_id*18+16]+=Pmtrx[7]*(P_000000021*QR_000011000000+P_000000121*QR_000011000001+P_000000221*QR_000011000002+P_000000321*QR_000011000003);
ans_temp[ans_id*18+16]+=Pmtrx[8]*(P_000000021*QR_000010001000+P_000000121*QR_000010001001+P_000000221*QR_000010001002+P_000000321*QR_000010001003);
ans_temp[ans_id*18+17]+=Pmtrx[6]*(P_000000021*QR_001000010000+P_000000121*QR_001000010001+P_000000221*QR_001000010002+P_000000321*QR_001000010003);
ans_temp[ans_id*18+17]+=Pmtrx[7]*(P_000000021*QR_000001010000+P_000000121*QR_000001010001+P_000000221*QR_000001010002+P_000000321*QR_000001010003);
ans_temp[ans_id*18+17]+=Pmtrx[8]*(P_000000021*QR_000000011000+P_000000121*QR_000000011001+P_000000221*QR_000000011002+P_000000321*QR_000000011003);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kp_ddpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_001[3];
temp_int2=tex1Dfetch(tex_PB,jj*3+0);
Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+1);
Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+2);
Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[7];
Ft_fs_6(6,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[6]*=64*alphaT*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
double R_100[6];
double R_200[5];
double R_300[4];
double R_400[3];
double R_500[2];
double R_600[1];
double R_010[6];
double R_110[5];
double R_210[4];
double R_310[3];
double R_410[2];
double R_510[1];
double R_020[5];
double R_120[4];
double R_220[3];
double R_320[2];
double R_420[1];
double R_030[4];
double R_130[3];
double R_230[2];
double R_330[1];
double R_040[3];
double R_140[2];
double R_240[1];
double R_050[2];
double R_150[1];
double R_060[1];
double R_001[6];
double R_101[5];
double R_201[4];
double R_301[3];
double R_401[2];
double R_501[1];
double R_011[5];
double R_111[4];
double R_211[3];
double R_311[2];
double R_411[1];
double R_021[4];
double R_121[3];
double R_221[2];
double R_321[1];
double R_031[3];
double R_131[2];
double R_231[1];
double R_041[2];
double R_141[1];
double R_051[1];
double R_002[5];
double R_102[4];
double R_202[3];
double R_302[2];
double R_402[1];
double R_012[4];
double R_112[3];
double R_212[2];
double R_312[1];
double R_022[3];
double R_122[2];
double R_222[1];
double R_032[2];
double R_132[1];
double R_042[1];
double R_003[4];
double R_103[3];
double R_203[2];
double R_303[1];
double R_013[3];
double R_113[2];
double R_213[1];
double R_023[2];
double R_123[1];
double R_033[1];
double R_004[3];
double R_104[2];
double R_204[1];
double R_014[2];
double R_114[1];
double R_024[1];
double R_005[2];
double R_105[1];
double R_015[1];
double R_006[1];
for(int i=0;i<6;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<6;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<6;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<5;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<5;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<5;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<5;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<5;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<5;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<4;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<4;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<4;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<4;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<4;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<4;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<4;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<4;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<4;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<3;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<3;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<3;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<3;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<3;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<3;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<3;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<3;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<3;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<3;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<3;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<3;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<3;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<3;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<3;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<2;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<2;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<2;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<2;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<2;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<2;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<2;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<2;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<2;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<2;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<2;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<2;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<2;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<2;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<2;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<2;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<2;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<2;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<2;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<2;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<2;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
for(int i=0;i<1;i++){
R_600[i]=TX*R_500[i+1]+5*R_400[i+1];
}
for(int i=0;i<1;i++){
R_510[i]=TY*R_500[i+1];
}
for(int i=0;i<1;i++){
R_420[i]=TX*R_320[i+1]+3*R_220[i+1];
}
for(int i=0;i<1;i++){
R_330[i]=TX*R_230[i+1]+2*R_130[i+1];
}
for(int i=0;i<1;i++){
R_240[i]=TY*R_230[i+1]+3*R_220[i+1];
}
for(int i=0;i<1;i++){
R_150[i]=TX*R_050[i+1];
}
for(int i=0;i<1;i++){
R_060[i]=TY*R_050[i+1]+5*R_040[i+1];
}
for(int i=0;i<1;i++){
R_501[i]=TZ*R_500[i+1];
}
for(int i=0;i<1;i++){
R_411[i]=TY*R_401[i+1];
}
for(int i=0;i<1;i++){
R_321[i]=TZ*R_320[i+1];
}
for(int i=0;i<1;i++){
R_231[i]=TZ*R_230[i+1];
}
for(int i=0;i<1;i++){
R_141[i]=TX*R_041[i+1];
}
for(int i=0;i<1;i++){
R_051[i]=TZ*R_050[i+1];
}
for(int i=0;i<1;i++){
R_402[i]=TX*R_302[i+1]+3*R_202[i+1];
}
for(int i=0;i<1;i++){
R_312[i]=TY*R_302[i+1];
}
for(int i=0;i<1;i++){
R_222[i]=TX*R_122[i+1]+R_022[i+1];
}
for(int i=0;i<1;i++){
R_132[i]=TX*R_032[i+1];
}
for(int i=0;i<1;i++){
R_042[i]=TY*R_032[i+1]+3*R_022[i+1];
}
for(int i=0;i<1;i++){
R_303[i]=TX*R_203[i+1]+2*R_103[i+1];
}
for(int i=0;i<1;i++){
R_213[i]=TY*R_203[i+1];
}
for(int i=0;i<1;i++){
R_123[i]=TX*R_023[i+1];
}
for(int i=0;i<1;i++){
R_033[i]=TY*R_023[i+1]+2*R_013[i+1];
}
for(int i=0;i<1;i++){
R_204[i]=TZ*R_203[i+1]+3*R_202[i+1];
}
for(int i=0;i<1;i++){
R_114[i]=TX*R_014[i+1];
}
for(int i=0;i<1;i++){
R_024[i]=TZ*R_023[i+1]+3*R_022[i+1];
}
for(int i=0;i<1;i++){
R_105[i]=TX*R_005[i+1];
}
for(int i=0;i<1;i++){
R_015[i]=TY*R_005[i+1];
}
for(int i=0;i<1;i++){
R_006[i]=TZ*R_005[i+1]+5*R_004[i+1];
}
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_012[3];
double Pd_112[3];
double Pd_212[3];
double Pd_312[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
double Pd_021[3];
double Pd_121[3];
double Pd_221[3];
double Pd_321[3];
double Pd_022[3];
double Pd_122[3];
double Pd_222[3];
double Pd_322[3];
double Pd_422[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_012[i]=Pd_111[i]+Pd_001[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_112[i]=2*Pd_211[i]+Pd_001[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_212[i]=Pd_001[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_312[i]=aPin1*Pd_211[i];
}
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
for(int i=0;i<3;i++){
Pd_021[i]=Pd_111[i]+Pd_010[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_121[i]=2*Pd_211[i]+Pd_010[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_221[i]=Pd_010[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_321[i]=aPin1*Pd_211[i];
}
for(int i=0;i<3;i++){
Pd_022[i]=Pd_112[i]+Pd_010[i]*Pd_012[i];
}
for(int i=0;i<3;i++){
Pd_122[i]=2*Pd_212[i]+Pd_010[i]*Pd_112[i]+aPin1*Pd_012[i];
}
for(int i=0;i<3;i++){
Pd_222[i]=3*Pd_312[i]+Pd_010[i]*Pd_212[i]+aPin1*Pd_112[i];
}
for(int i=0;i<3;i++){
Pd_322[i]=Pd_010[i]*Pd_312[i]+aPin1*Pd_212[i];
}
for(int i=0;i<3;i++){
Pd_422[i]=aPin1*Pd_312[i];
}
double P_022000000=Pd_022[0];
double P_122000000=Pd_122[0];
double P_222000000=Pd_222[0];
double P_322000000=Pd_322[0];
double P_422000000=Pd_422[0];
double P_021001000=Pd_021[0]*Pd_001[1];
double P_021101000=Pd_021[0]*Pd_101[1];
double P_121001000=Pd_121[0]*Pd_001[1];
double P_121101000=Pd_121[0]*Pd_101[1];
double P_221001000=Pd_221[0]*Pd_001[1];
double P_221101000=Pd_221[0]*Pd_101[1];
double P_321001000=Pd_321[0]*Pd_001[1];
double P_321101000=Pd_321[0]*Pd_101[1];
double P_020002000=Pd_020[0]*Pd_002[1];
double P_020102000=Pd_020[0]*Pd_102[1];
double P_020202000=Pd_020[0]*Pd_202[1];
double P_120002000=Pd_120[0]*Pd_002[1];
double P_120102000=Pd_120[0]*Pd_102[1];
double P_120202000=Pd_120[0]*Pd_202[1];
double P_220002000=Pd_220[0]*Pd_002[1];
double P_220102000=Pd_220[0]*Pd_102[1];
double P_220202000=Pd_220[0]*Pd_202[1];
double P_021000001=Pd_021[0]*Pd_001[2];
double P_021000101=Pd_021[0]*Pd_101[2];
double P_121000001=Pd_121[0]*Pd_001[2];
double P_121000101=Pd_121[0]*Pd_101[2];
double P_221000001=Pd_221[0]*Pd_001[2];
double P_221000101=Pd_221[0]*Pd_101[2];
double P_321000001=Pd_321[0]*Pd_001[2];
double P_321000101=Pd_321[0]*Pd_101[2];
double P_020001001=Pd_020[0]*Pd_001[1]*Pd_001[2];
double P_020001101=Pd_020[0]*Pd_001[1]*Pd_101[2];
double P_020101001=Pd_020[0]*Pd_101[1]*Pd_001[2];
double P_020101101=Pd_020[0]*Pd_101[1]*Pd_101[2];
double P_120001001=Pd_120[0]*Pd_001[1]*Pd_001[2];
double P_120001101=Pd_120[0]*Pd_001[1]*Pd_101[2];
double P_120101001=Pd_120[0]*Pd_101[1]*Pd_001[2];
double P_120101101=Pd_120[0]*Pd_101[1]*Pd_101[2];
double P_220001001=Pd_220[0]*Pd_001[1]*Pd_001[2];
double P_220001101=Pd_220[0]*Pd_001[1]*Pd_101[2];
double P_220101001=Pd_220[0]*Pd_101[1]*Pd_001[2];
double P_220101101=Pd_220[0]*Pd_101[1]*Pd_101[2];
double P_020000002=Pd_020[0]*Pd_002[2];
double P_020000102=Pd_020[0]*Pd_102[2];
double P_020000202=Pd_020[0]*Pd_202[2];
double P_120000002=Pd_120[0]*Pd_002[2];
double P_120000102=Pd_120[0]*Pd_102[2];
double P_120000202=Pd_120[0]*Pd_202[2];
double P_220000002=Pd_220[0]*Pd_002[2];
double P_220000102=Pd_220[0]*Pd_102[2];
double P_220000202=Pd_220[0]*Pd_202[2];
double P_012010000=Pd_012[0]*Pd_010[1];
double P_012110000=Pd_012[0]*Pd_110[1];
double P_112010000=Pd_112[0]*Pd_010[1];
double P_112110000=Pd_112[0]*Pd_110[1];
double P_212010000=Pd_212[0]*Pd_010[1];
double P_212110000=Pd_212[0]*Pd_110[1];
double P_312010000=Pd_312[0]*Pd_010[1];
double P_312110000=Pd_312[0]*Pd_110[1];
double P_011011000=Pd_011[0]*Pd_011[1];
double P_011111000=Pd_011[0]*Pd_111[1];
double P_011211000=Pd_011[0]*Pd_211[1];
double P_111011000=Pd_111[0]*Pd_011[1];
double P_111111000=Pd_111[0]*Pd_111[1];
double P_111211000=Pd_111[0]*Pd_211[1];
double P_211011000=Pd_211[0]*Pd_011[1];
double P_211111000=Pd_211[0]*Pd_111[1];
double P_211211000=Pd_211[0]*Pd_211[1];
double P_010012000=Pd_010[0]*Pd_012[1];
double P_010112000=Pd_010[0]*Pd_112[1];
double P_010212000=Pd_010[0]*Pd_212[1];
double P_010312000=Pd_010[0]*Pd_312[1];
double P_110012000=Pd_110[0]*Pd_012[1];
double P_110112000=Pd_110[0]*Pd_112[1];
double P_110212000=Pd_110[0]*Pd_212[1];
double P_110312000=Pd_110[0]*Pd_312[1];
double P_011010001=Pd_011[0]*Pd_010[1]*Pd_001[2];
double P_011010101=Pd_011[0]*Pd_010[1]*Pd_101[2];
double P_011110001=Pd_011[0]*Pd_110[1]*Pd_001[2];
double P_011110101=Pd_011[0]*Pd_110[1]*Pd_101[2];
double P_111010001=Pd_111[0]*Pd_010[1]*Pd_001[2];
double P_111010101=Pd_111[0]*Pd_010[1]*Pd_101[2];
double P_111110001=Pd_111[0]*Pd_110[1]*Pd_001[2];
double P_111110101=Pd_111[0]*Pd_110[1]*Pd_101[2];
double P_211010001=Pd_211[0]*Pd_010[1]*Pd_001[2];
double P_211010101=Pd_211[0]*Pd_010[1]*Pd_101[2];
double P_211110001=Pd_211[0]*Pd_110[1]*Pd_001[2];
double P_211110101=Pd_211[0]*Pd_110[1]*Pd_101[2];
double P_010011001=Pd_010[0]*Pd_011[1]*Pd_001[2];
double P_010011101=Pd_010[0]*Pd_011[1]*Pd_101[2];
double P_010111001=Pd_010[0]*Pd_111[1]*Pd_001[2];
double P_010111101=Pd_010[0]*Pd_111[1]*Pd_101[2];
double P_010211001=Pd_010[0]*Pd_211[1]*Pd_001[2];
double P_010211101=Pd_010[0]*Pd_211[1]*Pd_101[2];
double P_110011001=Pd_110[0]*Pd_011[1]*Pd_001[2];
double P_110011101=Pd_110[0]*Pd_011[1]*Pd_101[2];
double P_110111001=Pd_110[0]*Pd_111[1]*Pd_001[2];
double P_110111101=Pd_110[0]*Pd_111[1]*Pd_101[2];
double P_110211001=Pd_110[0]*Pd_211[1]*Pd_001[2];
double P_110211101=Pd_110[0]*Pd_211[1]*Pd_101[2];
double P_010010002=Pd_010[0]*Pd_010[1]*Pd_002[2];
double P_010010102=Pd_010[0]*Pd_010[1]*Pd_102[2];
double P_010010202=Pd_010[0]*Pd_010[1]*Pd_202[2];
double P_010110002=Pd_010[0]*Pd_110[1]*Pd_002[2];
double P_010110102=Pd_010[0]*Pd_110[1]*Pd_102[2];
double P_010110202=Pd_010[0]*Pd_110[1]*Pd_202[2];
double P_110010002=Pd_110[0]*Pd_010[1]*Pd_002[2];
double P_110010102=Pd_110[0]*Pd_010[1]*Pd_102[2];
double P_110010202=Pd_110[0]*Pd_010[1]*Pd_202[2];
double P_110110002=Pd_110[0]*Pd_110[1]*Pd_002[2];
double P_110110102=Pd_110[0]*Pd_110[1]*Pd_102[2];
double P_110110202=Pd_110[0]*Pd_110[1]*Pd_202[2];
double P_002020000=Pd_002[0]*Pd_020[1];
double P_002120000=Pd_002[0]*Pd_120[1];
double P_002220000=Pd_002[0]*Pd_220[1];
double P_102020000=Pd_102[0]*Pd_020[1];
double P_102120000=Pd_102[0]*Pd_120[1];
double P_102220000=Pd_102[0]*Pd_220[1];
double P_202020000=Pd_202[0]*Pd_020[1];
double P_202120000=Pd_202[0]*Pd_120[1];
double P_202220000=Pd_202[0]*Pd_220[1];
double P_001021000=Pd_001[0]*Pd_021[1];
double P_001121000=Pd_001[0]*Pd_121[1];
double P_001221000=Pd_001[0]*Pd_221[1];
double P_001321000=Pd_001[0]*Pd_321[1];
double P_101021000=Pd_101[0]*Pd_021[1];
double P_101121000=Pd_101[0]*Pd_121[1];
double P_101221000=Pd_101[0]*Pd_221[1];
double P_101321000=Pd_101[0]*Pd_321[1];
double P_000022000=Pd_022[1];
double P_000122000=Pd_122[1];
double P_000222000=Pd_222[1];
double P_000322000=Pd_322[1];
double P_000422000=Pd_422[1];
double P_001020001=Pd_001[0]*Pd_020[1]*Pd_001[2];
double P_001020101=Pd_001[0]*Pd_020[1]*Pd_101[2];
double P_001120001=Pd_001[0]*Pd_120[1]*Pd_001[2];
double P_001120101=Pd_001[0]*Pd_120[1]*Pd_101[2];
double P_001220001=Pd_001[0]*Pd_220[1]*Pd_001[2];
double P_001220101=Pd_001[0]*Pd_220[1]*Pd_101[2];
double P_101020001=Pd_101[0]*Pd_020[1]*Pd_001[2];
double P_101020101=Pd_101[0]*Pd_020[1]*Pd_101[2];
double P_101120001=Pd_101[0]*Pd_120[1]*Pd_001[2];
double P_101120101=Pd_101[0]*Pd_120[1]*Pd_101[2];
double P_101220001=Pd_101[0]*Pd_220[1]*Pd_001[2];
double P_101220101=Pd_101[0]*Pd_220[1]*Pd_101[2];
double P_000021001=Pd_021[1]*Pd_001[2];
double P_000021101=Pd_021[1]*Pd_101[2];
double P_000121001=Pd_121[1]*Pd_001[2];
double P_000121101=Pd_121[1]*Pd_101[2];
double P_000221001=Pd_221[1]*Pd_001[2];
double P_000221101=Pd_221[1]*Pd_101[2];
double P_000321001=Pd_321[1]*Pd_001[2];
double P_000321101=Pd_321[1]*Pd_101[2];
double P_000020002=Pd_020[1]*Pd_002[2];
double P_000020102=Pd_020[1]*Pd_102[2];
double P_000020202=Pd_020[1]*Pd_202[2];
double P_000120002=Pd_120[1]*Pd_002[2];
double P_000120102=Pd_120[1]*Pd_102[2];
double P_000120202=Pd_120[1]*Pd_202[2];
double P_000220002=Pd_220[1]*Pd_002[2];
double P_000220102=Pd_220[1]*Pd_102[2];
double P_000220202=Pd_220[1]*Pd_202[2];
double P_012000010=Pd_012[0]*Pd_010[2];
double P_012000110=Pd_012[0]*Pd_110[2];
double P_112000010=Pd_112[0]*Pd_010[2];
double P_112000110=Pd_112[0]*Pd_110[2];
double P_212000010=Pd_212[0]*Pd_010[2];
double P_212000110=Pd_212[0]*Pd_110[2];
double P_312000010=Pd_312[0]*Pd_010[2];
double P_312000110=Pd_312[0]*Pd_110[2];
double P_011001010=Pd_011[0]*Pd_001[1]*Pd_010[2];
double P_011001110=Pd_011[0]*Pd_001[1]*Pd_110[2];
double P_011101010=Pd_011[0]*Pd_101[1]*Pd_010[2];
double P_011101110=Pd_011[0]*Pd_101[1]*Pd_110[2];
double P_111001010=Pd_111[0]*Pd_001[1]*Pd_010[2];
double P_111001110=Pd_111[0]*Pd_001[1]*Pd_110[2];
double P_111101010=Pd_111[0]*Pd_101[1]*Pd_010[2];
double P_111101110=Pd_111[0]*Pd_101[1]*Pd_110[2];
double P_211001010=Pd_211[0]*Pd_001[1]*Pd_010[2];
double P_211001110=Pd_211[0]*Pd_001[1]*Pd_110[2];
double P_211101010=Pd_211[0]*Pd_101[1]*Pd_010[2];
double P_211101110=Pd_211[0]*Pd_101[1]*Pd_110[2];
double P_010002010=Pd_010[0]*Pd_002[1]*Pd_010[2];
double P_010002110=Pd_010[0]*Pd_002[1]*Pd_110[2];
double P_010102010=Pd_010[0]*Pd_102[1]*Pd_010[2];
double P_010102110=Pd_010[0]*Pd_102[1]*Pd_110[2];
double P_010202010=Pd_010[0]*Pd_202[1]*Pd_010[2];
double P_010202110=Pd_010[0]*Pd_202[1]*Pd_110[2];
double P_110002010=Pd_110[0]*Pd_002[1]*Pd_010[2];
double P_110002110=Pd_110[0]*Pd_002[1]*Pd_110[2];
double P_110102010=Pd_110[0]*Pd_102[1]*Pd_010[2];
double P_110102110=Pd_110[0]*Pd_102[1]*Pd_110[2];
double P_110202010=Pd_110[0]*Pd_202[1]*Pd_010[2];
double P_110202110=Pd_110[0]*Pd_202[1]*Pd_110[2];
double P_011000011=Pd_011[0]*Pd_011[2];
double P_011000111=Pd_011[0]*Pd_111[2];
double P_011000211=Pd_011[0]*Pd_211[2];
double P_111000011=Pd_111[0]*Pd_011[2];
double P_111000111=Pd_111[0]*Pd_111[2];
double P_111000211=Pd_111[0]*Pd_211[2];
double P_211000011=Pd_211[0]*Pd_011[2];
double P_211000111=Pd_211[0]*Pd_111[2];
double P_211000211=Pd_211[0]*Pd_211[2];
double P_010001011=Pd_010[0]*Pd_001[1]*Pd_011[2];
double P_010001111=Pd_010[0]*Pd_001[1]*Pd_111[2];
double P_010001211=Pd_010[0]*Pd_001[1]*Pd_211[2];
double P_010101011=Pd_010[0]*Pd_101[1]*Pd_011[2];
double P_010101111=Pd_010[0]*Pd_101[1]*Pd_111[2];
double P_010101211=Pd_010[0]*Pd_101[1]*Pd_211[2];
double P_110001011=Pd_110[0]*Pd_001[1]*Pd_011[2];
double P_110001111=Pd_110[0]*Pd_001[1]*Pd_111[2];
double P_110001211=Pd_110[0]*Pd_001[1]*Pd_211[2];
double P_110101011=Pd_110[0]*Pd_101[1]*Pd_011[2];
double P_110101111=Pd_110[0]*Pd_101[1]*Pd_111[2];
double P_110101211=Pd_110[0]*Pd_101[1]*Pd_211[2];
double P_010000012=Pd_010[0]*Pd_012[2];
double P_010000112=Pd_010[0]*Pd_112[2];
double P_010000212=Pd_010[0]*Pd_212[2];
double P_010000312=Pd_010[0]*Pd_312[2];
double P_110000012=Pd_110[0]*Pd_012[2];
double P_110000112=Pd_110[0]*Pd_112[2];
double P_110000212=Pd_110[0]*Pd_212[2];
double P_110000312=Pd_110[0]*Pd_312[2];
double P_002010010=Pd_002[0]*Pd_010[1]*Pd_010[2];
double P_002010110=Pd_002[0]*Pd_010[1]*Pd_110[2];
double P_002110010=Pd_002[0]*Pd_110[1]*Pd_010[2];
double P_002110110=Pd_002[0]*Pd_110[1]*Pd_110[2];
double P_102010010=Pd_102[0]*Pd_010[1]*Pd_010[2];
double P_102010110=Pd_102[0]*Pd_010[1]*Pd_110[2];
double P_102110010=Pd_102[0]*Pd_110[1]*Pd_010[2];
double P_102110110=Pd_102[0]*Pd_110[1]*Pd_110[2];
double P_202010010=Pd_202[0]*Pd_010[1]*Pd_010[2];
double P_202010110=Pd_202[0]*Pd_010[1]*Pd_110[2];
double P_202110010=Pd_202[0]*Pd_110[1]*Pd_010[2];
double P_202110110=Pd_202[0]*Pd_110[1]*Pd_110[2];
double P_001011010=Pd_001[0]*Pd_011[1]*Pd_010[2];
double P_001011110=Pd_001[0]*Pd_011[1]*Pd_110[2];
double P_001111010=Pd_001[0]*Pd_111[1]*Pd_010[2];
double P_001111110=Pd_001[0]*Pd_111[1]*Pd_110[2];
double P_001211010=Pd_001[0]*Pd_211[1]*Pd_010[2];
double P_001211110=Pd_001[0]*Pd_211[1]*Pd_110[2];
double P_101011010=Pd_101[0]*Pd_011[1]*Pd_010[2];
double P_101011110=Pd_101[0]*Pd_011[1]*Pd_110[2];
double P_101111010=Pd_101[0]*Pd_111[1]*Pd_010[2];
double P_101111110=Pd_101[0]*Pd_111[1]*Pd_110[2];
double P_101211010=Pd_101[0]*Pd_211[1]*Pd_010[2];
double P_101211110=Pd_101[0]*Pd_211[1]*Pd_110[2];
double P_000012010=Pd_012[1]*Pd_010[2];
double P_000012110=Pd_012[1]*Pd_110[2];
double P_000112010=Pd_112[1]*Pd_010[2];
double P_000112110=Pd_112[1]*Pd_110[2];
double P_000212010=Pd_212[1]*Pd_010[2];
double P_000212110=Pd_212[1]*Pd_110[2];
double P_000312010=Pd_312[1]*Pd_010[2];
double P_000312110=Pd_312[1]*Pd_110[2];
double P_001010011=Pd_001[0]*Pd_010[1]*Pd_011[2];
double P_001010111=Pd_001[0]*Pd_010[1]*Pd_111[2];
double P_001010211=Pd_001[0]*Pd_010[1]*Pd_211[2];
double P_001110011=Pd_001[0]*Pd_110[1]*Pd_011[2];
double P_001110111=Pd_001[0]*Pd_110[1]*Pd_111[2];
double P_001110211=Pd_001[0]*Pd_110[1]*Pd_211[2];
double P_101010011=Pd_101[0]*Pd_010[1]*Pd_011[2];
double P_101010111=Pd_101[0]*Pd_010[1]*Pd_111[2];
double P_101010211=Pd_101[0]*Pd_010[1]*Pd_211[2];
double P_101110011=Pd_101[0]*Pd_110[1]*Pd_011[2];
double P_101110111=Pd_101[0]*Pd_110[1]*Pd_111[2];
double P_101110211=Pd_101[0]*Pd_110[1]*Pd_211[2];
double P_000011011=Pd_011[1]*Pd_011[2];
double P_000011111=Pd_011[1]*Pd_111[2];
double P_000011211=Pd_011[1]*Pd_211[2];
double P_000111011=Pd_111[1]*Pd_011[2];
double P_000111111=Pd_111[1]*Pd_111[2];
double P_000111211=Pd_111[1]*Pd_211[2];
double P_000211011=Pd_211[1]*Pd_011[2];
double P_000211111=Pd_211[1]*Pd_111[2];
double P_000211211=Pd_211[1]*Pd_211[2];
double P_000010012=Pd_010[1]*Pd_012[2];
double P_000010112=Pd_010[1]*Pd_112[2];
double P_000010212=Pd_010[1]*Pd_212[2];
double P_000010312=Pd_010[1]*Pd_312[2];
double P_000110012=Pd_110[1]*Pd_012[2];
double P_000110112=Pd_110[1]*Pd_112[2];
double P_000110212=Pd_110[1]*Pd_212[2];
double P_000110312=Pd_110[1]*Pd_312[2];
double P_002000020=Pd_002[0]*Pd_020[2];
double P_002000120=Pd_002[0]*Pd_120[2];
double P_002000220=Pd_002[0]*Pd_220[2];
double P_102000020=Pd_102[0]*Pd_020[2];
double P_102000120=Pd_102[0]*Pd_120[2];
double P_102000220=Pd_102[0]*Pd_220[2];
double P_202000020=Pd_202[0]*Pd_020[2];
double P_202000120=Pd_202[0]*Pd_120[2];
double P_202000220=Pd_202[0]*Pd_220[2];
double P_001001020=Pd_001[0]*Pd_001[1]*Pd_020[2];
double P_001001120=Pd_001[0]*Pd_001[1]*Pd_120[2];
double P_001001220=Pd_001[0]*Pd_001[1]*Pd_220[2];
double P_001101020=Pd_001[0]*Pd_101[1]*Pd_020[2];
double P_001101120=Pd_001[0]*Pd_101[1]*Pd_120[2];
double P_001101220=Pd_001[0]*Pd_101[1]*Pd_220[2];
double P_101001020=Pd_101[0]*Pd_001[1]*Pd_020[2];
double P_101001120=Pd_101[0]*Pd_001[1]*Pd_120[2];
double P_101001220=Pd_101[0]*Pd_001[1]*Pd_220[2];
double P_101101020=Pd_101[0]*Pd_101[1]*Pd_020[2];
double P_101101120=Pd_101[0]*Pd_101[1]*Pd_120[2];
double P_101101220=Pd_101[0]*Pd_101[1]*Pd_220[2];
double P_000002020=Pd_002[1]*Pd_020[2];
double P_000002120=Pd_002[1]*Pd_120[2];
double P_000002220=Pd_002[1]*Pd_220[2];
double P_000102020=Pd_102[1]*Pd_020[2];
double P_000102120=Pd_102[1]*Pd_120[2];
double P_000102220=Pd_102[1]*Pd_220[2];
double P_000202020=Pd_202[1]*Pd_020[2];
double P_000202120=Pd_202[1]*Pd_120[2];
double P_000202220=Pd_202[1]*Pd_220[2];
double P_001000021=Pd_001[0]*Pd_021[2];
double P_001000121=Pd_001[0]*Pd_121[2];
double P_001000221=Pd_001[0]*Pd_221[2];
double P_001000321=Pd_001[0]*Pd_321[2];
double P_101000021=Pd_101[0]*Pd_021[2];
double P_101000121=Pd_101[0]*Pd_121[2];
double P_101000221=Pd_101[0]*Pd_221[2];
double P_101000321=Pd_101[0]*Pd_321[2];
double P_000001021=Pd_001[1]*Pd_021[2];
double P_000001121=Pd_001[1]*Pd_121[2];
double P_000001221=Pd_001[1]*Pd_221[2];
double P_000001321=Pd_001[1]*Pd_321[2];
double P_000101021=Pd_101[1]*Pd_021[2];
double P_000101121=Pd_101[1]*Pd_121[2];
double P_000101221=Pd_101[1]*Pd_221[2];
double P_000101321=Pd_101[1]*Pd_321[2];
double P_000000022=Pd_022[2];
double P_000000122=Pd_122[2];
double P_000000222=Pd_222[2];
double P_000000322=Pd_322[2];
double P_000000422=Pd_422[2];
double PR_022000000000=P_022000000*R_000[0]+-1*P_122000000*R_100[0]+P_222000000*R_200[0]+-1*P_322000000*R_300[0]+P_422000000*R_400[0];
double PR_021001000000=P_021001000*R_000[0]+-1*P_021101000*R_010[0]+-1*P_121001000*R_100[0]+P_121101000*R_110[0]+P_221001000*R_200[0]+-1*P_221101000*R_210[0]+-1*P_321001000*R_300[0]+P_321101000*R_310[0];
double PR_020002000000=P_020002000*R_000[0]+-1*P_020102000*R_010[0]+P_020202000*R_020[0]+-1*P_120002000*R_100[0]+P_120102000*R_110[0]+-1*P_120202000*R_120[0]+P_220002000*R_200[0]+-1*P_220102000*R_210[0]+P_220202000*R_220[0];
double PR_021000001000=P_021000001*R_000[0]+-1*P_021000101*R_001[0]+-1*P_121000001*R_100[0]+P_121000101*R_101[0]+P_221000001*R_200[0]+-1*P_221000101*R_201[0]+-1*P_321000001*R_300[0]+P_321000101*R_301[0];
double PR_020001001000=P_020001001*R_000[0]+-1*P_020001101*R_001[0]+-1*P_020101001*R_010[0]+P_020101101*R_011[0]+-1*P_120001001*R_100[0]+P_120001101*R_101[0]+P_120101001*R_110[0]+-1*P_120101101*R_111[0]+P_220001001*R_200[0]+-1*P_220001101*R_201[0]+-1*P_220101001*R_210[0]+P_220101101*R_211[0];
double PR_020000002000=P_020000002*R_000[0]+-1*P_020000102*R_001[0]+P_020000202*R_002[0]+-1*P_120000002*R_100[0]+P_120000102*R_101[0]+-1*P_120000202*R_102[0]+P_220000002*R_200[0]+-1*P_220000102*R_201[0]+P_220000202*R_202[0];
double PR_012010000000=P_012010000*R_000[0]+-1*P_012110000*R_010[0]+-1*P_112010000*R_100[0]+P_112110000*R_110[0]+P_212010000*R_200[0]+-1*P_212110000*R_210[0]+-1*P_312010000*R_300[0]+P_312110000*R_310[0];
double PR_011011000000=P_011011000*R_000[0]+-1*P_011111000*R_010[0]+P_011211000*R_020[0]+-1*P_111011000*R_100[0]+P_111111000*R_110[0]+-1*P_111211000*R_120[0]+P_211011000*R_200[0]+-1*P_211111000*R_210[0]+P_211211000*R_220[0];
double PR_010012000000=P_010012000*R_000[0]+-1*P_010112000*R_010[0]+P_010212000*R_020[0]+-1*P_010312000*R_030[0]+-1*P_110012000*R_100[0]+P_110112000*R_110[0]+-1*P_110212000*R_120[0]+P_110312000*R_130[0];
double PR_011010001000=P_011010001*R_000[0]+-1*P_011010101*R_001[0]+-1*P_011110001*R_010[0]+P_011110101*R_011[0]+-1*P_111010001*R_100[0]+P_111010101*R_101[0]+P_111110001*R_110[0]+-1*P_111110101*R_111[0]+P_211010001*R_200[0]+-1*P_211010101*R_201[0]+-1*P_211110001*R_210[0]+P_211110101*R_211[0];
double PR_010011001000=P_010011001*R_000[0]+-1*P_010011101*R_001[0]+-1*P_010111001*R_010[0]+P_010111101*R_011[0]+P_010211001*R_020[0]+-1*P_010211101*R_021[0]+-1*P_110011001*R_100[0]+P_110011101*R_101[0]+P_110111001*R_110[0]+-1*P_110111101*R_111[0]+-1*P_110211001*R_120[0]+P_110211101*R_121[0];
double PR_010010002000=P_010010002*R_000[0]+-1*P_010010102*R_001[0]+P_010010202*R_002[0]+-1*P_010110002*R_010[0]+P_010110102*R_011[0]+-1*P_010110202*R_012[0]+-1*P_110010002*R_100[0]+P_110010102*R_101[0]+-1*P_110010202*R_102[0]+P_110110002*R_110[0]+-1*P_110110102*R_111[0]+P_110110202*R_112[0];
double PR_002020000000=P_002020000*R_000[0]+-1*P_002120000*R_010[0]+P_002220000*R_020[0]+-1*P_102020000*R_100[0]+P_102120000*R_110[0]+-1*P_102220000*R_120[0]+P_202020000*R_200[0]+-1*P_202120000*R_210[0]+P_202220000*R_220[0];
double PR_001021000000=P_001021000*R_000[0]+-1*P_001121000*R_010[0]+P_001221000*R_020[0]+-1*P_001321000*R_030[0]+-1*P_101021000*R_100[0]+P_101121000*R_110[0]+-1*P_101221000*R_120[0]+P_101321000*R_130[0];
double PR_000022000000=P_000022000*R_000[0]+-1*P_000122000*R_010[0]+P_000222000*R_020[0]+-1*P_000322000*R_030[0]+P_000422000*R_040[0];
double PR_001020001000=P_001020001*R_000[0]+-1*P_001020101*R_001[0]+-1*P_001120001*R_010[0]+P_001120101*R_011[0]+P_001220001*R_020[0]+-1*P_001220101*R_021[0]+-1*P_101020001*R_100[0]+P_101020101*R_101[0]+P_101120001*R_110[0]+-1*P_101120101*R_111[0]+-1*P_101220001*R_120[0]+P_101220101*R_121[0];
double PR_000021001000=P_000021001*R_000[0]+-1*P_000021101*R_001[0]+-1*P_000121001*R_010[0]+P_000121101*R_011[0]+P_000221001*R_020[0]+-1*P_000221101*R_021[0]+-1*P_000321001*R_030[0]+P_000321101*R_031[0];
double PR_000020002000=P_000020002*R_000[0]+-1*P_000020102*R_001[0]+P_000020202*R_002[0]+-1*P_000120002*R_010[0]+P_000120102*R_011[0]+-1*P_000120202*R_012[0]+P_000220002*R_020[0]+-1*P_000220102*R_021[0]+P_000220202*R_022[0];
double PR_012000010000=P_012000010*R_000[0]+-1*P_012000110*R_001[0]+-1*P_112000010*R_100[0]+P_112000110*R_101[0]+P_212000010*R_200[0]+-1*P_212000110*R_201[0]+-1*P_312000010*R_300[0]+P_312000110*R_301[0];
double PR_011001010000=P_011001010*R_000[0]+-1*P_011001110*R_001[0]+-1*P_011101010*R_010[0]+P_011101110*R_011[0]+-1*P_111001010*R_100[0]+P_111001110*R_101[0]+P_111101010*R_110[0]+-1*P_111101110*R_111[0]+P_211001010*R_200[0]+-1*P_211001110*R_201[0]+-1*P_211101010*R_210[0]+P_211101110*R_211[0];
double PR_010002010000=P_010002010*R_000[0]+-1*P_010002110*R_001[0]+-1*P_010102010*R_010[0]+P_010102110*R_011[0]+P_010202010*R_020[0]+-1*P_010202110*R_021[0]+-1*P_110002010*R_100[0]+P_110002110*R_101[0]+P_110102010*R_110[0]+-1*P_110102110*R_111[0]+-1*P_110202010*R_120[0]+P_110202110*R_121[0];
double PR_011000011000=P_011000011*R_000[0]+-1*P_011000111*R_001[0]+P_011000211*R_002[0]+-1*P_111000011*R_100[0]+P_111000111*R_101[0]+-1*P_111000211*R_102[0]+P_211000011*R_200[0]+-1*P_211000111*R_201[0]+P_211000211*R_202[0];
double PR_010001011000=P_010001011*R_000[0]+-1*P_010001111*R_001[0]+P_010001211*R_002[0]+-1*P_010101011*R_010[0]+P_010101111*R_011[0]+-1*P_010101211*R_012[0]+-1*P_110001011*R_100[0]+P_110001111*R_101[0]+-1*P_110001211*R_102[0]+P_110101011*R_110[0]+-1*P_110101111*R_111[0]+P_110101211*R_112[0];
double PR_010000012000=P_010000012*R_000[0]+-1*P_010000112*R_001[0]+P_010000212*R_002[0]+-1*P_010000312*R_003[0]+-1*P_110000012*R_100[0]+P_110000112*R_101[0]+-1*P_110000212*R_102[0]+P_110000312*R_103[0];
double PR_002010010000=P_002010010*R_000[0]+-1*P_002010110*R_001[0]+-1*P_002110010*R_010[0]+P_002110110*R_011[0]+-1*P_102010010*R_100[0]+P_102010110*R_101[0]+P_102110010*R_110[0]+-1*P_102110110*R_111[0]+P_202010010*R_200[0]+-1*P_202010110*R_201[0]+-1*P_202110010*R_210[0]+P_202110110*R_211[0];
double PR_001011010000=P_001011010*R_000[0]+-1*P_001011110*R_001[0]+-1*P_001111010*R_010[0]+P_001111110*R_011[0]+P_001211010*R_020[0]+-1*P_001211110*R_021[0]+-1*P_101011010*R_100[0]+P_101011110*R_101[0]+P_101111010*R_110[0]+-1*P_101111110*R_111[0]+-1*P_101211010*R_120[0]+P_101211110*R_121[0];
double PR_000012010000=P_000012010*R_000[0]+-1*P_000012110*R_001[0]+-1*P_000112010*R_010[0]+P_000112110*R_011[0]+P_000212010*R_020[0]+-1*P_000212110*R_021[0]+-1*P_000312010*R_030[0]+P_000312110*R_031[0];
double PR_001010011000=P_001010011*R_000[0]+-1*P_001010111*R_001[0]+P_001010211*R_002[0]+-1*P_001110011*R_010[0]+P_001110111*R_011[0]+-1*P_001110211*R_012[0]+-1*P_101010011*R_100[0]+P_101010111*R_101[0]+-1*P_101010211*R_102[0]+P_101110011*R_110[0]+-1*P_101110111*R_111[0]+P_101110211*R_112[0];
double PR_000011011000=P_000011011*R_000[0]+-1*P_000011111*R_001[0]+P_000011211*R_002[0]+-1*P_000111011*R_010[0]+P_000111111*R_011[0]+-1*P_000111211*R_012[0]+P_000211011*R_020[0]+-1*P_000211111*R_021[0]+P_000211211*R_022[0];
double PR_000010012000=P_000010012*R_000[0]+-1*P_000010112*R_001[0]+P_000010212*R_002[0]+-1*P_000010312*R_003[0]+-1*P_000110012*R_010[0]+P_000110112*R_011[0]+-1*P_000110212*R_012[0]+P_000110312*R_013[0];
double PR_002000020000=P_002000020*R_000[0]+-1*P_002000120*R_001[0]+P_002000220*R_002[0]+-1*P_102000020*R_100[0]+P_102000120*R_101[0]+-1*P_102000220*R_102[0]+P_202000020*R_200[0]+-1*P_202000120*R_201[0]+P_202000220*R_202[0];
double PR_001001020000=P_001001020*R_000[0]+-1*P_001001120*R_001[0]+P_001001220*R_002[0]+-1*P_001101020*R_010[0]+P_001101120*R_011[0]+-1*P_001101220*R_012[0]+-1*P_101001020*R_100[0]+P_101001120*R_101[0]+-1*P_101001220*R_102[0]+P_101101020*R_110[0]+-1*P_101101120*R_111[0]+P_101101220*R_112[0];
double PR_000002020000=P_000002020*R_000[0]+-1*P_000002120*R_001[0]+P_000002220*R_002[0]+-1*P_000102020*R_010[0]+P_000102120*R_011[0]+-1*P_000102220*R_012[0]+P_000202020*R_020[0]+-1*P_000202120*R_021[0]+P_000202220*R_022[0];
double PR_001000021000=P_001000021*R_000[0]+-1*P_001000121*R_001[0]+P_001000221*R_002[0]+-1*P_001000321*R_003[0]+-1*P_101000021*R_100[0]+P_101000121*R_101[0]+-1*P_101000221*R_102[0]+P_101000321*R_103[0];
double PR_000001021000=P_000001021*R_000[0]+-1*P_000001121*R_001[0]+P_000001221*R_002[0]+-1*P_000001321*R_003[0]+-1*P_000101021*R_010[0]+P_000101121*R_011[0]+-1*P_000101221*R_012[0]+P_000101321*R_013[0];
double PR_000000022000=P_000000022*R_000[0]+-1*P_000000122*R_001[0]+P_000000222*R_002[0]+-1*P_000000322*R_003[0]+P_000000422*R_004[0];
double PR_022000000001=P_022000000*R_001[0]+-1*P_122000000*R_101[0]+P_222000000*R_201[0]+-1*P_322000000*R_301[0]+P_422000000*R_401[0];
double PR_021001000001=P_021001000*R_001[0]+-1*P_021101000*R_011[0]+-1*P_121001000*R_101[0]+P_121101000*R_111[0]+P_221001000*R_201[0]+-1*P_221101000*R_211[0]+-1*P_321001000*R_301[0]+P_321101000*R_311[0];
double PR_020002000001=P_020002000*R_001[0]+-1*P_020102000*R_011[0]+P_020202000*R_021[0]+-1*P_120002000*R_101[0]+P_120102000*R_111[0]+-1*P_120202000*R_121[0]+P_220002000*R_201[0]+-1*P_220102000*R_211[0]+P_220202000*R_221[0];
double PR_021000001001=P_021000001*R_001[0]+-1*P_021000101*R_002[0]+-1*P_121000001*R_101[0]+P_121000101*R_102[0]+P_221000001*R_201[0]+-1*P_221000101*R_202[0]+-1*P_321000001*R_301[0]+P_321000101*R_302[0];
double PR_020001001001=P_020001001*R_001[0]+-1*P_020001101*R_002[0]+-1*P_020101001*R_011[0]+P_020101101*R_012[0]+-1*P_120001001*R_101[0]+P_120001101*R_102[0]+P_120101001*R_111[0]+-1*P_120101101*R_112[0]+P_220001001*R_201[0]+-1*P_220001101*R_202[0]+-1*P_220101001*R_211[0]+P_220101101*R_212[0];
double PR_020000002001=P_020000002*R_001[0]+-1*P_020000102*R_002[0]+P_020000202*R_003[0]+-1*P_120000002*R_101[0]+P_120000102*R_102[0]+-1*P_120000202*R_103[0]+P_220000002*R_201[0]+-1*P_220000102*R_202[0]+P_220000202*R_203[0];
double PR_012010000001=P_012010000*R_001[0]+-1*P_012110000*R_011[0]+-1*P_112010000*R_101[0]+P_112110000*R_111[0]+P_212010000*R_201[0]+-1*P_212110000*R_211[0]+-1*P_312010000*R_301[0]+P_312110000*R_311[0];
double PR_011011000001=P_011011000*R_001[0]+-1*P_011111000*R_011[0]+P_011211000*R_021[0]+-1*P_111011000*R_101[0]+P_111111000*R_111[0]+-1*P_111211000*R_121[0]+P_211011000*R_201[0]+-1*P_211111000*R_211[0]+P_211211000*R_221[0];
double PR_010012000001=P_010012000*R_001[0]+-1*P_010112000*R_011[0]+P_010212000*R_021[0]+-1*P_010312000*R_031[0]+-1*P_110012000*R_101[0]+P_110112000*R_111[0]+-1*P_110212000*R_121[0]+P_110312000*R_131[0];
double PR_011010001001=P_011010001*R_001[0]+-1*P_011010101*R_002[0]+-1*P_011110001*R_011[0]+P_011110101*R_012[0]+-1*P_111010001*R_101[0]+P_111010101*R_102[0]+P_111110001*R_111[0]+-1*P_111110101*R_112[0]+P_211010001*R_201[0]+-1*P_211010101*R_202[0]+-1*P_211110001*R_211[0]+P_211110101*R_212[0];
double PR_010011001001=P_010011001*R_001[0]+-1*P_010011101*R_002[0]+-1*P_010111001*R_011[0]+P_010111101*R_012[0]+P_010211001*R_021[0]+-1*P_010211101*R_022[0]+-1*P_110011001*R_101[0]+P_110011101*R_102[0]+P_110111001*R_111[0]+-1*P_110111101*R_112[0]+-1*P_110211001*R_121[0]+P_110211101*R_122[0];
double PR_010010002001=P_010010002*R_001[0]+-1*P_010010102*R_002[0]+P_010010202*R_003[0]+-1*P_010110002*R_011[0]+P_010110102*R_012[0]+-1*P_010110202*R_013[0]+-1*P_110010002*R_101[0]+P_110010102*R_102[0]+-1*P_110010202*R_103[0]+P_110110002*R_111[0]+-1*P_110110102*R_112[0]+P_110110202*R_113[0];
double PR_002020000001=P_002020000*R_001[0]+-1*P_002120000*R_011[0]+P_002220000*R_021[0]+-1*P_102020000*R_101[0]+P_102120000*R_111[0]+-1*P_102220000*R_121[0]+P_202020000*R_201[0]+-1*P_202120000*R_211[0]+P_202220000*R_221[0];
double PR_001021000001=P_001021000*R_001[0]+-1*P_001121000*R_011[0]+P_001221000*R_021[0]+-1*P_001321000*R_031[0]+-1*P_101021000*R_101[0]+P_101121000*R_111[0]+-1*P_101221000*R_121[0]+P_101321000*R_131[0];
double PR_000022000001=P_000022000*R_001[0]+-1*P_000122000*R_011[0]+P_000222000*R_021[0]+-1*P_000322000*R_031[0]+P_000422000*R_041[0];
double PR_001020001001=P_001020001*R_001[0]+-1*P_001020101*R_002[0]+-1*P_001120001*R_011[0]+P_001120101*R_012[0]+P_001220001*R_021[0]+-1*P_001220101*R_022[0]+-1*P_101020001*R_101[0]+P_101020101*R_102[0]+P_101120001*R_111[0]+-1*P_101120101*R_112[0]+-1*P_101220001*R_121[0]+P_101220101*R_122[0];
double PR_000021001001=P_000021001*R_001[0]+-1*P_000021101*R_002[0]+-1*P_000121001*R_011[0]+P_000121101*R_012[0]+P_000221001*R_021[0]+-1*P_000221101*R_022[0]+-1*P_000321001*R_031[0]+P_000321101*R_032[0];
double PR_000020002001=P_000020002*R_001[0]+-1*P_000020102*R_002[0]+P_000020202*R_003[0]+-1*P_000120002*R_011[0]+P_000120102*R_012[0]+-1*P_000120202*R_013[0]+P_000220002*R_021[0]+-1*P_000220102*R_022[0]+P_000220202*R_023[0];
double PR_012000010001=P_012000010*R_001[0]+-1*P_012000110*R_002[0]+-1*P_112000010*R_101[0]+P_112000110*R_102[0]+P_212000010*R_201[0]+-1*P_212000110*R_202[0]+-1*P_312000010*R_301[0]+P_312000110*R_302[0];
double PR_011001010001=P_011001010*R_001[0]+-1*P_011001110*R_002[0]+-1*P_011101010*R_011[0]+P_011101110*R_012[0]+-1*P_111001010*R_101[0]+P_111001110*R_102[0]+P_111101010*R_111[0]+-1*P_111101110*R_112[0]+P_211001010*R_201[0]+-1*P_211001110*R_202[0]+-1*P_211101010*R_211[0]+P_211101110*R_212[0];
double PR_010002010001=P_010002010*R_001[0]+-1*P_010002110*R_002[0]+-1*P_010102010*R_011[0]+P_010102110*R_012[0]+P_010202010*R_021[0]+-1*P_010202110*R_022[0]+-1*P_110002010*R_101[0]+P_110002110*R_102[0]+P_110102010*R_111[0]+-1*P_110102110*R_112[0]+-1*P_110202010*R_121[0]+P_110202110*R_122[0];
double PR_011000011001=P_011000011*R_001[0]+-1*P_011000111*R_002[0]+P_011000211*R_003[0]+-1*P_111000011*R_101[0]+P_111000111*R_102[0]+-1*P_111000211*R_103[0]+P_211000011*R_201[0]+-1*P_211000111*R_202[0]+P_211000211*R_203[0];
double PR_010001011001=P_010001011*R_001[0]+-1*P_010001111*R_002[0]+P_010001211*R_003[0]+-1*P_010101011*R_011[0]+P_010101111*R_012[0]+-1*P_010101211*R_013[0]+-1*P_110001011*R_101[0]+P_110001111*R_102[0]+-1*P_110001211*R_103[0]+P_110101011*R_111[0]+-1*P_110101111*R_112[0]+P_110101211*R_113[0];
double PR_010000012001=P_010000012*R_001[0]+-1*P_010000112*R_002[0]+P_010000212*R_003[0]+-1*P_010000312*R_004[0]+-1*P_110000012*R_101[0]+P_110000112*R_102[0]+-1*P_110000212*R_103[0]+P_110000312*R_104[0];
double PR_002010010001=P_002010010*R_001[0]+-1*P_002010110*R_002[0]+-1*P_002110010*R_011[0]+P_002110110*R_012[0]+-1*P_102010010*R_101[0]+P_102010110*R_102[0]+P_102110010*R_111[0]+-1*P_102110110*R_112[0]+P_202010010*R_201[0]+-1*P_202010110*R_202[0]+-1*P_202110010*R_211[0]+P_202110110*R_212[0];
double PR_001011010001=P_001011010*R_001[0]+-1*P_001011110*R_002[0]+-1*P_001111010*R_011[0]+P_001111110*R_012[0]+P_001211010*R_021[0]+-1*P_001211110*R_022[0]+-1*P_101011010*R_101[0]+P_101011110*R_102[0]+P_101111010*R_111[0]+-1*P_101111110*R_112[0]+-1*P_101211010*R_121[0]+P_101211110*R_122[0];
double PR_000012010001=P_000012010*R_001[0]+-1*P_000012110*R_002[0]+-1*P_000112010*R_011[0]+P_000112110*R_012[0]+P_000212010*R_021[0]+-1*P_000212110*R_022[0]+-1*P_000312010*R_031[0]+P_000312110*R_032[0];
double PR_001010011001=P_001010011*R_001[0]+-1*P_001010111*R_002[0]+P_001010211*R_003[0]+-1*P_001110011*R_011[0]+P_001110111*R_012[0]+-1*P_001110211*R_013[0]+-1*P_101010011*R_101[0]+P_101010111*R_102[0]+-1*P_101010211*R_103[0]+P_101110011*R_111[0]+-1*P_101110111*R_112[0]+P_101110211*R_113[0];
double PR_000011011001=P_000011011*R_001[0]+-1*P_000011111*R_002[0]+P_000011211*R_003[0]+-1*P_000111011*R_011[0]+P_000111111*R_012[0]+-1*P_000111211*R_013[0]+P_000211011*R_021[0]+-1*P_000211111*R_022[0]+P_000211211*R_023[0];
double PR_000010012001=P_000010012*R_001[0]+-1*P_000010112*R_002[0]+P_000010212*R_003[0]+-1*P_000010312*R_004[0]+-1*P_000110012*R_011[0]+P_000110112*R_012[0]+-1*P_000110212*R_013[0]+P_000110312*R_014[0];
double PR_002000020001=P_002000020*R_001[0]+-1*P_002000120*R_002[0]+P_002000220*R_003[0]+-1*P_102000020*R_101[0]+P_102000120*R_102[0]+-1*P_102000220*R_103[0]+P_202000020*R_201[0]+-1*P_202000120*R_202[0]+P_202000220*R_203[0];
double PR_001001020001=P_001001020*R_001[0]+-1*P_001001120*R_002[0]+P_001001220*R_003[0]+-1*P_001101020*R_011[0]+P_001101120*R_012[0]+-1*P_001101220*R_013[0]+-1*P_101001020*R_101[0]+P_101001120*R_102[0]+-1*P_101001220*R_103[0]+P_101101020*R_111[0]+-1*P_101101120*R_112[0]+P_101101220*R_113[0];
double PR_000002020001=P_000002020*R_001[0]+-1*P_000002120*R_002[0]+P_000002220*R_003[0]+-1*P_000102020*R_011[0]+P_000102120*R_012[0]+-1*P_000102220*R_013[0]+P_000202020*R_021[0]+-1*P_000202120*R_022[0]+P_000202220*R_023[0];
double PR_001000021001=P_001000021*R_001[0]+-1*P_001000121*R_002[0]+P_001000221*R_003[0]+-1*P_001000321*R_004[0]+-1*P_101000021*R_101[0]+P_101000121*R_102[0]+-1*P_101000221*R_103[0]+P_101000321*R_104[0];
double PR_000001021001=P_000001021*R_001[0]+-1*P_000001121*R_002[0]+P_000001221*R_003[0]+-1*P_000001321*R_004[0]+-1*P_000101021*R_011[0]+P_000101121*R_012[0]+-1*P_000101221*R_013[0]+P_000101321*R_014[0];
double PR_000000022001=P_000000022*R_001[0]+-1*P_000000122*R_002[0]+P_000000222*R_003[0]+-1*P_000000322*R_004[0]+P_000000422*R_005[0];
double PR_022000000010=P_022000000*R_010[0]+-1*P_122000000*R_110[0]+P_222000000*R_210[0]+-1*P_322000000*R_310[0]+P_422000000*R_410[0];
double PR_021001000010=P_021001000*R_010[0]+-1*P_021101000*R_020[0]+-1*P_121001000*R_110[0]+P_121101000*R_120[0]+P_221001000*R_210[0]+-1*P_221101000*R_220[0]+-1*P_321001000*R_310[0]+P_321101000*R_320[0];
double PR_020002000010=P_020002000*R_010[0]+-1*P_020102000*R_020[0]+P_020202000*R_030[0]+-1*P_120002000*R_110[0]+P_120102000*R_120[0]+-1*P_120202000*R_130[0]+P_220002000*R_210[0]+-1*P_220102000*R_220[0]+P_220202000*R_230[0];
double PR_021000001010=P_021000001*R_010[0]+-1*P_021000101*R_011[0]+-1*P_121000001*R_110[0]+P_121000101*R_111[0]+P_221000001*R_210[0]+-1*P_221000101*R_211[0]+-1*P_321000001*R_310[0]+P_321000101*R_311[0];
double PR_020001001010=P_020001001*R_010[0]+-1*P_020001101*R_011[0]+-1*P_020101001*R_020[0]+P_020101101*R_021[0]+-1*P_120001001*R_110[0]+P_120001101*R_111[0]+P_120101001*R_120[0]+-1*P_120101101*R_121[0]+P_220001001*R_210[0]+-1*P_220001101*R_211[0]+-1*P_220101001*R_220[0]+P_220101101*R_221[0];
double PR_020000002010=P_020000002*R_010[0]+-1*P_020000102*R_011[0]+P_020000202*R_012[0]+-1*P_120000002*R_110[0]+P_120000102*R_111[0]+-1*P_120000202*R_112[0]+P_220000002*R_210[0]+-1*P_220000102*R_211[0]+P_220000202*R_212[0];
double PR_012010000010=P_012010000*R_010[0]+-1*P_012110000*R_020[0]+-1*P_112010000*R_110[0]+P_112110000*R_120[0]+P_212010000*R_210[0]+-1*P_212110000*R_220[0]+-1*P_312010000*R_310[0]+P_312110000*R_320[0];
double PR_011011000010=P_011011000*R_010[0]+-1*P_011111000*R_020[0]+P_011211000*R_030[0]+-1*P_111011000*R_110[0]+P_111111000*R_120[0]+-1*P_111211000*R_130[0]+P_211011000*R_210[0]+-1*P_211111000*R_220[0]+P_211211000*R_230[0];
double PR_010012000010=P_010012000*R_010[0]+-1*P_010112000*R_020[0]+P_010212000*R_030[0]+-1*P_010312000*R_040[0]+-1*P_110012000*R_110[0]+P_110112000*R_120[0]+-1*P_110212000*R_130[0]+P_110312000*R_140[0];
double PR_011010001010=P_011010001*R_010[0]+-1*P_011010101*R_011[0]+-1*P_011110001*R_020[0]+P_011110101*R_021[0]+-1*P_111010001*R_110[0]+P_111010101*R_111[0]+P_111110001*R_120[0]+-1*P_111110101*R_121[0]+P_211010001*R_210[0]+-1*P_211010101*R_211[0]+-1*P_211110001*R_220[0]+P_211110101*R_221[0];
double PR_010011001010=P_010011001*R_010[0]+-1*P_010011101*R_011[0]+-1*P_010111001*R_020[0]+P_010111101*R_021[0]+P_010211001*R_030[0]+-1*P_010211101*R_031[0]+-1*P_110011001*R_110[0]+P_110011101*R_111[0]+P_110111001*R_120[0]+-1*P_110111101*R_121[0]+-1*P_110211001*R_130[0]+P_110211101*R_131[0];
double PR_010010002010=P_010010002*R_010[0]+-1*P_010010102*R_011[0]+P_010010202*R_012[0]+-1*P_010110002*R_020[0]+P_010110102*R_021[0]+-1*P_010110202*R_022[0]+-1*P_110010002*R_110[0]+P_110010102*R_111[0]+-1*P_110010202*R_112[0]+P_110110002*R_120[0]+-1*P_110110102*R_121[0]+P_110110202*R_122[0];
double PR_002020000010=P_002020000*R_010[0]+-1*P_002120000*R_020[0]+P_002220000*R_030[0]+-1*P_102020000*R_110[0]+P_102120000*R_120[0]+-1*P_102220000*R_130[0]+P_202020000*R_210[0]+-1*P_202120000*R_220[0]+P_202220000*R_230[0];
double PR_001021000010=P_001021000*R_010[0]+-1*P_001121000*R_020[0]+P_001221000*R_030[0]+-1*P_001321000*R_040[0]+-1*P_101021000*R_110[0]+P_101121000*R_120[0]+-1*P_101221000*R_130[0]+P_101321000*R_140[0];
double PR_000022000010=P_000022000*R_010[0]+-1*P_000122000*R_020[0]+P_000222000*R_030[0]+-1*P_000322000*R_040[0]+P_000422000*R_050[0];
double PR_001020001010=P_001020001*R_010[0]+-1*P_001020101*R_011[0]+-1*P_001120001*R_020[0]+P_001120101*R_021[0]+P_001220001*R_030[0]+-1*P_001220101*R_031[0]+-1*P_101020001*R_110[0]+P_101020101*R_111[0]+P_101120001*R_120[0]+-1*P_101120101*R_121[0]+-1*P_101220001*R_130[0]+P_101220101*R_131[0];
double PR_000021001010=P_000021001*R_010[0]+-1*P_000021101*R_011[0]+-1*P_000121001*R_020[0]+P_000121101*R_021[0]+P_000221001*R_030[0]+-1*P_000221101*R_031[0]+-1*P_000321001*R_040[0]+P_000321101*R_041[0];
double PR_000020002010=P_000020002*R_010[0]+-1*P_000020102*R_011[0]+P_000020202*R_012[0]+-1*P_000120002*R_020[0]+P_000120102*R_021[0]+-1*P_000120202*R_022[0]+P_000220002*R_030[0]+-1*P_000220102*R_031[0]+P_000220202*R_032[0];
double PR_012000010010=P_012000010*R_010[0]+-1*P_012000110*R_011[0]+-1*P_112000010*R_110[0]+P_112000110*R_111[0]+P_212000010*R_210[0]+-1*P_212000110*R_211[0]+-1*P_312000010*R_310[0]+P_312000110*R_311[0];
double PR_011001010010=P_011001010*R_010[0]+-1*P_011001110*R_011[0]+-1*P_011101010*R_020[0]+P_011101110*R_021[0]+-1*P_111001010*R_110[0]+P_111001110*R_111[0]+P_111101010*R_120[0]+-1*P_111101110*R_121[0]+P_211001010*R_210[0]+-1*P_211001110*R_211[0]+-1*P_211101010*R_220[0]+P_211101110*R_221[0];
double PR_010002010010=P_010002010*R_010[0]+-1*P_010002110*R_011[0]+-1*P_010102010*R_020[0]+P_010102110*R_021[0]+P_010202010*R_030[0]+-1*P_010202110*R_031[0]+-1*P_110002010*R_110[0]+P_110002110*R_111[0]+P_110102010*R_120[0]+-1*P_110102110*R_121[0]+-1*P_110202010*R_130[0]+P_110202110*R_131[0];
double PR_011000011010=P_011000011*R_010[0]+-1*P_011000111*R_011[0]+P_011000211*R_012[0]+-1*P_111000011*R_110[0]+P_111000111*R_111[0]+-1*P_111000211*R_112[0]+P_211000011*R_210[0]+-1*P_211000111*R_211[0]+P_211000211*R_212[0];
double PR_010001011010=P_010001011*R_010[0]+-1*P_010001111*R_011[0]+P_010001211*R_012[0]+-1*P_010101011*R_020[0]+P_010101111*R_021[0]+-1*P_010101211*R_022[0]+-1*P_110001011*R_110[0]+P_110001111*R_111[0]+-1*P_110001211*R_112[0]+P_110101011*R_120[0]+-1*P_110101111*R_121[0]+P_110101211*R_122[0];
double PR_010000012010=P_010000012*R_010[0]+-1*P_010000112*R_011[0]+P_010000212*R_012[0]+-1*P_010000312*R_013[0]+-1*P_110000012*R_110[0]+P_110000112*R_111[0]+-1*P_110000212*R_112[0]+P_110000312*R_113[0];
double PR_002010010010=P_002010010*R_010[0]+-1*P_002010110*R_011[0]+-1*P_002110010*R_020[0]+P_002110110*R_021[0]+-1*P_102010010*R_110[0]+P_102010110*R_111[0]+P_102110010*R_120[0]+-1*P_102110110*R_121[0]+P_202010010*R_210[0]+-1*P_202010110*R_211[0]+-1*P_202110010*R_220[0]+P_202110110*R_221[0];
double PR_001011010010=P_001011010*R_010[0]+-1*P_001011110*R_011[0]+-1*P_001111010*R_020[0]+P_001111110*R_021[0]+P_001211010*R_030[0]+-1*P_001211110*R_031[0]+-1*P_101011010*R_110[0]+P_101011110*R_111[0]+P_101111010*R_120[0]+-1*P_101111110*R_121[0]+-1*P_101211010*R_130[0]+P_101211110*R_131[0];
double PR_000012010010=P_000012010*R_010[0]+-1*P_000012110*R_011[0]+-1*P_000112010*R_020[0]+P_000112110*R_021[0]+P_000212010*R_030[0]+-1*P_000212110*R_031[0]+-1*P_000312010*R_040[0]+P_000312110*R_041[0];
double PR_001010011010=P_001010011*R_010[0]+-1*P_001010111*R_011[0]+P_001010211*R_012[0]+-1*P_001110011*R_020[0]+P_001110111*R_021[0]+-1*P_001110211*R_022[0]+-1*P_101010011*R_110[0]+P_101010111*R_111[0]+-1*P_101010211*R_112[0]+P_101110011*R_120[0]+-1*P_101110111*R_121[0]+P_101110211*R_122[0];
double PR_000011011010=P_000011011*R_010[0]+-1*P_000011111*R_011[0]+P_000011211*R_012[0]+-1*P_000111011*R_020[0]+P_000111111*R_021[0]+-1*P_000111211*R_022[0]+P_000211011*R_030[0]+-1*P_000211111*R_031[0]+P_000211211*R_032[0];
double PR_000010012010=P_000010012*R_010[0]+-1*P_000010112*R_011[0]+P_000010212*R_012[0]+-1*P_000010312*R_013[0]+-1*P_000110012*R_020[0]+P_000110112*R_021[0]+-1*P_000110212*R_022[0]+P_000110312*R_023[0];
double PR_002000020010=P_002000020*R_010[0]+-1*P_002000120*R_011[0]+P_002000220*R_012[0]+-1*P_102000020*R_110[0]+P_102000120*R_111[0]+-1*P_102000220*R_112[0]+P_202000020*R_210[0]+-1*P_202000120*R_211[0]+P_202000220*R_212[0];
double PR_001001020010=P_001001020*R_010[0]+-1*P_001001120*R_011[0]+P_001001220*R_012[0]+-1*P_001101020*R_020[0]+P_001101120*R_021[0]+-1*P_001101220*R_022[0]+-1*P_101001020*R_110[0]+P_101001120*R_111[0]+-1*P_101001220*R_112[0]+P_101101020*R_120[0]+-1*P_101101120*R_121[0]+P_101101220*R_122[0];
double PR_000002020010=P_000002020*R_010[0]+-1*P_000002120*R_011[0]+P_000002220*R_012[0]+-1*P_000102020*R_020[0]+P_000102120*R_021[0]+-1*P_000102220*R_022[0]+P_000202020*R_030[0]+-1*P_000202120*R_031[0]+P_000202220*R_032[0];
double PR_001000021010=P_001000021*R_010[0]+-1*P_001000121*R_011[0]+P_001000221*R_012[0]+-1*P_001000321*R_013[0]+-1*P_101000021*R_110[0]+P_101000121*R_111[0]+-1*P_101000221*R_112[0]+P_101000321*R_113[0];
double PR_000001021010=P_000001021*R_010[0]+-1*P_000001121*R_011[0]+P_000001221*R_012[0]+-1*P_000001321*R_013[0]+-1*P_000101021*R_020[0]+P_000101121*R_021[0]+-1*P_000101221*R_022[0]+P_000101321*R_023[0];
double PR_000000022010=P_000000022*R_010[0]+-1*P_000000122*R_011[0]+P_000000222*R_012[0]+-1*P_000000322*R_013[0]+P_000000422*R_014[0];
double PR_022000000100=P_022000000*R_100[0]+-1*P_122000000*R_200[0]+P_222000000*R_300[0]+-1*P_322000000*R_400[0]+P_422000000*R_500[0];
double PR_021001000100=P_021001000*R_100[0]+-1*P_021101000*R_110[0]+-1*P_121001000*R_200[0]+P_121101000*R_210[0]+P_221001000*R_300[0]+-1*P_221101000*R_310[0]+-1*P_321001000*R_400[0]+P_321101000*R_410[0];
double PR_020002000100=P_020002000*R_100[0]+-1*P_020102000*R_110[0]+P_020202000*R_120[0]+-1*P_120002000*R_200[0]+P_120102000*R_210[0]+-1*P_120202000*R_220[0]+P_220002000*R_300[0]+-1*P_220102000*R_310[0]+P_220202000*R_320[0];
double PR_021000001100=P_021000001*R_100[0]+-1*P_021000101*R_101[0]+-1*P_121000001*R_200[0]+P_121000101*R_201[0]+P_221000001*R_300[0]+-1*P_221000101*R_301[0]+-1*P_321000001*R_400[0]+P_321000101*R_401[0];
double PR_020001001100=P_020001001*R_100[0]+-1*P_020001101*R_101[0]+-1*P_020101001*R_110[0]+P_020101101*R_111[0]+-1*P_120001001*R_200[0]+P_120001101*R_201[0]+P_120101001*R_210[0]+-1*P_120101101*R_211[0]+P_220001001*R_300[0]+-1*P_220001101*R_301[0]+-1*P_220101001*R_310[0]+P_220101101*R_311[0];
double PR_020000002100=P_020000002*R_100[0]+-1*P_020000102*R_101[0]+P_020000202*R_102[0]+-1*P_120000002*R_200[0]+P_120000102*R_201[0]+-1*P_120000202*R_202[0]+P_220000002*R_300[0]+-1*P_220000102*R_301[0]+P_220000202*R_302[0];
double PR_012010000100=P_012010000*R_100[0]+-1*P_012110000*R_110[0]+-1*P_112010000*R_200[0]+P_112110000*R_210[0]+P_212010000*R_300[0]+-1*P_212110000*R_310[0]+-1*P_312010000*R_400[0]+P_312110000*R_410[0];
double PR_011011000100=P_011011000*R_100[0]+-1*P_011111000*R_110[0]+P_011211000*R_120[0]+-1*P_111011000*R_200[0]+P_111111000*R_210[0]+-1*P_111211000*R_220[0]+P_211011000*R_300[0]+-1*P_211111000*R_310[0]+P_211211000*R_320[0];
double PR_010012000100=P_010012000*R_100[0]+-1*P_010112000*R_110[0]+P_010212000*R_120[0]+-1*P_010312000*R_130[0]+-1*P_110012000*R_200[0]+P_110112000*R_210[0]+-1*P_110212000*R_220[0]+P_110312000*R_230[0];
double PR_011010001100=P_011010001*R_100[0]+-1*P_011010101*R_101[0]+-1*P_011110001*R_110[0]+P_011110101*R_111[0]+-1*P_111010001*R_200[0]+P_111010101*R_201[0]+P_111110001*R_210[0]+-1*P_111110101*R_211[0]+P_211010001*R_300[0]+-1*P_211010101*R_301[0]+-1*P_211110001*R_310[0]+P_211110101*R_311[0];
double PR_010011001100=P_010011001*R_100[0]+-1*P_010011101*R_101[0]+-1*P_010111001*R_110[0]+P_010111101*R_111[0]+P_010211001*R_120[0]+-1*P_010211101*R_121[0]+-1*P_110011001*R_200[0]+P_110011101*R_201[0]+P_110111001*R_210[0]+-1*P_110111101*R_211[0]+-1*P_110211001*R_220[0]+P_110211101*R_221[0];
double PR_010010002100=P_010010002*R_100[0]+-1*P_010010102*R_101[0]+P_010010202*R_102[0]+-1*P_010110002*R_110[0]+P_010110102*R_111[0]+-1*P_010110202*R_112[0]+-1*P_110010002*R_200[0]+P_110010102*R_201[0]+-1*P_110010202*R_202[0]+P_110110002*R_210[0]+-1*P_110110102*R_211[0]+P_110110202*R_212[0];
double PR_002020000100=P_002020000*R_100[0]+-1*P_002120000*R_110[0]+P_002220000*R_120[0]+-1*P_102020000*R_200[0]+P_102120000*R_210[0]+-1*P_102220000*R_220[0]+P_202020000*R_300[0]+-1*P_202120000*R_310[0]+P_202220000*R_320[0];
double PR_001021000100=P_001021000*R_100[0]+-1*P_001121000*R_110[0]+P_001221000*R_120[0]+-1*P_001321000*R_130[0]+-1*P_101021000*R_200[0]+P_101121000*R_210[0]+-1*P_101221000*R_220[0]+P_101321000*R_230[0];
double PR_000022000100=P_000022000*R_100[0]+-1*P_000122000*R_110[0]+P_000222000*R_120[0]+-1*P_000322000*R_130[0]+P_000422000*R_140[0];
double PR_001020001100=P_001020001*R_100[0]+-1*P_001020101*R_101[0]+-1*P_001120001*R_110[0]+P_001120101*R_111[0]+P_001220001*R_120[0]+-1*P_001220101*R_121[0]+-1*P_101020001*R_200[0]+P_101020101*R_201[0]+P_101120001*R_210[0]+-1*P_101120101*R_211[0]+-1*P_101220001*R_220[0]+P_101220101*R_221[0];
double PR_000021001100=P_000021001*R_100[0]+-1*P_000021101*R_101[0]+-1*P_000121001*R_110[0]+P_000121101*R_111[0]+P_000221001*R_120[0]+-1*P_000221101*R_121[0]+-1*P_000321001*R_130[0]+P_000321101*R_131[0];
double PR_000020002100=P_000020002*R_100[0]+-1*P_000020102*R_101[0]+P_000020202*R_102[0]+-1*P_000120002*R_110[0]+P_000120102*R_111[0]+-1*P_000120202*R_112[0]+P_000220002*R_120[0]+-1*P_000220102*R_121[0]+P_000220202*R_122[0];
double PR_012000010100=P_012000010*R_100[0]+-1*P_012000110*R_101[0]+-1*P_112000010*R_200[0]+P_112000110*R_201[0]+P_212000010*R_300[0]+-1*P_212000110*R_301[0]+-1*P_312000010*R_400[0]+P_312000110*R_401[0];
double PR_011001010100=P_011001010*R_100[0]+-1*P_011001110*R_101[0]+-1*P_011101010*R_110[0]+P_011101110*R_111[0]+-1*P_111001010*R_200[0]+P_111001110*R_201[0]+P_111101010*R_210[0]+-1*P_111101110*R_211[0]+P_211001010*R_300[0]+-1*P_211001110*R_301[0]+-1*P_211101010*R_310[0]+P_211101110*R_311[0];
double PR_010002010100=P_010002010*R_100[0]+-1*P_010002110*R_101[0]+-1*P_010102010*R_110[0]+P_010102110*R_111[0]+P_010202010*R_120[0]+-1*P_010202110*R_121[0]+-1*P_110002010*R_200[0]+P_110002110*R_201[0]+P_110102010*R_210[0]+-1*P_110102110*R_211[0]+-1*P_110202010*R_220[0]+P_110202110*R_221[0];
double PR_011000011100=P_011000011*R_100[0]+-1*P_011000111*R_101[0]+P_011000211*R_102[0]+-1*P_111000011*R_200[0]+P_111000111*R_201[0]+-1*P_111000211*R_202[0]+P_211000011*R_300[0]+-1*P_211000111*R_301[0]+P_211000211*R_302[0];
double PR_010001011100=P_010001011*R_100[0]+-1*P_010001111*R_101[0]+P_010001211*R_102[0]+-1*P_010101011*R_110[0]+P_010101111*R_111[0]+-1*P_010101211*R_112[0]+-1*P_110001011*R_200[0]+P_110001111*R_201[0]+-1*P_110001211*R_202[0]+P_110101011*R_210[0]+-1*P_110101111*R_211[0]+P_110101211*R_212[0];
double PR_010000012100=P_010000012*R_100[0]+-1*P_010000112*R_101[0]+P_010000212*R_102[0]+-1*P_010000312*R_103[0]+-1*P_110000012*R_200[0]+P_110000112*R_201[0]+-1*P_110000212*R_202[0]+P_110000312*R_203[0];
double PR_002010010100=P_002010010*R_100[0]+-1*P_002010110*R_101[0]+-1*P_002110010*R_110[0]+P_002110110*R_111[0]+-1*P_102010010*R_200[0]+P_102010110*R_201[0]+P_102110010*R_210[0]+-1*P_102110110*R_211[0]+P_202010010*R_300[0]+-1*P_202010110*R_301[0]+-1*P_202110010*R_310[0]+P_202110110*R_311[0];
double PR_001011010100=P_001011010*R_100[0]+-1*P_001011110*R_101[0]+-1*P_001111010*R_110[0]+P_001111110*R_111[0]+P_001211010*R_120[0]+-1*P_001211110*R_121[0]+-1*P_101011010*R_200[0]+P_101011110*R_201[0]+P_101111010*R_210[0]+-1*P_101111110*R_211[0]+-1*P_101211010*R_220[0]+P_101211110*R_221[0];
double PR_000012010100=P_000012010*R_100[0]+-1*P_000012110*R_101[0]+-1*P_000112010*R_110[0]+P_000112110*R_111[0]+P_000212010*R_120[0]+-1*P_000212110*R_121[0]+-1*P_000312010*R_130[0]+P_000312110*R_131[0];
double PR_001010011100=P_001010011*R_100[0]+-1*P_001010111*R_101[0]+P_001010211*R_102[0]+-1*P_001110011*R_110[0]+P_001110111*R_111[0]+-1*P_001110211*R_112[0]+-1*P_101010011*R_200[0]+P_101010111*R_201[0]+-1*P_101010211*R_202[0]+P_101110011*R_210[0]+-1*P_101110111*R_211[0]+P_101110211*R_212[0];
double PR_000011011100=P_000011011*R_100[0]+-1*P_000011111*R_101[0]+P_000011211*R_102[0]+-1*P_000111011*R_110[0]+P_000111111*R_111[0]+-1*P_000111211*R_112[0]+P_000211011*R_120[0]+-1*P_000211111*R_121[0]+P_000211211*R_122[0];
double PR_000010012100=P_000010012*R_100[0]+-1*P_000010112*R_101[0]+P_000010212*R_102[0]+-1*P_000010312*R_103[0]+-1*P_000110012*R_110[0]+P_000110112*R_111[0]+-1*P_000110212*R_112[0]+P_000110312*R_113[0];
double PR_002000020100=P_002000020*R_100[0]+-1*P_002000120*R_101[0]+P_002000220*R_102[0]+-1*P_102000020*R_200[0]+P_102000120*R_201[0]+-1*P_102000220*R_202[0]+P_202000020*R_300[0]+-1*P_202000120*R_301[0]+P_202000220*R_302[0];
double PR_001001020100=P_001001020*R_100[0]+-1*P_001001120*R_101[0]+P_001001220*R_102[0]+-1*P_001101020*R_110[0]+P_001101120*R_111[0]+-1*P_001101220*R_112[0]+-1*P_101001020*R_200[0]+P_101001120*R_201[0]+-1*P_101001220*R_202[0]+P_101101020*R_210[0]+-1*P_101101120*R_211[0]+P_101101220*R_212[0];
double PR_000002020100=P_000002020*R_100[0]+-1*P_000002120*R_101[0]+P_000002220*R_102[0]+-1*P_000102020*R_110[0]+P_000102120*R_111[0]+-1*P_000102220*R_112[0]+P_000202020*R_120[0]+-1*P_000202120*R_121[0]+P_000202220*R_122[0];
double PR_001000021100=P_001000021*R_100[0]+-1*P_001000121*R_101[0]+P_001000221*R_102[0]+-1*P_001000321*R_103[0]+-1*P_101000021*R_200[0]+P_101000121*R_201[0]+-1*P_101000221*R_202[0]+P_101000321*R_203[0];
double PR_000001021100=P_000001021*R_100[0]+-1*P_000001121*R_101[0]+P_000001221*R_102[0]+-1*P_000001321*R_103[0]+-1*P_000101021*R_110[0]+P_000101121*R_111[0]+-1*P_000101221*R_112[0]+P_000101321*R_113[0];
double PR_000000022100=P_000000022*R_100[0]+-1*P_000000122*R_101[0]+P_000000222*R_102[0]+-1*P_000000322*R_103[0]+P_000000422*R_104[0];
double PR_022000000002=P_022000000*R_002[0]+-1*P_122000000*R_102[0]+P_222000000*R_202[0]+-1*P_322000000*R_302[0]+P_422000000*R_402[0];
double PR_021001000002=P_021001000*R_002[0]+-1*P_021101000*R_012[0]+-1*P_121001000*R_102[0]+P_121101000*R_112[0]+P_221001000*R_202[0]+-1*P_221101000*R_212[0]+-1*P_321001000*R_302[0]+P_321101000*R_312[0];
double PR_020002000002=P_020002000*R_002[0]+-1*P_020102000*R_012[0]+P_020202000*R_022[0]+-1*P_120002000*R_102[0]+P_120102000*R_112[0]+-1*P_120202000*R_122[0]+P_220002000*R_202[0]+-1*P_220102000*R_212[0]+P_220202000*R_222[0];
double PR_021000001002=P_021000001*R_002[0]+-1*P_021000101*R_003[0]+-1*P_121000001*R_102[0]+P_121000101*R_103[0]+P_221000001*R_202[0]+-1*P_221000101*R_203[0]+-1*P_321000001*R_302[0]+P_321000101*R_303[0];
double PR_020001001002=P_020001001*R_002[0]+-1*P_020001101*R_003[0]+-1*P_020101001*R_012[0]+P_020101101*R_013[0]+-1*P_120001001*R_102[0]+P_120001101*R_103[0]+P_120101001*R_112[0]+-1*P_120101101*R_113[0]+P_220001001*R_202[0]+-1*P_220001101*R_203[0]+-1*P_220101001*R_212[0]+P_220101101*R_213[0];
double PR_020000002002=P_020000002*R_002[0]+-1*P_020000102*R_003[0]+P_020000202*R_004[0]+-1*P_120000002*R_102[0]+P_120000102*R_103[0]+-1*P_120000202*R_104[0]+P_220000002*R_202[0]+-1*P_220000102*R_203[0]+P_220000202*R_204[0];
double PR_012010000002=P_012010000*R_002[0]+-1*P_012110000*R_012[0]+-1*P_112010000*R_102[0]+P_112110000*R_112[0]+P_212010000*R_202[0]+-1*P_212110000*R_212[0]+-1*P_312010000*R_302[0]+P_312110000*R_312[0];
double PR_011011000002=P_011011000*R_002[0]+-1*P_011111000*R_012[0]+P_011211000*R_022[0]+-1*P_111011000*R_102[0]+P_111111000*R_112[0]+-1*P_111211000*R_122[0]+P_211011000*R_202[0]+-1*P_211111000*R_212[0]+P_211211000*R_222[0];
double PR_010012000002=P_010012000*R_002[0]+-1*P_010112000*R_012[0]+P_010212000*R_022[0]+-1*P_010312000*R_032[0]+-1*P_110012000*R_102[0]+P_110112000*R_112[0]+-1*P_110212000*R_122[0]+P_110312000*R_132[0];
double PR_011010001002=P_011010001*R_002[0]+-1*P_011010101*R_003[0]+-1*P_011110001*R_012[0]+P_011110101*R_013[0]+-1*P_111010001*R_102[0]+P_111010101*R_103[0]+P_111110001*R_112[0]+-1*P_111110101*R_113[0]+P_211010001*R_202[0]+-1*P_211010101*R_203[0]+-1*P_211110001*R_212[0]+P_211110101*R_213[0];
double PR_010011001002=P_010011001*R_002[0]+-1*P_010011101*R_003[0]+-1*P_010111001*R_012[0]+P_010111101*R_013[0]+P_010211001*R_022[0]+-1*P_010211101*R_023[0]+-1*P_110011001*R_102[0]+P_110011101*R_103[0]+P_110111001*R_112[0]+-1*P_110111101*R_113[0]+-1*P_110211001*R_122[0]+P_110211101*R_123[0];
double PR_010010002002=P_010010002*R_002[0]+-1*P_010010102*R_003[0]+P_010010202*R_004[0]+-1*P_010110002*R_012[0]+P_010110102*R_013[0]+-1*P_010110202*R_014[0]+-1*P_110010002*R_102[0]+P_110010102*R_103[0]+-1*P_110010202*R_104[0]+P_110110002*R_112[0]+-1*P_110110102*R_113[0]+P_110110202*R_114[0];
double PR_002020000002=P_002020000*R_002[0]+-1*P_002120000*R_012[0]+P_002220000*R_022[0]+-1*P_102020000*R_102[0]+P_102120000*R_112[0]+-1*P_102220000*R_122[0]+P_202020000*R_202[0]+-1*P_202120000*R_212[0]+P_202220000*R_222[0];
double PR_001021000002=P_001021000*R_002[0]+-1*P_001121000*R_012[0]+P_001221000*R_022[0]+-1*P_001321000*R_032[0]+-1*P_101021000*R_102[0]+P_101121000*R_112[0]+-1*P_101221000*R_122[0]+P_101321000*R_132[0];
double PR_000022000002=P_000022000*R_002[0]+-1*P_000122000*R_012[0]+P_000222000*R_022[0]+-1*P_000322000*R_032[0]+P_000422000*R_042[0];
double PR_001020001002=P_001020001*R_002[0]+-1*P_001020101*R_003[0]+-1*P_001120001*R_012[0]+P_001120101*R_013[0]+P_001220001*R_022[0]+-1*P_001220101*R_023[0]+-1*P_101020001*R_102[0]+P_101020101*R_103[0]+P_101120001*R_112[0]+-1*P_101120101*R_113[0]+-1*P_101220001*R_122[0]+P_101220101*R_123[0];
double PR_000021001002=P_000021001*R_002[0]+-1*P_000021101*R_003[0]+-1*P_000121001*R_012[0]+P_000121101*R_013[0]+P_000221001*R_022[0]+-1*P_000221101*R_023[0]+-1*P_000321001*R_032[0]+P_000321101*R_033[0];
double PR_000020002002=P_000020002*R_002[0]+-1*P_000020102*R_003[0]+P_000020202*R_004[0]+-1*P_000120002*R_012[0]+P_000120102*R_013[0]+-1*P_000120202*R_014[0]+P_000220002*R_022[0]+-1*P_000220102*R_023[0]+P_000220202*R_024[0];
double PR_012000010002=P_012000010*R_002[0]+-1*P_012000110*R_003[0]+-1*P_112000010*R_102[0]+P_112000110*R_103[0]+P_212000010*R_202[0]+-1*P_212000110*R_203[0]+-1*P_312000010*R_302[0]+P_312000110*R_303[0];
double PR_011001010002=P_011001010*R_002[0]+-1*P_011001110*R_003[0]+-1*P_011101010*R_012[0]+P_011101110*R_013[0]+-1*P_111001010*R_102[0]+P_111001110*R_103[0]+P_111101010*R_112[0]+-1*P_111101110*R_113[0]+P_211001010*R_202[0]+-1*P_211001110*R_203[0]+-1*P_211101010*R_212[0]+P_211101110*R_213[0];
double PR_010002010002=P_010002010*R_002[0]+-1*P_010002110*R_003[0]+-1*P_010102010*R_012[0]+P_010102110*R_013[0]+P_010202010*R_022[0]+-1*P_010202110*R_023[0]+-1*P_110002010*R_102[0]+P_110002110*R_103[0]+P_110102010*R_112[0]+-1*P_110102110*R_113[0]+-1*P_110202010*R_122[0]+P_110202110*R_123[0];
double PR_011000011002=P_011000011*R_002[0]+-1*P_011000111*R_003[0]+P_011000211*R_004[0]+-1*P_111000011*R_102[0]+P_111000111*R_103[0]+-1*P_111000211*R_104[0]+P_211000011*R_202[0]+-1*P_211000111*R_203[0]+P_211000211*R_204[0];
double PR_010001011002=P_010001011*R_002[0]+-1*P_010001111*R_003[0]+P_010001211*R_004[0]+-1*P_010101011*R_012[0]+P_010101111*R_013[0]+-1*P_010101211*R_014[0]+-1*P_110001011*R_102[0]+P_110001111*R_103[0]+-1*P_110001211*R_104[0]+P_110101011*R_112[0]+-1*P_110101111*R_113[0]+P_110101211*R_114[0];
double PR_010000012002=P_010000012*R_002[0]+-1*P_010000112*R_003[0]+P_010000212*R_004[0]+-1*P_010000312*R_005[0]+-1*P_110000012*R_102[0]+P_110000112*R_103[0]+-1*P_110000212*R_104[0]+P_110000312*R_105[0];
double PR_002010010002=P_002010010*R_002[0]+-1*P_002010110*R_003[0]+-1*P_002110010*R_012[0]+P_002110110*R_013[0]+-1*P_102010010*R_102[0]+P_102010110*R_103[0]+P_102110010*R_112[0]+-1*P_102110110*R_113[0]+P_202010010*R_202[0]+-1*P_202010110*R_203[0]+-1*P_202110010*R_212[0]+P_202110110*R_213[0];
double PR_001011010002=P_001011010*R_002[0]+-1*P_001011110*R_003[0]+-1*P_001111010*R_012[0]+P_001111110*R_013[0]+P_001211010*R_022[0]+-1*P_001211110*R_023[0]+-1*P_101011010*R_102[0]+P_101011110*R_103[0]+P_101111010*R_112[0]+-1*P_101111110*R_113[0]+-1*P_101211010*R_122[0]+P_101211110*R_123[0];
double PR_000012010002=P_000012010*R_002[0]+-1*P_000012110*R_003[0]+-1*P_000112010*R_012[0]+P_000112110*R_013[0]+P_000212010*R_022[0]+-1*P_000212110*R_023[0]+-1*P_000312010*R_032[0]+P_000312110*R_033[0];
double PR_001010011002=P_001010011*R_002[0]+-1*P_001010111*R_003[0]+P_001010211*R_004[0]+-1*P_001110011*R_012[0]+P_001110111*R_013[0]+-1*P_001110211*R_014[0]+-1*P_101010011*R_102[0]+P_101010111*R_103[0]+-1*P_101010211*R_104[0]+P_101110011*R_112[0]+-1*P_101110111*R_113[0]+P_101110211*R_114[0];
double PR_000011011002=P_000011011*R_002[0]+-1*P_000011111*R_003[0]+P_000011211*R_004[0]+-1*P_000111011*R_012[0]+P_000111111*R_013[0]+-1*P_000111211*R_014[0]+P_000211011*R_022[0]+-1*P_000211111*R_023[0]+P_000211211*R_024[0];
double PR_000010012002=P_000010012*R_002[0]+-1*P_000010112*R_003[0]+P_000010212*R_004[0]+-1*P_000010312*R_005[0]+-1*P_000110012*R_012[0]+P_000110112*R_013[0]+-1*P_000110212*R_014[0]+P_000110312*R_015[0];
double PR_002000020002=P_002000020*R_002[0]+-1*P_002000120*R_003[0]+P_002000220*R_004[0]+-1*P_102000020*R_102[0]+P_102000120*R_103[0]+-1*P_102000220*R_104[0]+P_202000020*R_202[0]+-1*P_202000120*R_203[0]+P_202000220*R_204[0];
double PR_001001020002=P_001001020*R_002[0]+-1*P_001001120*R_003[0]+P_001001220*R_004[0]+-1*P_001101020*R_012[0]+P_001101120*R_013[0]+-1*P_001101220*R_014[0]+-1*P_101001020*R_102[0]+P_101001120*R_103[0]+-1*P_101001220*R_104[0]+P_101101020*R_112[0]+-1*P_101101120*R_113[0]+P_101101220*R_114[0];
double PR_000002020002=P_000002020*R_002[0]+-1*P_000002120*R_003[0]+P_000002220*R_004[0]+-1*P_000102020*R_012[0]+P_000102120*R_013[0]+-1*P_000102220*R_014[0]+P_000202020*R_022[0]+-1*P_000202120*R_023[0]+P_000202220*R_024[0];
double PR_001000021002=P_001000021*R_002[0]+-1*P_001000121*R_003[0]+P_001000221*R_004[0]+-1*P_001000321*R_005[0]+-1*P_101000021*R_102[0]+P_101000121*R_103[0]+-1*P_101000221*R_104[0]+P_101000321*R_105[0];
double PR_000001021002=P_000001021*R_002[0]+-1*P_000001121*R_003[0]+P_000001221*R_004[0]+-1*P_000001321*R_005[0]+-1*P_000101021*R_012[0]+P_000101121*R_013[0]+-1*P_000101221*R_014[0]+P_000101321*R_015[0];
double PR_000000022002=P_000000022*R_002[0]+-1*P_000000122*R_003[0]+P_000000222*R_004[0]+-1*P_000000322*R_005[0]+P_000000422*R_006[0];
double PR_022000000011=P_022000000*R_011[0]+-1*P_122000000*R_111[0]+P_222000000*R_211[0]+-1*P_322000000*R_311[0]+P_422000000*R_411[0];
double PR_021001000011=P_021001000*R_011[0]+-1*P_021101000*R_021[0]+-1*P_121001000*R_111[0]+P_121101000*R_121[0]+P_221001000*R_211[0]+-1*P_221101000*R_221[0]+-1*P_321001000*R_311[0]+P_321101000*R_321[0];
double PR_020002000011=P_020002000*R_011[0]+-1*P_020102000*R_021[0]+P_020202000*R_031[0]+-1*P_120002000*R_111[0]+P_120102000*R_121[0]+-1*P_120202000*R_131[0]+P_220002000*R_211[0]+-1*P_220102000*R_221[0]+P_220202000*R_231[0];
double PR_021000001011=P_021000001*R_011[0]+-1*P_021000101*R_012[0]+-1*P_121000001*R_111[0]+P_121000101*R_112[0]+P_221000001*R_211[0]+-1*P_221000101*R_212[0]+-1*P_321000001*R_311[0]+P_321000101*R_312[0];
double PR_020001001011=P_020001001*R_011[0]+-1*P_020001101*R_012[0]+-1*P_020101001*R_021[0]+P_020101101*R_022[0]+-1*P_120001001*R_111[0]+P_120001101*R_112[0]+P_120101001*R_121[0]+-1*P_120101101*R_122[0]+P_220001001*R_211[0]+-1*P_220001101*R_212[0]+-1*P_220101001*R_221[0]+P_220101101*R_222[0];
double PR_020000002011=P_020000002*R_011[0]+-1*P_020000102*R_012[0]+P_020000202*R_013[0]+-1*P_120000002*R_111[0]+P_120000102*R_112[0]+-1*P_120000202*R_113[0]+P_220000002*R_211[0]+-1*P_220000102*R_212[0]+P_220000202*R_213[0];
double PR_012010000011=P_012010000*R_011[0]+-1*P_012110000*R_021[0]+-1*P_112010000*R_111[0]+P_112110000*R_121[0]+P_212010000*R_211[0]+-1*P_212110000*R_221[0]+-1*P_312010000*R_311[0]+P_312110000*R_321[0];
double PR_011011000011=P_011011000*R_011[0]+-1*P_011111000*R_021[0]+P_011211000*R_031[0]+-1*P_111011000*R_111[0]+P_111111000*R_121[0]+-1*P_111211000*R_131[0]+P_211011000*R_211[0]+-1*P_211111000*R_221[0]+P_211211000*R_231[0];
double PR_010012000011=P_010012000*R_011[0]+-1*P_010112000*R_021[0]+P_010212000*R_031[0]+-1*P_010312000*R_041[0]+-1*P_110012000*R_111[0]+P_110112000*R_121[0]+-1*P_110212000*R_131[0]+P_110312000*R_141[0];
double PR_011010001011=P_011010001*R_011[0]+-1*P_011010101*R_012[0]+-1*P_011110001*R_021[0]+P_011110101*R_022[0]+-1*P_111010001*R_111[0]+P_111010101*R_112[0]+P_111110001*R_121[0]+-1*P_111110101*R_122[0]+P_211010001*R_211[0]+-1*P_211010101*R_212[0]+-1*P_211110001*R_221[0]+P_211110101*R_222[0];
double PR_010011001011=P_010011001*R_011[0]+-1*P_010011101*R_012[0]+-1*P_010111001*R_021[0]+P_010111101*R_022[0]+P_010211001*R_031[0]+-1*P_010211101*R_032[0]+-1*P_110011001*R_111[0]+P_110011101*R_112[0]+P_110111001*R_121[0]+-1*P_110111101*R_122[0]+-1*P_110211001*R_131[0]+P_110211101*R_132[0];
double PR_010010002011=P_010010002*R_011[0]+-1*P_010010102*R_012[0]+P_010010202*R_013[0]+-1*P_010110002*R_021[0]+P_010110102*R_022[0]+-1*P_010110202*R_023[0]+-1*P_110010002*R_111[0]+P_110010102*R_112[0]+-1*P_110010202*R_113[0]+P_110110002*R_121[0]+-1*P_110110102*R_122[0]+P_110110202*R_123[0];
double PR_002020000011=P_002020000*R_011[0]+-1*P_002120000*R_021[0]+P_002220000*R_031[0]+-1*P_102020000*R_111[0]+P_102120000*R_121[0]+-1*P_102220000*R_131[0]+P_202020000*R_211[0]+-1*P_202120000*R_221[0]+P_202220000*R_231[0];
double PR_001021000011=P_001021000*R_011[0]+-1*P_001121000*R_021[0]+P_001221000*R_031[0]+-1*P_001321000*R_041[0]+-1*P_101021000*R_111[0]+P_101121000*R_121[0]+-1*P_101221000*R_131[0]+P_101321000*R_141[0];
double PR_000022000011=P_000022000*R_011[0]+-1*P_000122000*R_021[0]+P_000222000*R_031[0]+-1*P_000322000*R_041[0]+P_000422000*R_051[0];
double PR_001020001011=P_001020001*R_011[0]+-1*P_001020101*R_012[0]+-1*P_001120001*R_021[0]+P_001120101*R_022[0]+P_001220001*R_031[0]+-1*P_001220101*R_032[0]+-1*P_101020001*R_111[0]+P_101020101*R_112[0]+P_101120001*R_121[0]+-1*P_101120101*R_122[0]+-1*P_101220001*R_131[0]+P_101220101*R_132[0];
double PR_000021001011=P_000021001*R_011[0]+-1*P_000021101*R_012[0]+-1*P_000121001*R_021[0]+P_000121101*R_022[0]+P_000221001*R_031[0]+-1*P_000221101*R_032[0]+-1*P_000321001*R_041[0]+P_000321101*R_042[0];
double PR_000020002011=P_000020002*R_011[0]+-1*P_000020102*R_012[0]+P_000020202*R_013[0]+-1*P_000120002*R_021[0]+P_000120102*R_022[0]+-1*P_000120202*R_023[0]+P_000220002*R_031[0]+-1*P_000220102*R_032[0]+P_000220202*R_033[0];
double PR_012000010011=P_012000010*R_011[0]+-1*P_012000110*R_012[0]+-1*P_112000010*R_111[0]+P_112000110*R_112[0]+P_212000010*R_211[0]+-1*P_212000110*R_212[0]+-1*P_312000010*R_311[0]+P_312000110*R_312[0];
double PR_011001010011=P_011001010*R_011[0]+-1*P_011001110*R_012[0]+-1*P_011101010*R_021[0]+P_011101110*R_022[0]+-1*P_111001010*R_111[0]+P_111001110*R_112[0]+P_111101010*R_121[0]+-1*P_111101110*R_122[0]+P_211001010*R_211[0]+-1*P_211001110*R_212[0]+-1*P_211101010*R_221[0]+P_211101110*R_222[0];
double PR_010002010011=P_010002010*R_011[0]+-1*P_010002110*R_012[0]+-1*P_010102010*R_021[0]+P_010102110*R_022[0]+P_010202010*R_031[0]+-1*P_010202110*R_032[0]+-1*P_110002010*R_111[0]+P_110002110*R_112[0]+P_110102010*R_121[0]+-1*P_110102110*R_122[0]+-1*P_110202010*R_131[0]+P_110202110*R_132[0];
double PR_011000011011=P_011000011*R_011[0]+-1*P_011000111*R_012[0]+P_011000211*R_013[0]+-1*P_111000011*R_111[0]+P_111000111*R_112[0]+-1*P_111000211*R_113[0]+P_211000011*R_211[0]+-1*P_211000111*R_212[0]+P_211000211*R_213[0];
double PR_010001011011=P_010001011*R_011[0]+-1*P_010001111*R_012[0]+P_010001211*R_013[0]+-1*P_010101011*R_021[0]+P_010101111*R_022[0]+-1*P_010101211*R_023[0]+-1*P_110001011*R_111[0]+P_110001111*R_112[0]+-1*P_110001211*R_113[0]+P_110101011*R_121[0]+-1*P_110101111*R_122[0]+P_110101211*R_123[0];
double PR_010000012011=P_010000012*R_011[0]+-1*P_010000112*R_012[0]+P_010000212*R_013[0]+-1*P_010000312*R_014[0]+-1*P_110000012*R_111[0]+P_110000112*R_112[0]+-1*P_110000212*R_113[0]+P_110000312*R_114[0];
double PR_002010010011=P_002010010*R_011[0]+-1*P_002010110*R_012[0]+-1*P_002110010*R_021[0]+P_002110110*R_022[0]+-1*P_102010010*R_111[0]+P_102010110*R_112[0]+P_102110010*R_121[0]+-1*P_102110110*R_122[0]+P_202010010*R_211[0]+-1*P_202010110*R_212[0]+-1*P_202110010*R_221[0]+P_202110110*R_222[0];
double PR_001011010011=P_001011010*R_011[0]+-1*P_001011110*R_012[0]+-1*P_001111010*R_021[0]+P_001111110*R_022[0]+P_001211010*R_031[0]+-1*P_001211110*R_032[0]+-1*P_101011010*R_111[0]+P_101011110*R_112[0]+P_101111010*R_121[0]+-1*P_101111110*R_122[0]+-1*P_101211010*R_131[0]+P_101211110*R_132[0];
double PR_000012010011=P_000012010*R_011[0]+-1*P_000012110*R_012[0]+-1*P_000112010*R_021[0]+P_000112110*R_022[0]+P_000212010*R_031[0]+-1*P_000212110*R_032[0]+-1*P_000312010*R_041[0]+P_000312110*R_042[0];
double PR_001010011011=P_001010011*R_011[0]+-1*P_001010111*R_012[0]+P_001010211*R_013[0]+-1*P_001110011*R_021[0]+P_001110111*R_022[0]+-1*P_001110211*R_023[0]+-1*P_101010011*R_111[0]+P_101010111*R_112[0]+-1*P_101010211*R_113[0]+P_101110011*R_121[0]+-1*P_101110111*R_122[0]+P_101110211*R_123[0];
double PR_000011011011=P_000011011*R_011[0]+-1*P_000011111*R_012[0]+P_000011211*R_013[0]+-1*P_000111011*R_021[0]+P_000111111*R_022[0]+-1*P_000111211*R_023[0]+P_000211011*R_031[0]+-1*P_000211111*R_032[0]+P_000211211*R_033[0];
double PR_000010012011=P_000010012*R_011[0]+-1*P_000010112*R_012[0]+P_000010212*R_013[0]+-1*P_000010312*R_014[0]+-1*P_000110012*R_021[0]+P_000110112*R_022[0]+-1*P_000110212*R_023[0]+P_000110312*R_024[0];
double PR_002000020011=P_002000020*R_011[0]+-1*P_002000120*R_012[0]+P_002000220*R_013[0]+-1*P_102000020*R_111[0]+P_102000120*R_112[0]+-1*P_102000220*R_113[0]+P_202000020*R_211[0]+-1*P_202000120*R_212[0]+P_202000220*R_213[0];
double PR_001001020011=P_001001020*R_011[0]+-1*P_001001120*R_012[0]+P_001001220*R_013[0]+-1*P_001101020*R_021[0]+P_001101120*R_022[0]+-1*P_001101220*R_023[0]+-1*P_101001020*R_111[0]+P_101001120*R_112[0]+-1*P_101001220*R_113[0]+P_101101020*R_121[0]+-1*P_101101120*R_122[0]+P_101101220*R_123[0];
double PR_000002020011=P_000002020*R_011[0]+-1*P_000002120*R_012[0]+P_000002220*R_013[0]+-1*P_000102020*R_021[0]+P_000102120*R_022[0]+-1*P_000102220*R_023[0]+P_000202020*R_031[0]+-1*P_000202120*R_032[0]+P_000202220*R_033[0];
double PR_001000021011=P_001000021*R_011[0]+-1*P_001000121*R_012[0]+P_001000221*R_013[0]+-1*P_001000321*R_014[0]+-1*P_101000021*R_111[0]+P_101000121*R_112[0]+-1*P_101000221*R_113[0]+P_101000321*R_114[0];
double PR_000001021011=P_000001021*R_011[0]+-1*P_000001121*R_012[0]+P_000001221*R_013[0]+-1*P_000001321*R_014[0]+-1*P_000101021*R_021[0]+P_000101121*R_022[0]+-1*P_000101221*R_023[0]+P_000101321*R_024[0];
double PR_000000022011=P_000000022*R_011[0]+-1*P_000000122*R_012[0]+P_000000222*R_013[0]+-1*P_000000322*R_014[0]+P_000000422*R_015[0];
double PR_022000000020=P_022000000*R_020[0]+-1*P_122000000*R_120[0]+P_222000000*R_220[0]+-1*P_322000000*R_320[0]+P_422000000*R_420[0];
double PR_021001000020=P_021001000*R_020[0]+-1*P_021101000*R_030[0]+-1*P_121001000*R_120[0]+P_121101000*R_130[0]+P_221001000*R_220[0]+-1*P_221101000*R_230[0]+-1*P_321001000*R_320[0]+P_321101000*R_330[0];
double PR_020002000020=P_020002000*R_020[0]+-1*P_020102000*R_030[0]+P_020202000*R_040[0]+-1*P_120002000*R_120[0]+P_120102000*R_130[0]+-1*P_120202000*R_140[0]+P_220002000*R_220[0]+-1*P_220102000*R_230[0]+P_220202000*R_240[0];
double PR_021000001020=P_021000001*R_020[0]+-1*P_021000101*R_021[0]+-1*P_121000001*R_120[0]+P_121000101*R_121[0]+P_221000001*R_220[0]+-1*P_221000101*R_221[0]+-1*P_321000001*R_320[0]+P_321000101*R_321[0];
double PR_020001001020=P_020001001*R_020[0]+-1*P_020001101*R_021[0]+-1*P_020101001*R_030[0]+P_020101101*R_031[0]+-1*P_120001001*R_120[0]+P_120001101*R_121[0]+P_120101001*R_130[0]+-1*P_120101101*R_131[0]+P_220001001*R_220[0]+-1*P_220001101*R_221[0]+-1*P_220101001*R_230[0]+P_220101101*R_231[0];
double PR_020000002020=P_020000002*R_020[0]+-1*P_020000102*R_021[0]+P_020000202*R_022[0]+-1*P_120000002*R_120[0]+P_120000102*R_121[0]+-1*P_120000202*R_122[0]+P_220000002*R_220[0]+-1*P_220000102*R_221[0]+P_220000202*R_222[0];
double PR_012010000020=P_012010000*R_020[0]+-1*P_012110000*R_030[0]+-1*P_112010000*R_120[0]+P_112110000*R_130[0]+P_212010000*R_220[0]+-1*P_212110000*R_230[0]+-1*P_312010000*R_320[0]+P_312110000*R_330[0];
double PR_011011000020=P_011011000*R_020[0]+-1*P_011111000*R_030[0]+P_011211000*R_040[0]+-1*P_111011000*R_120[0]+P_111111000*R_130[0]+-1*P_111211000*R_140[0]+P_211011000*R_220[0]+-1*P_211111000*R_230[0]+P_211211000*R_240[0];
double PR_010012000020=P_010012000*R_020[0]+-1*P_010112000*R_030[0]+P_010212000*R_040[0]+-1*P_010312000*R_050[0]+-1*P_110012000*R_120[0]+P_110112000*R_130[0]+-1*P_110212000*R_140[0]+P_110312000*R_150[0];
double PR_011010001020=P_011010001*R_020[0]+-1*P_011010101*R_021[0]+-1*P_011110001*R_030[0]+P_011110101*R_031[0]+-1*P_111010001*R_120[0]+P_111010101*R_121[0]+P_111110001*R_130[0]+-1*P_111110101*R_131[0]+P_211010001*R_220[0]+-1*P_211010101*R_221[0]+-1*P_211110001*R_230[0]+P_211110101*R_231[0];
double PR_010011001020=P_010011001*R_020[0]+-1*P_010011101*R_021[0]+-1*P_010111001*R_030[0]+P_010111101*R_031[0]+P_010211001*R_040[0]+-1*P_010211101*R_041[0]+-1*P_110011001*R_120[0]+P_110011101*R_121[0]+P_110111001*R_130[0]+-1*P_110111101*R_131[0]+-1*P_110211001*R_140[0]+P_110211101*R_141[0];
double PR_010010002020=P_010010002*R_020[0]+-1*P_010010102*R_021[0]+P_010010202*R_022[0]+-1*P_010110002*R_030[0]+P_010110102*R_031[0]+-1*P_010110202*R_032[0]+-1*P_110010002*R_120[0]+P_110010102*R_121[0]+-1*P_110010202*R_122[0]+P_110110002*R_130[0]+-1*P_110110102*R_131[0]+P_110110202*R_132[0];
double PR_002020000020=P_002020000*R_020[0]+-1*P_002120000*R_030[0]+P_002220000*R_040[0]+-1*P_102020000*R_120[0]+P_102120000*R_130[0]+-1*P_102220000*R_140[0]+P_202020000*R_220[0]+-1*P_202120000*R_230[0]+P_202220000*R_240[0];
double PR_001021000020=P_001021000*R_020[0]+-1*P_001121000*R_030[0]+P_001221000*R_040[0]+-1*P_001321000*R_050[0]+-1*P_101021000*R_120[0]+P_101121000*R_130[0]+-1*P_101221000*R_140[0]+P_101321000*R_150[0];
double PR_000022000020=P_000022000*R_020[0]+-1*P_000122000*R_030[0]+P_000222000*R_040[0]+-1*P_000322000*R_050[0]+P_000422000*R_060[0];
double PR_001020001020=P_001020001*R_020[0]+-1*P_001020101*R_021[0]+-1*P_001120001*R_030[0]+P_001120101*R_031[0]+P_001220001*R_040[0]+-1*P_001220101*R_041[0]+-1*P_101020001*R_120[0]+P_101020101*R_121[0]+P_101120001*R_130[0]+-1*P_101120101*R_131[0]+-1*P_101220001*R_140[0]+P_101220101*R_141[0];
double PR_000021001020=P_000021001*R_020[0]+-1*P_000021101*R_021[0]+-1*P_000121001*R_030[0]+P_000121101*R_031[0]+P_000221001*R_040[0]+-1*P_000221101*R_041[0]+-1*P_000321001*R_050[0]+P_000321101*R_051[0];
double PR_000020002020=P_000020002*R_020[0]+-1*P_000020102*R_021[0]+P_000020202*R_022[0]+-1*P_000120002*R_030[0]+P_000120102*R_031[0]+-1*P_000120202*R_032[0]+P_000220002*R_040[0]+-1*P_000220102*R_041[0]+P_000220202*R_042[0];
double PR_012000010020=P_012000010*R_020[0]+-1*P_012000110*R_021[0]+-1*P_112000010*R_120[0]+P_112000110*R_121[0]+P_212000010*R_220[0]+-1*P_212000110*R_221[0]+-1*P_312000010*R_320[0]+P_312000110*R_321[0];
double PR_011001010020=P_011001010*R_020[0]+-1*P_011001110*R_021[0]+-1*P_011101010*R_030[0]+P_011101110*R_031[0]+-1*P_111001010*R_120[0]+P_111001110*R_121[0]+P_111101010*R_130[0]+-1*P_111101110*R_131[0]+P_211001010*R_220[0]+-1*P_211001110*R_221[0]+-1*P_211101010*R_230[0]+P_211101110*R_231[0];
double PR_010002010020=P_010002010*R_020[0]+-1*P_010002110*R_021[0]+-1*P_010102010*R_030[0]+P_010102110*R_031[0]+P_010202010*R_040[0]+-1*P_010202110*R_041[0]+-1*P_110002010*R_120[0]+P_110002110*R_121[0]+P_110102010*R_130[0]+-1*P_110102110*R_131[0]+-1*P_110202010*R_140[0]+P_110202110*R_141[0];
double PR_011000011020=P_011000011*R_020[0]+-1*P_011000111*R_021[0]+P_011000211*R_022[0]+-1*P_111000011*R_120[0]+P_111000111*R_121[0]+-1*P_111000211*R_122[0]+P_211000011*R_220[0]+-1*P_211000111*R_221[0]+P_211000211*R_222[0];
double PR_010001011020=P_010001011*R_020[0]+-1*P_010001111*R_021[0]+P_010001211*R_022[0]+-1*P_010101011*R_030[0]+P_010101111*R_031[0]+-1*P_010101211*R_032[0]+-1*P_110001011*R_120[0]+P_110001111*R_121[0]+-1*P_110001211*R_122[0]+P_110101011*R_130[0]+-1*P_110101111*R_131[0]+P_110101211*R_132[0];
double PR_010000012020=P_010000012*R_020[0]+-1*P_010000112*R_021[0]+P_010000212*R_022[0]+-1*P_010000312*R_023[0]+-1*P_110000012*R_120[0]+P_110000112*R_121[0]+-1*P_110000212*R_122[0]+P_110000312*R_123[0];
double PR_002010010020=P_002010010*R_020[0]+-1*P_002010110*R_021[0]+-1*P_002110010*R_030[0]+P_002110110*R_031[0]+-1*P_102010010*R_120[0]+P_102010110*R_121[0]+P_102110010*R_130[0]+-1*P_102110110*R_131[0]+P_202010010*R_220[0]+-1*P_202010110*R_221[0]+-1*P_202110010*R_230[0]+P_202110110*R_231[0];
double PR_001011010020=P_001011010*R_020[0]+-1*P_001011110*R_021[0]+-1*P_001111010*R_030[0]+P_001111110*R_031[0]+P_001211010*R_040[0]+-1*P_001211110*R_041[0]+-1*P_101011010*R_120[0]+P_101011110*R_121[0]+P_101111010*R_130[0]+-1*P_101111110*R_131[0]+-1*P_101211010*R_140[0]+P_101211110*R_141[0];
double PR_000012010020=P_000012010*R_020[0]+-1*P_000012110*R_021[0]+-1*P_000112010*R_030[0]+P_000112110*R_031[0]+P_000212010*R_040[0]+-1*P_000212110*R_041[0]+-1*P_000312010*R_050[0]+P_000312110*R_051[0];
double PR_001010011020=P_001010011*R_020[0]+-1*P_001010111*R_021[0]+P_001010211*R_022[0]+-1*P_001110011*R_030[0]+P_001110111*R_031[0]+-1*P_001110211*R_032[0]+-1*P_101010011*R_120[0]+P_101010111*R_121[0]+-1*P_101010211*R_122[0]+P_101110011*R_130[0]+-1*P_101110111*R_131[0]+P_101110211*R_132[0];
double PR_000011011020=P_000011011*R_020[0]+-1*P_000011111*R_021[0]+P_000011211*R_022[0]+-1*P_000111011*R_030[0]+P_000111111*R_031[0]+-1*P_000111211*R_032[0]+P_000211011*R_040[0]+-1*P_000211111*R_041[0]+P_000211211*R_042[0];
double PR_000010012020=P_000010012*R_020[0]+-1*P_000010112*R_021[0]+P_000010212*R_022[0]+-1*P_000010312*R_023[0]+-1*P_000110012*R_030[0]+P_000110112*R_031[0]+-1*P_000110212*R_032[0]+P_000110312*R_033[0];
double PR_002000020020=P_002000020*R_020[0]+-1*P_002000120*R_021[0]+P_002000220*R_022[0]+-1*P_102000020*R_120[0]+P_102000120*R_121[0]+-1*P_102000220*R_122[0]+P_202000020*R_220[0]+-1*P_202000120*R_221[0]+P_202000220*R_222[0];
double PR_001001020020=P_001001020*R_020[0]+-1*P_001001120*R_021[0]+P_001001220*R_022[0]+-1*P_001101020*R_030[0]+P_001101120*R_031[0]+-1*P_001101220*R_032[0]+-1*P_101001020*R_120[0]+P_101001120*R_121[0]+-1*P_101001220*R_122[0]+P_101101020*R_130[0]+-1*P_101101120*R_131[0]+P_101101220*R_132[0];
double PR_000002020020=P_000002020*R_020[0]+-1*P_000002120*R_021[0]+P_000002220*R_022[0]+-1*P_000102020*R_030[0]+P_000102120*R_031[0]+-1*P_000102220*R_032[0]+P_000202020*R_040[0]+-1*P_000202120*R_041[0]+P_000202220*R_042[0];
double PR_001000021020=P_001000021*R_020[0]+-1*P_001000121*R_021[0]+P_001000221*R_022[0]+-1*P_001000321*R_023[0]+-1*P_101000021*R_120[0]+P_101000121*R_121[0]+-1*P_101000221*R_122[0]+P_101000321*R_123[0];
double PR_000001021020=P_000001021*R_020[0]+-1*P_000001121*R_021[0]+P_000001221*R_022[0]+-1*P_000001321*R_023[0]+-1*P_000101021*R_030[0]+P_000101121*R_031[0]+-1*P_000101221*R_032[0]+P_000101321*R_033[0];
double PR_000000022020=P_000000022*R_020[0]+-1*P_000000122*R_021[0]+P_000000222*R_022[0]+-1*P_000000322*R_023[0]+P_000000422*R_024[0];
double PR_022000000101=P_022000000*R_101[0]+-1*P_122000000*R_201[0]+P_222000000*R_301[0]+-1*P_322000000*R_401[0]+P_422000000*R_501[0];
double PR_021001000101=P_021001000*R_101[0]+-1*P_021101000*R_111[0]+-1*P_121001000*R_201[0]+P_121101000*R_211[0]+P_221001000*R_301[0]+-1*P_221101000*R_311[0]+-1*P_321001000*R_401[0]+P_321101000*R_411[0];
double PR_020002000101=P_020002000*R_101[0]+-1*P_020102000*R_111[0]+P_020202000*R_121[0]+-1*P_120002000*R_201[0]+P_120102000*R_211[0]+-1*P_120202000*R_221[0]+P_220002000*R_301[0]+-1*P_220102000*R_311[0]+P_220202000*R_321[0];
double PR_021000001101=P_021000001*R_101[0]+-1*P_021000101*R_102[0]+-1*P_121000001*R_201[0]+P_121000101*R_202[0]+P_221000001*R_301[0]+-1*P_221000101*R_302[0]+-1*P_321000001*R_401[0]+P_321000101*R_402[0];
double PR_020001001101=P_020001001*R_101[0]+-1*P_020001101*R_102[0]+-1*P_020101001*R_111[0]+P_020101101*R_112[0]+-1*P_120001001*R_201[0]+P_120001101*R_202[0]+P_120101001*R_211[0]+-1*P_120101101*R_212[0]+P_220001001*R_301[0]+-1*P_220001101*R_302[0]+-1*P_220101001*R_311[0]+P_220101101*R_312[0];
double PR_020000002101=P_020000002*R_101[0]+-1*P_020000102*R_102[0]+P_020000202*R_103[0]+-1*P_120000002*R_201[0]+P_120000102*R_202[0]+-1*P_120000202*R_203[0]+P_220000002*R_301[0]+-1*P_220000102*R_302[0]+P_220000202*R_303[0];
double PR_012010000101=P_012010000*R_101[0]+-1*P_012110000*R_111[0]+-1*P_112010000*R_201[0]+P_112110000*R_211[0]+P_212010000*R_301[0]+-1*P_212110000*R_311[0]+-1*P_312010000*R_401[0]+P_312110000*R_411[0];
double PR_011011000101=P_011011000*R_101[0]+-1*P_011111000*R_111[0]+P_011211000*R_121[0]+-1*P_111011000*R_201[0]+P_111111000*R_211[0]+-1*P_111211000*R_221[0]+P_211011000*R_301[0]+-1*P_211111000*R_311[0]+P_211211000*R_321[0];
double PR_010012000101=P_010012000*R_101[0]+-1*P_010112000*R_111[0]+P_010212000*R_121[0]+-1*P_010312000*R_131[0]+-1*P_110012000*R_201[0]+P_110112000*R_211[0]+-1*P_110212000*R_221[0]+P_110312000*R_231[0];
double PR_011010001101=P_011010001*R_101[0]+-1*P_011010101*R_102[0]+-1*P_011110001*R_111[0]+P_011110101*R_112[0]+-1*P_111010001*R_201[0]+P_111010101*R_202[0]+P_111110001*R_211[0]+-1*P_111110101*R_212[0]+P_211010001*R_301[0]+-1*P_211010101*R_302[0]+-1*P_211110001*R_311[0]+P_211110101*R_312[0];
double PR_010011001101=P_010011001*R_101[0]+-1*P_010011101*R_102[0]+-1*P_010111001*R_111[0]+P_010111101*R_112[0]+P_010211001*R_121[0]+-1*P_010211101*R_122[0]+-1*P_110011001*R_201[0]+P_110011101*R_202[0]+P_110111001*R_211[0]+-1*P_110111101*R_212[0]+-1*P_110211001*R_221[0]+P_110211101*R_222[0];
double PR_010010002101=P_010010002*R_101[0]+-1*P_010010102*R_102[0]+P_010010202*R_103[0]+-1*P_010110002*R_111[0]+P_010110102*R_112[0]+-1*P_010110202*R_113[0]+-1*P_110010002*R_201[0]+P_110010102*R_202[0]+-1*P_110010202*R_203[0]+P_110110002*R_211[0]+-1*P_110110102*R_212[0]+P_110110202*R_213[0];
double PR_002020000101=P_002020000*R_101[0]+-1*P_002120000*R_111[0]+P_002220000*R_121[0]+-1*P_102020000*R_201[0]+P_102120000*R_211[0]+-1*P_102220000*R_221[0]+P_202020000*R_301[0]+-1*P_202120000*R_311[0]+P_202220000*R_321[0];
double PR_001021000101=P_001021000*R_101[0]+-1*P_001121000*R_111[0]+P_001221000*R_121[0]+-1*P_001321000*R_131[0]+-1*P_101021000*R_201[0]+P_101121000*R_211[0]+-1*P_101221000*R_221[0]+P_101321000*R_231[0];
double PR_000022000101=P_000022000*R_101[0]+-1*P_000122000*R_111[0]+P_000222000*R_121[0]+-1*P_000322000*R_131[0]+P_000422000*R_141[0];
double PR_001020001101=P_001020001*R_101[0]+-1*P_001020101*R_102[0]+-1*P_001120001*R_111[0]+P_001120101*R_112[0]+P_001220001*R_121[0]+-1*P_001220101*R_122[0]+-1*P_101020001*R_201[0]+P_101020101*R_202[0]+P_101120001*R_211[0]+-1*P_101120101*R_212[0]+-1*P_101220001*R_221[0]+P_101220101*R_222[0];
double PR_000021001101=P_000021001*R_101[0]+-1*P_000021101*R_102[0]+-1*P_000121001*R_111[0]+P_000121101*R_112[0]+P_000221001*R_121[0]+-1*P_000221101*R_122[0]+-1*P_000321001*R_131[0]+P_000321101*R_132[0];
double PR_000020002101=P_000020002*R_101[0]+-1*P_000020102*R_102[0]+P_000020202*R_103[0]+-1*P_000120002*R_111[0]+P_000120102*R_112[0]+-1*P_000120202*R_113[0]+P_000220002*R_121[0]+-1*P_000220102*R_122[0]+P_000220202*R_123[0];
double PR_012000010101=P_012000010*R_101[0]+-1*P_012000110*R_102[0]+-1*P_112000010*R_201[0]+P_112000110*R_202[0]+P_212000010*R_301[0]+-1*P_212000110*R_302[0]+-1*P_312000010*R_401[0]+P_312000110*R_402[0];
double PR_011001010101=P_011001010*R_101[0]+-1*P_011001110*R_102[0]+-1*P_011101010*R_111[0]+P_011101110*R_112[0]+-1*P_111001010*R_201[0]+P_111001110*R_202[0]+P_111101010*R_211[0]+-1*P_111101110*R_212[0]+P_211001010*R_301[0]+-1*P_211001110*R_302[0]+-1*P_211101010*R_311[0]+P_211101110*R_312[0];
double PR_010002010101=P_010002010*R_101[0]+-1*P_010002110*R_102[0]+-1*P_010102010*R_111[0]+P_010102110*R_112[0]+P_010202010*R_121[0]+-1*P_010202110*R_122[0]+-1*P_110002010*R_201[0]+P_110002110*R_202[0]+P_110102010*R_211[0]+-1*P_110102110*R_212[0]+-1*P_110202010*R_221[0]+P_110202110*R_222[0];
double PR_011000011101=P_011000011*R_101[0]+-1*P_011000111*R_102[0]+P_011000211*R_103[0]+-1*P_111000011*R_201[0]+P_111000111*R_202[0]+-1*P_111000211*R_203[0]+P_211000011*R_301[0]+-1*P_211000111*R_302[0]+P_211000211*R_303[0];
double PR_010001011101=P_010001011*R_101[0]+-1*P_010001111*R_102[0]+P_010001211*R_103[0]+-1*P_010101011*R_111[0]+P_010101111*R_112[0]+-1*P_010101211*R_113[0]+-1*P_110001011*R_201[0]+P_110001111*R_202[0]+-1*P_110001211*R_203[0]+P_110101011*R_211[0]+-1*P_110101111*R_212[0]+P_110101211*R_213[0];
double PR_010000012101=P_010000012*R_101[0]+-1*P_010000112*R_102[0]+P_010000212*R_103[0]+-1*P_010000312*R_104[0]+-1*P_110000012*R_201[0]+P_110000112*R_202[0]+-1*P_110000212*R_203[0]+P_110000312*R_204[0];
double PR_002010010101=P_002010010*R_101[0]+-1*P_002010110*R_102[0]+-1*P_002110010*R_111[0]+P_002110110*R_112[0]+-1*P_102010010*R_201[0]+P_102010110*R_202[0]+P_102110010*R_211[0]+-1*P_102110110*R_212[0]+P_202010010*R_301[0]+-1*P_202010110*R_302[0]+-1*P_202110010*R_311[0]+P_202110110*R_312[0];
double PR_001011010101=P_001011010*R_101[0]+-1*P_001011110*R_102[0]+-1*P_001111010*R_111[0]+P_001111110*R_112[0]+P_001211010*R_121[0]+-1*P_001211110*R_122[0]+-1*P_101011010*R_201[0]+P_101011110*R_202[0]+P_101111010*R_211[0]+-1*P_101111110*R_212[0]+-1*P_101211010*R_221[0]+P_101211110*R_222[0];
double PR_000012010101=P_000012010*R_101[0]+-1*P_000012110*R_102[0]+-1*P_000112010*R_111[0]+P_000112110*R_112[0]+P_000212010*R_121[0]+-1*P_000212110*R_122[0]+-1*P_000312010*R_131[0]+P_000312110*R_132[0];
double PR_001010011101=P_001010011*R_101[0]+-1*P_001010111*R_102[0]+P_001010211*R_103[0]+-1*P_001110011*R_111[0]+P_001110111*R_112[0]+-1*P_001110211*R_113[0]+-1*P_101010011*R_201[0]+P_101010111*R_202[0]+-1*P_101010211*R_203[0]+P_101110011*R_211[0]+-1*P_101110111*R_212[0]+P_101110211*R_213[0];
double PR_000011011101=P_000011011*R_101[0]+-1*P_000011111*R_102[0]+P_000011211*R_103[0]+-1*P_000111011*R_111[0]+P_000111111*R_112[0]+-1*P_000111211*R_113[0]+P_000211011*R_121[0]+-1*P_000211111*R_122[0]+P_000211211*R_123[0];
double PR_000010012101=P_000010012*R_101[0]+-1*P_000010112*R_102[0]+P_000010212*R_103[0]+-1*P_000010312*R_104[0]+-1*P_000110012*R_111[0]+P_000110112*R_112[0]+-1*P_000110212*R_113[0]+P_000110312*R_114[0];
double PR_002000020101=P_002000020*R_101[0]+-1*P_002000120*R_102[0]+P_002000220*R_103[0]+-1*P_102000020*R_201[0]+P_102000120*R_202[0]+-1*P_102000220*R_203[0]+P_202000020*R_301[0]+-1*P_202000120*R_302[0]+P_202000220*R_303[0];
double PR_001001020101=P_001001020*R_101[0]+-1*P_001001120*R_102[0]+P_001001220*R_103[0]+-1*P_001101020*R_111[0]+P_001101120*R_112[0]+-1*P_001101220*R_113[0]+-1*P_101001020*R_201[0]+P_101001120*R_202[0]+-1*P_101001220*R_203[0]+P_101101020*R_211[0]+-1*P_101101120*R_212[0]+P_101101220*R_213[0];
double PR_000002020101=P_000002020*R_101[0]+-1*P_000002120*R_102[0]+P_000002220*R_103[0]+-1*P_000102020*R_111[0]+P_000102120*R_112[0]+-1*P_000102220*R_113[0]+P_000202020*R_121[0]+-1*P_000202120*R_122[0]+P_000202220*R_123[0];
double PR_001000021101=P_001000021*R_101[0]+-1*P_001000121*R_102[0]+P_001000221*R_103[0]+-1*P_001000321*R_104[0]+-1*P_101000021*R_201[0]+P_101000121*R_202[0]+-1*P_101000221*R_203[0]+P_101000321*R_204[0];
double PR_000001021101=P_000001021*R_101[0]+-1*P_000001121*R_102[0]+P_000001221*R_103[0]+-1*P_000001321*R_104[0]+-1*P_000101021*R_111[0]+P_000101121*R_112[0]+-1*P_000101221*R_113[0]+P_000101321*R_114[0];
double PR_000000022101=P_000000022*R_101[0]+-1*P_000000122*R_102[0]+P_000000222*R_103[0]+-1*P_000000322*R_104[0]+P_000000422*R_105[0];
double PR_022000000110=P_022000000*R_110[0]+-1*P_122000000*R_210[0]+P_222000000*R_310[0]+-1*P_322000000*R_410[0]+P_422000000*R_510[0];
double PR_021001000110=P_021001000*R_110[0]+-1*P_021101000*R_120[0]+-1*P_121001000*R_210[0]+P_121101000*R_220[0]+P_221001000*R_310[0]+-1*P_221101000*R_320[0]+-1*P_321001000*R_410[0]+P_321101000*R_420[0];
double PR_020002000110=P_020002000*R_110[0]+-1*P_020102000*R_120[0]+P_020202000*R_130[0]+-1*P_120002000*R_210[0]+P_120102000*R_220[0]+-1*P_120202000*R_230[0]+P_220002000*R_310[0]+-1*P_220102000*R_320[0]+P_220202000*R_330[0];
double PR_021000001110=P_021000001*R_110[0]+-1*P_021000101*R_111[0]+-1*P_121000001*R_210[0]+P_121000101*R_211[0]+P_221000001*R_310[0]+-1*P_221000101*R_311[0]+-1*P_321000001*R_410[0]+P_321000101*R_411[0];
double PR_020001001110=P_020001001*R_110[0]+-1*P_020001101*R_111[0]+-1*P_020101001*R_120[0]+P_020101101*R_121[0]+-1*P_120001001*R_210[0]+P_120001101*R_211[0]+P_120101001*R_220[0]+-1*P_120101101*R_221[0]+P_220001001*R_310[0]+-1*P_220001101*R_311[0]+-1*P_220101001*R_320[0]+P_220101101*R_321[0];
double PR_020000002110=P_020000002*R_110[0]+-1*P_020000102*R_111[0]+P_020000202*R_112[0]+-1*P_120000002*R_210[0]+P_120000102*R_211[0]+-1*P_120000202*R_212[0]+P_220000002*R_310[0]+-1*P_220000102*R_311[0]+P_220000202*R_312[0];
double PR_012010000110=P_012010000*R_110[0]+-1*P_012110000*R_120[0]+-1*P_112010000*R_210[0]+P_112110000*R_220[0]+P_212010000*R_310[0]+-1*P_212110000*R_320[0]+-1*P_312010000*R_410[0]+P_312110000*R_420[0];
double PR_011011000110=P_011011000*R_110[0]+-1*P_011111000*R_120[0]+P_011211000*R_130[0]+-1*P_111011000*R_210[0]+P_111111000*R_220[0]+-1*P_111211000*R_230[0]+P_211011000*R_310[0]+-1*P_211111000*R_320[0]+P_211211000*R_330[0];
double PR_010012000110=P_010012000*R_110[0]+-1*P_010112000*R_120[0]+P_010212000*R_130[0]+-1*P_010312000*R_140[0]+-1*P_110012000*R_210[0]+P_110112000*R_220[0]+-1*P_110212000*R_230[0]+P_110312000*R_240[0];
double PR_011010001110=P_011010001*R_110[0]+-1*P_011010101*R_111[0]+-1*P_011110001*R_120[0]+P_011110101*R_121[0]+-1*P_111010001*R_210[0]+P_111010101*R_211[0]+P_111110001*R_220[0]+-1*P_111110101*R_221[0]+P_211010001*R_310[0]+-1*P_211010101*R_311[0]+-1*P_211110001*R_320[0]+P_211110101*R_321[0];
double PR_010011001110=P_010011001*R_110[0]+-1*P_010011101*R_111[0]+-1*P_010111001*R_120[0]+P_010111101*R_121[0]+P_010211001*R_130[0]+-1*P_010211101*R_131[0]+-1*P_110011001*R_210[0]+P_110011101*R_211[0]+P_110111001*R_220[0]+-1*P_110111101*R_221[0]+-1*P_110211001*R_230[0]+P_110211101*R_231[0];
double PR_010010002110=P_010010002*R_110[0]+-1*P_010010102*R_111[0]+P_010010202*R_112[0]+-1*P_010110002*R_120[0]+P_010110102*R_121[0]+-1*P_010110202*R_122[0]+-1*P_110010002*R_210[0]+P_110010102*R_211[0]+-1*P_110010202*R_212[0]+P_110110002*R_220[0]+-1*P_110110102*R_221[0]+P_110110202*R_222[0];
double PR_002020000110=P_002020000*R_110[0]+-1*P_002120000*R_120[0]+P_002220000*R_130[0]+-1*P_102020000*R_210[0]+P_102120000*R_220[0]+-1*P_102220000*R_230[0]+P_202020000*R_310[0]+-1*P_202120000*R_320[0]+P_202220000*R_330[0];
double PR_001021000110=P_001021000*R_110[0]+-1*P_001121000*R_120[0]+P_001221000*R_130[0]+-1*P_001321000*R_140[0]+-1*P_101021000*R_210[0]+P_101121000*R_220[0]+-1*P_101221000*R_230[0]+P_101321000*R_240[0];
double PR_000022000110=P_000022000*R_110[0]+-1*P_000122000*R_120[0]+P_000222000*R_130[0]+-1*P_000322000*R_140[0]+P_000422000*R_150[0];
double PR_001020001110=P_001020001*R_110[0]+-1*P_001020101*R_111[0]+-1*P_001120001*R_120[0]+P_001120101*R_121[0]+P_001220001*R_130[0]+-1*P_001220101*R_131[0]+-1*P_101020001*R_210[0]+P_101020101*R_211[0]+P_101120001*R_220[0]+-1*P_101120101*R_221[0]+-1*P_101220001*R_230[0]+P_101220101*R_231[0];
double PR_000021001110=P_000021001*R_110[0]+-1*P_000021101*R_111[0]+-1*P_000121001*R_120[0]+P_000121101*R_121[0]+P_000221001*R_130[0]+-1*P_000221101*R_131[0]+-1*P_000321001*R_140[0]+P_000321101*R_141[0];
double PR_000020002110=P_000020002*R_110[0]+-1*P_000020102*R_111[0]+P_000020202*R_112[0]+-1*P_000120002*R_120[0]+P_000120102*R_121[0]+-1*P_000120202*R_122[0]+P_000220002*R_130[0]+-1*P_000220102*R_131[0]+P_000220202*R_132[0];
double PR_012000010110=P_012000010*R_110[0]+-1*P_012000110*R_111[0]+-1*P_112000010*R_210[0]+P_112000110*R_211[0]+P_212000010*R_310[0]+-1*P_212000110*R_311[0]+-1*P_312000010*R_410[0]+P_312000110*R_411[0];
double PR_011001010110=P_011001010*R_110[0]+-1*P_011001110*R_111[0]+-1*P_011101010*R_120[0]+P_011101110*R_121[0]+-1*P_111001010*R_210[0]+P_111001110*R_211[0]+P_111101010*R_220[0]+-1*P_111101110*R_221[0]+P_211001010*R_310[0]+-1*P_211001110*R_311[0]+-1*P_211101010*R_320[0]+P_211101110*R_321[0];
double PR_010002010110=P_010002010*R_110[0]+-1*P_010002110*R_111[0]+-1*P_010102010*R_120[0]+P_010102110*R_121[0]+P_010202010*R_130[0]+-1*P_010202110*R_131[0]+-1*P_110002010*R_210[0]+P_110002110*R_211[0]+P_110102010*R_220[0]+-1*P_110102110*R_221[0]+-1*P_110202010*R_230[0]+P_110202110*R_231[0];
double PR_011000011110=P_011000011*R_110[0]+-1*P_011000111*R_111[0]+P_011000211*R_112[0]+-1*P_111000011*R_210[0]+P_111000111*R_211[0]+-1*P_111000211*R_212[0]+P_211000011*R_310[0]+-1*P_211000111*R_311[0]+P_211000211*R_312[0];
double PR_010001011110=P_010001011*R_110[0]+-1*P_010001111*R_111[0]+P_010001211*R_112[0]+-1*P_010101011*R_120[0]+P_010101111*R_121[0]+-1*P_010101211*R_122[0]+-1*P_110001011*R_210[0]+P_110001111*R_211[0]+-1*P_110001211*R_212[0]+P_110101011*R_220[0]+-1*P_110101111*R_221[0]+P_110101211*R_222[0];
double PR_010000012110=P_010000012*R_110[0]+-1*P_010000112*R_111[0]+P_010000212*R_112[0]+-1*P_010000312*R_113[0]+-1*P_110000012*R_210[0]+P_110000112*R_211[0]+-1*P_110000212*R_212[0]+P_110000312*R_213[0];
double PR_002010010110=P_002010010*R_110[0]+-1*P_002010110*R_111[0]+-1*P_002110010*R_120[0]+P_002110110*R_121[0]+-1*P_102010010*R_210[0]+P_102010110*R_211[0]+P_102110010*R_220[0]+-1*P_102110110*R_221[0]+P_202010010*R_310[0]+-1*P_202010110*R_311[0]+-1*P_202110010*R_320[0]+P_202110110*R_321[0];
double PR_001011010110=P_001011010*R_110[0]+-1*P_001011110*R_111[0]+-1*P_001111010*R_120[0]+P_001111110*R_121[0]+P_001211010*R_130[0]+-1*P_001211110*R_131[0]+-1*P_101011010*R_210[0]+P_101011110*R_211[0]+P_101111010*R_220[0]+-1*P_101111110*R_221[0]+-1*P_101211010*R_230[0]+P_101211110*R_231[0];
double PR_000012010110=P_000012010*R_110[0]+-1*P_000012110*R_111[0]+-1*P_000112010*R_120[0]+P_000112110*R_121[0]+P_000212010*R_130[0]+-1*P_000212110*R_131[0]+-1*P_000312010*R_140[0]+P_000312110*R_141[0];
double PR_001010011110=P_001010011*R_110[0]+-1*P_001010111*R_111[0]+P_001010211*R_112[0]+-1*P_001110011*R_120[0]+P_001110111*R_121[0]+-1*P_001110211*R_122[0]+-1*P_101010011*R_210[0]+P_101010111*R_211[0]+-1*P_101010211*R_212[0]+P_101110011*R_220[0]+-1*P_101110111*R_221[0]+P_101110211*R_222[0];
double PR_000011011110=P_000011011*R_110[0]+-1*P_000011111*R_111[0]+P_000011211*R_112[0]+-1*P_000111011*R_120[0]+P_000111111*R_121[0]+-1*P_000111211*R_122[0]+P_000211011*R_130[0]+-1*P_000211111*R_131[0]+P_000211211*R_132[0];
double PR_000010012110=P_000010012*R_110[0]+-1*P_000010112*R_111[0]+P_000010212*R_112[0]+-1*P_000010312*R_113[0]+-1*P_000110012*R_120[0]+P_000110112*R_121[0]+-1*P_000110212*R_122[0]+P_000110312*R_123[0];
double PR_002000020110=P_002000020*R_110[0]+-1*P_002000120*R_111[0]+P_002000220*R_112[0]+-1*P_102000020*R_210[0]+P_102000120*R_211[0]+-1*P_102000220*R_212[0]+P_202000020*R_310[0]+-1*P_202000120*R_311[0]+P_202000220*R_312[0];
double PR_001001020110=P_001001020*R_110[0]+-1*P_001001120*R_111[0]+P_001001220*R_112[0]+-1*P_001101020*R_120[0]+P_001101120*R_121[0]+-1*P_001101220*R_122[0]+-1*P_101001020*R_210[0]+P_101001120*R_211[0]+-1*P_101001220*R_212[0]+P_101101020*R_220[0]+-1*P_101101120*R_221[0]+P_101101220*R_222[0];
double PR_000002020110=P_000002020*R_110[0]+-1*P_000002120*R_111[0]+P_000002220*R_112[0]+-1*P_000102020*R_120[0]+P_000102120*R_121[0]+-1*P_000102220*R_122[0]+P_000202020*R_130[0]+-1*P_000202120*R_131[0]+P_000202220*R_132[0];
double PR_001000021110=P_001000021*R_110[0]+-1*P_001000121*R_111[0]+P_001000221*R_112[0]+-1*P_001000321*R_113[0]+-1*P_101000021*R_210[0]+P_101000121*R_211[0]+-1*P_101000221*R_212[0]+P_101000321*R_213[0];
double PR_000001021110=P_000001021*R_110[0]+-1*P_000001121*R_111[0]+P_000001221*R_112[0]+-1*P_000001321*R_113[0]+-1*P_000101021*R_120[0]+P_000101121*R_121[0]+-1*P_000101221*R_122[0]+P_000101321*R_123[0];
double PR_000000022110=P_000000022*R_110[0]+-1*P_000000122*R_111[0]+P_000000222*R_112[0]+-1*P_000000322*R_113[0]+P_000000422*R_114[0];
double PR_022000000200=P_022000000*R_200[0]+-1*P_122000000*R_300[0]+P_222000000*R_400[0]+-1*P_322000000*R_500[0]+P_422000000*R_600[0];
double PR_021001000200=P_021001000*R_200[0]+-1*P_021101000*R_210[0]+-1*P_121001000*R_300[0]+P_121101000*R_310[0]+P_221001000*R_400[0]+-1*P_221101000*R_410[0]+-1*P_321001000*R_500[0]+P_321101000*R_510[0];
double PR_020002000200=P_020002000*R_200[0]+-1*P_020102000*R_210[0]+P_020202000*R_220[0]+-1*P_120002000*R_300[0]+P_120102000*R_310[0]+-1*P_120202000*R_320[0]+P_220002000*R_400[0]+-1*P_220102000*R_410[0]+P_220202000*R_420[0];
double PR_021000001200=P_021000001*R_200[0]+-1*P_021000101*R_201[0]+-1*P_121000001*R_300[0]+P_121000101*R_301[0]+P_221000001*R_400[0]+-1*P_221000101*R_401[0]+-1*P_321000001*R_500[0]+P_321000101*R_501[0];
double PR_020001001200=P_020001001*R_200[0]+-1*P_020001101*R_201[0]+-1*P_020101001*R_210[0]+P_020101101*R_211[0]+-1*P_120001001*R_300[0]+P_120001101*R_301[0]+P_120101001*R_310[0]+-1*P_120101101*R_311[0]+P_220001001*R_400[0]+-1*P_220001101*R_401[0]+-1*P_220101001*R_410[0]+P_220101101*R_411[0];
double PR_020000002200=P_020000002*R_200[0]+-1*P_020000102*R_201[0]+P_020000202*R_202[0]+-1*P_120000002*R_300[0]+P_120000102*R_301[0]+-1*P_120000202*R_302[0]+P_220000002*R_400[0]+-1*P_220000102*R_401[0]+P_220000202*R_402[0];
double PR_012010000200=P_012010000*R_200[0]+-1*P_012110000*R_210[0]+-1*P_112010000*R_300[0]+P_112110000*R_310[0]+P_212010000*R_400[0]+-1*P_212110000*R_410[0]+-1*P_312010000*R_500[0]+P_312110000*R_510[0];
double PR_011011000200=P_011011000*R_200[0]+-1*P_011111000*R_210[0]+P_011211000*R_220[0]+-1*P_111011000*R_300[0]+P_111111000*R_310[0]+-1*P_111211000*R_320[0]+P_211011000*R_400[0]+-1*P_211111000*R_410[0]+P_211211000*R_420[0];
double PR_010012000200=P_010012000*R_200[0]+-1*P_010112000*R_210[0]+P_010212000*R_220[0]+-1*P_010312000*R_230[0]+-1*P_110012000*R_300[0]+P_110112000*R_310[0]+-1*P_110212000*R_320[0]+P_110312000*R_330[0];
double PR_011010001200=P_011010001*R_200[0]+-1*P_011010101*R_201[0]+-1*P_011110001*R_210[0]+P_011110101*R_211[0]+-1*P_111010001*R_300[0]+P_111010101*R_301[0]+P_111110001*R_310[0]+-1*P_111110101*R_311[0]+P_211010001*R_400[0]+-1*P_211010101*R_401[0]+-1*P_211110001*R_410[0]+P_211110101*R_411[0];
double PR_010011001200=P_010011001*R_200[0]+-1*P_010011101*R_201[0]+-1*P_010111001*R_210[0]+P_010111101*R_211[0]+P_010211001*R_220[0]+-1*P_010211101*R_221[0]+-1*P_110011001*R_300[0]+P_110011101*R_301[0]+P_110111001*R_310[0]+-1*P_110111101*R_311[0]+-1*P_110211001*R_320[0]+P_110211101*R_321[0];
double PR_010010002200=P_010010002*R_200[0]+-1*P_010010102*R_201[0]+P_010010202*R_202[0]+-1*P_010110002*R_210[0]+P_010110102*R_211[0]+-1*P_010110202*R_212[0]+-1*P_110010002*R_300[0]+P_110010102*R_301[0]+-1*P_110010202*R_302[0]+P_110110002*R_310[0]+-1*P_110110102*R_311[0]+P_110110202*R_312[0];
double PR_002020000200=P_002020000*R_200[0]+-1*P_002120000*R_210[0]+P_002220000*R_220[0]+-1*P_102020000*R_300[0]+P_102120000*R_310[0]+-1*P_102220000*R_320[0]+P_202020000*R_400[0]+-1*P_202120000*R_410[0]+P_202220000*R_420[0];
double PR_001021000200=P_001021000*R_200[0]+-1*P_001121000*R_210[0]+P_001221000*R_220[0]+-1*P_001321000*R_230[0]+-1*P_101021000*R_300[0]+P_101121000*R_310[0]+-1*P_101221000*R_320[0]+P_101321000*R_330[0];
double PR_000022000200=P_000022000*R_200[0]+-1*P_000122000*R_210[0]+P_000222000*R_220[0]+-1*P_000322000*R_230[0]+P_000422000*R_240[0];
double PR_001020001200=P_001020001*R_200[0]+-1*P_001020101*R_201[0]+-1*P_001120001*R_210[0]+P_001120101*R_211[0]+P_001220001*R_220[0]+-1*P_001220101*R_221[0]+-1*P_101020001*R_300[0]+P_101020101*R_301[0]+P_101120001*R_310[0]+-1*P_101120101*R_311[0]+-1*P_101220001*R_320[0]+P_101220101*R_321[0];
double PR_000021001200=P_000021001*R_200[0]+-1*P_000021101*R_201[0]+-1*P_000121001*R_210[0]+P_000121101*R_211[0]+P_000221001*R_220[0]+-1*P_000221101*R_221[0]+-1*P_000321001*R_230[0]+P_000321101*R_231[0];
double PR_000020002200=P_000020002*R_200[0]+-1*P_000020102*R_201[0]+P_000020202*R_202[0]+-1*P_000120002*R_210[0]+P_000120102*R_211[0]+-1*P_000120202*R_212[0]+P_000220002*R_220[0]+-1*P_000220102*R_221[0]+P_000220202*R_222[0];
double PR_012000010200=P_012000010*R_200[0]+-1*P_012000110*R_201[0]+-1*P_112000010*R_300[0]+P_112000110*R_301[0]+P_212000010*R_400[0]+-1*P_212000110*R_401[0]+-1*P_312000010*R_500[0]+P_312000110*R_501[0];
double PR_011001010200=P_011001010*R_200[0]+-1*P_011001110*R_201[0]+-1*P_011101010*R_210[0]+P_011101110*R_211[0]+-1*P_111001010*R_300[0]+P_111001110*R_301[0]+P_111101010*R_310[0]+-1*P_111101110*R_311[0]+P_211001010*R_400[0]+-1*P_211001110*R_401[0]+-1*P_211101010*R_410[0]+P_211101110*R_411[0];
double PR_010002010200=P_010002010*R_200[0]+-1*P_010002110*R_201[0]+-1*P_010102010*R_210[0]+P_010102110*R_211[0]+P_010202010*R_220[0]+-1*P_010202110*R_221[0]+-1*P_110002010*R_300[0]+P_110002110*R_301[0]+P_110102010*R_310[0]+-1*P_110102110*R_311[0]+-1*P_110202010*R_320[0]+P_110202110*R_321[0];
double PR_011000011200=P_011000011*R_200[0]+-1*P_011000111*R_201[0]+P_011000211*R_202[0]+-1*P_111000011*R_300[0]+P_111000111*R_301[0]+-1*P_111000211*R_302[0]+P_211000011*R_400[0]+-1*P_211000111*R_401[0]+P_211000211*R_402[0];
double PR_010001011200=P_010001011*R_200[0]+-1*P_010001111*R_201[0]+P_010001211*R_202[0]+-1*P_010101011*R_210[0]+P_010101111*R_211[0]+-1*P_010101211*R_212[0]+-1*P_110001011*R_300[0]+P_110001111*R_301[0]+-1*P_110001211*R_302[0]+P_110101011*R_310[0]+-1*P_110101111*R_311[0]+P_110101211*R_312[0];
double PR_010000012200=P_010000012*R_200[0]+-1*P_010000112*R_201[0]+P_010000212*R_202[0]+-1*P_010000312*R_203[0]+-1*P_110000012*R_300[0]+P_110000112*R_301[0]+-1*P_110000212*R_302[0]+P_110000312*R_303[0];
double PR_002010010200=P_002010010*R_200[0]+-1*P_002010110*R_201[0]+-1*P_002110010*R_210[0]+P_002110110*R_211[0]+-1*P_102010010*R_300[0]+P_102010110*R_301[0]+P_102110010*R_310[0]+-1*P_102110110*R_311[0]+P_202010010*R_400[0]+-1*P_202010110*R_401[0]+-1*P_202110010*R_410[0]+P_202110110*R_411[0];
double PR_001011010200=P_001011010*R_200[0]+-1*P_001011110*R_201[0]+-1*P_001111010*R_210[0]+P_001111110*R_211[0]+P_001211010*R_220[0]+-1*P_001211110*R_221[0]+-1*P_101011010*R_300[0]+P_101011110*R_301[0]+P_101111010*R_310[0]+-1*P_101111110*R_311[0]+-1*P_101211010*R_320[0]+P_101211110*R_321[0];
double PR_000012010200=P_000012010*R_200[0]+-1*P_000012110*R_201[0]+-1*P_000112010*R_210[0]+P_000112110*R_211[0]+P_000212010*R_220[0]+-1*P_000212110*R_221[0]+-1*P_000312010*R_230[0]+P_000312110*R_231[0];
double PR_001010011200=P_001010011*R_200[0]+-1*P_001010111*R_201[0]+P_001010211*R_202[0]+-1*P_001110011*R_210[0]+P_001110111*R_211[0]+-1*P_001110211*R_212[0]+-1*P_101010011*R_300[0]+P_101010111*R_301[0]+-1*P_101010211*R_302[0]+P_101110011*R_310[0]+-1*P_101110111*R_311[0]+P_101110211*R_312[0];
double PR_000011011200=P_000011011*R_200[0]+-1*P_000011111*R_201[0]+P_000011211*R_202[0]+-1*P_000111011*R_210[0]+P_000111111*R_211[0]+-1*P_000111211*R_212[0]+P_000211011*R_220[0]+-1*P_000211111*R_221[0]+P_000211211*R_222[0];
double PR_000010012200=P_000010012*R_200[0]+-1*P_000010112*R_201[0]+P_000010212*R_202[0]+-1*P_000010312*R_203[0]+-1*P_000110012*R_210[0]+P_000110112*R_211[0]+-1*P_000110212*R_212[0]+P_000110312*R_213[0];
double PR_002000020200=P_002000020*R_200[0]+-1*P_002000120*R_201[0]+P_002000220*R_202[0]+-1*P_102000020*R_300[0]+P_102000120*R_301[0]+-1*P_102000220*R_302[0]+P_202000020*R_400[0]+-1*P_202000120*R_401[0]+P_202000220*R_402[0];
double PR_001001020200=P_001001020*R_200[0]+-1*P_001001120*R_201[0]+P_001001220*R_202[0]+-1*P_001101020*R_210[0]+P_001101120*R_211[0]+-1*P_001101220*R_212[0]+-1*P_101001020*R_300[0]+P_101001120*R_301[0]+-1*P_101001220*R_302[0]+P_101101020*R_310[0]+-1*P_101101120*R_311[0]+P_101101220*R_312[0];
double PR_000002020200=P_000002020*R_200[0]+-1*P_000002120*R_201[0]+P_000002220*R_202[0]+-1*P_000102020*R_210[0]+P_000102120*R_211[0]+-1*P_000102220*R_212[0]+P_000202020*R_220[0]+-1*P_000202120*R_221[0]+P_000202220*R_222[0];
double PR_001000021200=P_001000021*R_200[0]+-1*P_001000121*R_201[0]+P_001000221*R_202[0]+-1*P_001000321*R_203[0]+-1*P_101000021*R_300[0]+P_101000121*R_301[0]+-1*P_101000221*R_302[0]+P_101000321*R_303[0];
double PR_000001021200=P_000001021*R_200[0]+-1*P_000001121*R_201[0]+P_000001221*R_202[0]+-1*P_000001321*R_203[0]+-1*P_000101021*R_210[0]+P_000101121*R_211[0]+-1*P_000101221*R_212[0]+P_000101321*R_213[0];
double PR_000000022200=P_000000022*R_200[0]+-1*P_000000122*R_201[0]+P_000000222*R_202[0]+-1*P_000000322*R_203[0]+P_000000422*R_204[0];
// --- Ket-side Hermite expansion coefficients (McMurchie-Davidson style) ---
// Qd_001[] and Qd_010[] are first-order per-direction coefficients defined
// earlier in this kernel (outside this view); aQin1 is a scalar also set
// earlier (presumably 1/(2*Eta) for the ket pair -- TODO confirm upstream).
// Index convention appears to be Qd_<t><i><j>: (i,j) = angular momenta of the
// two ket functions in one Cartesian direction, t = Hermite order -- verify
// against the generator that produced this file.
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
// Qd_101 and Qd_110 are direction-independent: the same scalar aQin1 in x/y/z.
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
// Product coefficients for the (1,1) angular pair, built from the
// first-order coefficients by the visible recurrences below:
//   order 0: Qd_011 = Qd_101 + Qd_010*Qd_001
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
//   order 1: Qd_111 = Qd_010*Qd_101 + aQin1*Qd_001
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
//   order 2: Qd_211 = aQin1*Qd_101
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
// --- Scalar Q coefficients: products of the per-direction Qd values ---
// Naming Q_<xxx><yyy><zzz>: each 3-digit group selects a Qd coefficient for
// the x, y, z component respectively (000 -> factor omitted, i.e. 1).
// These multiply the PR_* terms in the ans_temp accumulation that follows.
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
ans_temp[ans_id*18+0]+=Pmtrx[0]*(Q_011000000*PR_022000000000+Q_111000000*PR_022000000100+Q_211000000*PR_022000000200);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(Q_010001000*PR_022000000000+Q_010101000*PR_022000000010+Q_110001000*PR_022000000100+Q_110101000*PR_022000000110);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(Q_010000001*PR_022000000000+Q_010000101*PR_022000000001+Q_110000001*PR_022000000100+Q_110000101*PR_022000000101);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(Q_001010000*PR_022000000000+Q_001110000*PR_022000000010+Q_101010000*PR_022000000100+Q_101110000*PR_022000000110);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(Q_000011000*PR_022000000000+Q_000111000*PR_022000000010+Q_000211000*PR_022000000020);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(Q_000010001*PR_022000000000+Q_000010101*PR_022000000001+Q_000110001*PR_022000000010+Q_000110101*PR_022000000011);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(Q_001000010*PR_022000000000+Q_001000110*PR_022000000001+Q_101000010*PR_022000000100+Q_101000110*PR_022000000101);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(Q_000001010*PR_022000000000+Q_000001110*PR_022000000001+Q_000101010*PR_022000000010+Q_000101110*PR_022000000011);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(Q_000000011*PR_022000000000+Q_000000111*PR_022000000001+Q_000000211*PR_022000000002);
ans_temp[ans_id*18+0]+=Pmtrx[3]*(Q_011000000*PR_021001000000+Q_111000000*PR_021001000100+Q_211000000*PR_021001000200);
ans_temp[ans_id*18+0]+=Pmtrx[4]*(Q_010001000*PR_021001000000+Q_010101000*PR_021001000010+Q_110001000*PR_021001000100+Q_110101000*PR_021001000110);
ans_temp[ans_id*18+0]+=Pmtrx[5]*(Q_010000001*PR_021001000000+Q_010000101*PR_021001000001+Q_110000001*PR_021001000100+Q_110000101*PR_021001000101);
ans_temp[ans_id*18+1]+=Pmtrx[3]*(Q_001010000*PR_021001000000+Q_001110000*PR_021001000010+Q_101010000*PR_021001000100+Q_101110000*PR_021001000110);
ans_temp[ans_id*18+1]+=Pmtrx[4]*(Q_000011000*PR_021001000000+Q_000111000*PR_021001000010+Q_000211000*PR_021001000020);
ans_temp[ans_id*18+1]+=Pmtrx[5]*(Q_000010001*PR_021001000000+Q_000010101*PR_021001000001+Q_000110001*PR_021001000010+Q_000110101*PR_021001000011);
ans_temp[ans_id*18+2]+=Pmtrx[3]*(Q_001000010*PR_021001000000+Q_001000110*PR_021001000001+Q_101000010*PR_021001000100+Q_101000110*PR_021001000101);
ans_temp[ans_id*18+2]+=Pmtrx[4]*(Q_000001010*PR_021001000000+Q_000001110*PR_021001000001+Q_000101010*PR_021001000010+Q_000101110*PR_021001000011);
ans_temp[ans_id*18+2]+=Pmtrx[5]*(Q_000000011*PR_021001000000+Q_000000111*PR_021001000001+Q_000000211*PR_021001000002);
ans_temp[ans_id*18+0]+=Pmtrx[6]*(Q_011000000*PR_020002000000+Q_111000000*PR_020002000100+Q_211000000*PR_020002000200);
ans_temp[ans_id*18+0]+=Pmtrx[7]*(Q_010001000*PR_020002000000+Q_010101000*PR_020002000010+Q_110001000*PR_020002000100+Q_110101000*PR_020002000110);
ans_temp[ans_id*18+0]+=Pmtrx[8]*(Q_010000001*PR_020002000000+Q_010000101*PR_020002000001+Q_110000001*PR_020002000100+Q_110000101*PR_020002000101);
ans_temp[ans_id*18+1]+=Pmtrx[6]*(Q_001010000*PR_020002000000+Q_001110000*PR_020002000010+Q_101010000*PR_020002000100+Q_101110000*PR_020002000110);
ans_temp[ans_id*18+1]+=Pmtrx[7]*(Q_000011000*PR_020002000000+Q_000111000*PR_020002000010+Q_000211000*PR_020002000020);
ans_temp[ans_id*18+1]+=Pmtrx[8]*(Q_000010001*PR_020002000000+Q_000010101*PR_020002000001+Q_000110001*PR_020002000010+Q_000110101*PR_020002000011);
ans_temp[ans_id*18+2]+=Pmtrx[6]*(Q_001000010*PR_020002000000+Q_001000110*PR_020002000001+Q_101000010*PR_020002000100+Q_101000110*PR_020002000101);
ans_temp[ans_id*18+2]+=Pmtrx[7]*(Q_000001010*PR_020002000000+Q_000001110*PR_020002000001+Q_000101010*PR_020002000010+Q_000101110*PR_020002000011);
ans_temp[ans_id*18+2]+=Pmtrx[8]*(Q_000000011*PR_020002000000+Q_000000111*PR_020002000001+Q_000000211*PR_020002000002);
ans_temp[ans_id*18+0]+=Pmtrx[9]*(Q_011000000*PR_021000001000+Q_111000000*PR_021000001100+Q_211000000*PR_021000001200);
ans_temp[ans_id*18+0]+=Pmtrx[10]*(Q_010001000*PR_021000001000+Q_010101000*PR_021000001010+Q_110001000*PR_021000001100+Q_110101000*PR_021000001110);
ans_temp[ans_id*18+0]+=Pmtrx[11]*(Q_010000001*PR_021000001000+Q_010000101*PR_021000001001+Q_110000001*PR_021000001100+Q_110000101*PR_021000001101);
ans_temp[ans_id*18+1]+=Pmtrx[9]*(Q_001010000*PR_021000001000+Q_001110000*PR_021000001010+Q_101010000*PR_021000001100+Q_101110000*PR_021000001110);
ans_temp[ans_id*18+1]+=Pmtrx[10]*(Q_000011000*PR_021000001000+Q_000111000*PR_021000001010+Q_000211000*PR_021000001020);
ans_temp[ans_id*18+1]+=Pmtrx[11]*(Q_000010001*PR_021000001000+Q_000010101*PR_021000001001+Q_000110001*PR_021000001010+Q_000110101*PR_021000001011);
ans_temp[ans_id*18+2]+=Pmtrx[9]*(Q_001000010*PR_021000001000+Q_001000110*PR_021000001001+Q_101000010*PR_021000001100+Q_101000110*PR_021000001101);
ans_temp[ans_id*18+2]+=Pmtrx[10]*(Q_000001010*PR_021000001000+Q_000001110*PR_021000001001+Q_000101010*PR_021000001010+Q_000101110*PR_021000001011);
ans_temp[ans_id*18+2]+=Pmtrx[11]*(Q_000000011*PR_021000001000+Q_000000111*PR_021000001001+Q_000000211*PR_021000001002);
ans_temp[ans_id*18+0]+=Pmtrx[12]*(Q_011000000*PR_020001001000+Q_111000000*PR_020001001100+Q_211000000*PR_020001001200);
ans_temp[ans_id*18+0]+=Pmtrx[13]*(Q_010001000*PR_020001001000+Q_010101000*PR_020001001010+Q_110001000*PR_020001001100+Q_110101000*PR_020001001110);
ans_temp[ans_id*18+0]+=Pmtrx[14]*(Q_010000001*PR_020001001000+Q_010000101*PR_020001001001+Q_110000001*PR_020001001100+Q_110000101*PR_020001001101);
ans_temp[ans_id*18+1]+=Pmtrx[12]*(Q_001010000*PR_020001001000+Q_001110000*PR_020001001010+Q_101010000*PR_020001001100+Q_101110000*PR_020001001110);
ans_temp[ans_id*18+1]+=Pmtrx[13]*(Q_000011000*PR_020001001000+Q_000111000*PR_020001001010+Q_000211000*PR_020001001020);
ans_temp[ans_id*18+1]+=Pmtrx[14]*(Q_000010001*PR_020001001000+Q_000010101*PR_020001001001+Q_000110001*PR_020001001010+Q_000110101*PR_020001001011);
ans_temp[ans_id*18+2]+=Pmtrx[12]*(Q_001000010*PR_020001001000+Q_001000110*PR_020001001001+Q_101000010*PR_020001001100+Q_101000110*PR_020001001101);
ans_temp[ans_id*18+2]+=Pmtrx[13]*(Q_000001010*PR_020001001000+Q_000001110*PR_020001001001+Q_000101010*PR_020001001010+Q_000101110*PR_020001001011);
ans_temp[ans_id*18+2]+=Pmtrx[14]*(Q_000000011*PR_020001001000+Q_000000111*PR_020001001001+Q_000000211*PR_020001001002);
ans_temp[ans_id*18+0]+=Pmtrx[15]*(Q_011000000*PR_020000002000+Q_111000000*PR_020000002100+Q_211000000*PR_020000002200);
ans_temp[ans_id*18+0]+=Pmtrx[16]*(Q_010001000*PR_020000002000+Q_010101000*PR_020000002010+Q_110001000*PR_020000002100+Q_110101000*PR_020000002110);
ans_temp[ans_id*18+0]+=Pmtrx[17]*(Q_010000001*PR_020000002000+Q_010000101*PR_020000002001+Q_110000001*PR_020000002100+Q_110000101*PR_020000002101);
ans_temp[ans_id*18+1]+=Pmtrx[15]*(Q_001010000*PR_020000002000+Q_001110000*PR_020000002010+Q_101010000*PR_020000002100+Q_101110000*PR_020000002110);
ans_temp[ans_id*18+1]+=Pmtrx[16]*(Q_000011000*PR_020000002000+Q_000111000*PR_020000002010+Q_000211000*PR_020000002020);
ans_temp[ans_id*18+1]+=Pmtrx[17]*(Q_000010001*PR_020000002000+Q_000010101*PR_020000002001+Q_000110001*PR_020000002010+Q_000110101*PR_020000002011);
ans_temp[ans_id*18+2]+=Pmtrx[15]*(Q_001000010*PR_020000002000+Q_001000110*PR_020000002001+Q_101000010*PR_020000002100+Q_101000110*PR_020000002101);
ans_temp[ans_id*18+2]+=Pmtrx[16]*(Q_000001010*PR_020000002000+Q_000001110*PR_020000002001+Q_000101010*PR_020000002010+Q_000101110*PR_020000002011);
ans_temp[ans_id*18+2]+=Pmtrx[17]*(Q_000000011*PR_020000002000+Q_000000111*PR_020000002001+Q_000000211*PR_020000002002);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(Q_011000000*PR_012010000000+Q_111000000*PR_012010000100+Q_211000000*PR_012010000200);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(Q_010001000*PR_012010000000+Q_010101000*PR_012010000010+Q_110001000*PR_012010000100+Q_110101000*PR_012010000110);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(Q_010000001*PR_012010000000+Q_010000101*PR_012010000001+Q_110000001*PR_012010000100+Q_110000101*PR_012010000101);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(Q_001010000*PR_012010000000+Q_001110000*PR_012010000010+Q_101010000*PR_012010000100+Q_101110000*PR_012010000110);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(Q_000011000*PR_012010000000+Q_000111000*PR_012010000010+Q_000211000*PR_012010000020);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(Q_000010001*PR_012010000000+Q_000010101*PR_012010000001+Q_000110001*PR_012010000010+Q_000110101*PR_012010000011);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(Q_001000010*PR_012010000000+Q_001000110*PR_012010000001+Q_101000010*PR_012010000100+Q_101000110*PR_012010000101);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(Q_000001010*PR_012010000000+Q_000001110*PR_012010000001+Q_000101010*PR_012010000010+Q_000101110*PR_012010000011);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(Q_000000011*PR_012010000000+Q_000000111*PR_012010000001+Q_000000211*PR_012010000002);
ans_temp[ans_id*18+3]+=Pmtrx[3]*(Q_011000000*PR_011011000000+Q_111000000*PR_011011000100+Q_211000000*PR_011011000200);
ans_temp[ans_id*18+3]+=Pmtrx[4]*(Q_010001000*PR_011011000000+Q_010101000*PR_011011000010+Q_110001000*PR_011011000100+Q_110101000*PR_011011000110);
ans_temp[ans_id*18+3]+=Pmtrx[5]*(Q_010000001*PR_011011000000+Q_010000101*PR_011011000001+Q_110000001*PR_011011000100+Q_110000101*PR_011011000101);
ans_temp[ans_id*18+4]+=Pmtrx[3]*(Q_001010000*PR_011011000000+Q_001110000*PR_011011000010+Q_101010000*PR_011011000100+Q_101110000*PR_011011000110);
ans_temp[ans_id*18+4]+=Pmtrx[4]*(Q_000011000*PR_011011000000+Q_000111000*PR_011011000010+Q_000211000*PR_011011000020);
ans_temp[ans_id*18+4]+=Pmtrx[5]*(Q_000010001*PR_011011000000+Q_000010101*PR_011011000001+Q_000110001*PR_011011000010+Q_000110101*PR_011011000011);
ans_temp[ans_id*18+5]+=Pmtrx[3]*(Q_001000010*PR_011011000000+Q_001000110*PR_011011000001+Q_101000010*PR_011011000100+Q_101000110*PR_011011000101);
ans_temp[ans_id*18+5]+=Pmtrx[4]*(Q_000001010*PR_011011000000+Q_000001110*PR_011011000001+Q_000101010*PR_011011000010+Q_000101110*PR_011011000011);
ans_temp[ans_id*18+5]+=Pmtrx[5]*(Q_000000011*PR_011011000000+Q_000000111*PR_011011000001+Q_000000211*PR_011011000002);
ans_temp[ans_id*18+3]+=Pmtrx[6]*(Q_011000000*PR_010012000000+Q_111000000*PR_010012000100+Q_211000000*PR_010012000200);
ans_temp[ans_id*18+3]+=Pmtrx[7]*(Q_010001000*PR_010012000000+Q_010101000*PR_010012000010+Q_110001000*PR_010012000100+Q_110101000*PR_010012000110);
ans_temp[ans_id*18+3]+=Pmtrx[8]*(Q_010000001*PR_010012000000+Q_010000101*PR_010012000001+Q_110000001*PR_010012000100+Q_110000101*PR_010012000101);
ans_temp[ans_id*18+4]+=Pmtrx[6]*(Q_001010000*PR_010012000000+Q_001110000*PR_010012000010+Q_101010000*PR_010012000100+Q_101110000*PR_010012000110);
ans_temp[ans_id*18+4]+=Pmtrx[7]*(Q_000011000*PR_010012000000+Q_000111000*PR_010012000010+Q_000211000*PR_010012000020);
ans_temp[ans_id*18+4]+=Pmtrx[8]*(Q_000010001*PR_010012000000+Q_000010101*PR_010012000001+Q_000110001*PR_010012000010+Q_000110101*PR_010012000011);
ans_temp[ans_id*18+5]+=Pmtrx[6]*(Q_001000010*PR_010012000000+Q_001000110*PR_010012000001+Q_101000010*PR_010012000100+Q_101000110*PR_010012000101);
ans_temp[ans_id*18+5]+=Pmtrx[7]*(Q_000001010*PR_010012000000+Q_000001110*PR_010012000001+Q_000101010*PR_010012000010+Q_000101110*PR_010012000011);
ans_temp[ans_id*18+5]+=Pmtrx[8]*(Q_000000011*PR_010012000000+Q_000000111*PR_010012000001+Q_000000211*PR_010012000002);
ans_temp[ans_id*18+3]+=Pmtrx[9]*(Q_011000000*PR_011010001000+Q_111000000*PR_011010001100+Q_211000000*PR_011010001200);
ans_temp[ans_id*18+3]+=Pmtrx[10]*(Q_010001000*PR_011010001000+Q_010101000*PR_011010001010+Q_110001000*PR_011010001100+Q_110101000*PR_011010001110);
ans_temp[ans_id*18+3]+=Pmtrx[11]*(Q_010000001*PR_011010001000+Q_010000101*PR_011010001001+Q_110000001*PR_011010001100+Q_110000101*PR_011010001101);
ans_temp[ans_id*18+4]+=Pmtrx[9]*(Q_001010000*PR_011010001000+Q_001110000*PR_011010001010+Q_101010000*PR_011010001100+Q_101110000*PR_011010001110);
ans_temp[ans_id*18+4]+=Pmtrx[10]*(Q_000011000*PR_011010001000+Q_000111000*PR_011010001010+Q_000211000*PR_011010001020);
ans_temp[ans_id*18+4]+=Pmtrx[11]*(Q_000010001*PR_011010001000+Q_000010101*PR_011010001001+Q_000110001*PR_011010001010+Q_000110101*PR_011010001011);
ans_temp[ans_id*18+5]+=Pmtrx[9]*(Q_001000010*PR_011010001000+Q_001000110*PR_011010001001+Q_101000010*PR_011010001100+Q_101000110*PR_011010001101);
ans_temp[ans_id*18+5]+=Pmtrx[10]*(Q_000001010*PR_011010001000+Q_000001110*PR_011010001001+Q_000101010*PR_011010001010+Q_000101110*PR_011010001011);
ans_temp[ans_id*18+5]+=Pmtrx[11]*(Q_000000011*PR_011010001000+Q_000000111*PR_011010001001+Q_000000211*PR_011010001002);
ans_temp[ans_id*18+3]+=Pmtrx[12]*(Q_011000000*PR_010011001000+Q_111000000*PR_010011001100+Q_211000000*PR_010011001200);
ans_temp[ans_id*18+3]+=Pmtrx[13]*(Q_010001000*PR_010011001000+Q_010101000*PR_010011001010+Q_110001000*PR_010011001100+Q_110101000*PR_010011001110);
ans_temp[ans_id*18+3]+=Pmtrx[14]*(Q_010000001*PR_010011001000+Q_010000101*PR_010011001001+Q_110000001*PR_010011001100+Q_110000101*PR_010011001101);
ans_temp[ans_id*18+4]+=Pmtrx[12]*(Q_001010000*PR_010011001000+Q_001110000*PR_010011001010+Q_101010000*PR_010011001100+Q_101110000*PR_010011001110);
ans_temp[ans_id*18+4]+=Pmtrx[13]*(Q_000011000*PR_010011001000+Q_000111000*PR_010011001010+Q_000211000*PR_010011001020);
ans_temp[ans_id*18+4]+=Pmtrx[14]*(Q_000010001*PR_010011001000+Q_000010101*PR_010011001001+Q_000110001*PR_010011001010+Q_000110101*PR_010011001011);
ans_temp[ans_id*18+5]+=Pmtrx[12]*(Q_001000010*PR_010011001000+Q_001000110*PR_010011001001+Q_101000010*PR_010011001100+Q_101000110*PR_010011001101);
ans_temp[ans_id*18+5]+=Pmtrx[13]*(Q_000001010*PR_010011001000+Q_000001110*PR_010011001001+Q_000101010*PR_010011001010+Q_000101110*PR_010011001011);
ans_temp[ans_id*18+5]+=Pmtrx[14]*(Q_000000011*PR_010011001000+Q_000000111*PR_010011001001+Q_000000211*PR_010011001002);
ans_temp[ans_id*18+3]+=Pmtrx[15]*(Q_011000000*PR_010010002000+Q_111000000*PR_010010002100+Q_211000000*PR_010010002200);
ans_temp[ans_id*18+3]+=Pmtrx[16]*(Q_010001000*PR_010010002000+Q_010101000*PR_010010002010+Q_110001000*PR_010010002100+Q_110101000*PR_010010002110);
ans_temp[ans_id*18+3]+=Pmtrx[17]*(Q_010000001*PR_010010002000+Q_010000101*PR_010010002001+Q_110000001*PR_010010002100+Q_110000101*PR_010010002101);
ans_temp[ans_id*18+4]+=Pmtrx[15]*(Q_001010000*PR_010010002000+Q_001110000*PR_010010002010+Q_101010000*PR_010010002100+Q_101110000*PR_010010002110);
ans_temp[ans_id*18+4]+=Pmtrx[16]*(Q_000011000*PR_010010002000+Q_000111000*PR_010010002010+Q_000211000*PR_010010002020);
ans_temp[ans_id*18+4]+=Pmtrx[17]*(Q_000010001*PR_010010002000+Q_000010101*PR_010010002001+Q_000110001*PR_010010002010+Q_000110101*PR_010010002011);
ans_temp[ans_id*18+5]+=Pmtrx[15]*(Q_001000010*PR_010010002000+Q_001000110*PR_010010002001+Q_101000010*PR_010010002100+Q_101000110*PR_010010002101);
ans_temp[ans_id*18+5]+=Pmtrx[16]*(Q_000001010*PR_010010002000+Q_000001110*PR_010010002001+Q_000101010*PR_010010002010+Q_000101110*PR_010010002011);
ans_temp[ans_id*18+5]+=Pmtrx[17]*(Q_000000011*PR_010010002000+Q_000000111*PR_010010002001+Q_000000211*PR_010010002002);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(Q_011000000*PR_002020000000+Q_111000000*PR_002020000100+Q_211000000*PR_002020000200);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(Q_010001000*PR_002020000000+Q_010101000*PR_002020000010+Q_110001000*PR_002020000100+Q_110101000*PR_002020000110);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(Q_010000001*PR_002020000000+Q_010000101*PR_002020000001+Q_110000001*PR_002020000100+Q_110000101*PR_002020000101);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(Q_001010000*PR_002020000000+Q_001110000*PR_002020000010+Q_101010000*PR_002020000100+Q_101110000*PR_002020000110);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(Q_000011000*PR_002020000000+Q_000111000*PR_002020000010+Q_000211000*PR_002020000020);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(Q_000010001*PR_002020000000+Q_000010101*PR_002020000001+Q_000110001*PR_002020000010+Q_000110101*PR_002020000011);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(Q_001000010*PR_002020000000+Q_001000110*PR_002020000001+Q_101000010*PR_002020000100+Q_101000110*PR_002020000101);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(Q_000001010*PR_002020000000+Q_000001110*PR_002020000001+Q_000101010*PR_002020000010+Q_000101110*PR_002020000011);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(Q_000000011*PR_002020000000+Q_000000111*PR_002020000001+Q_000000211*PR_002020000002);
ans_temp[ans_id*18+6]+=Pmtrx[3]*(Q_011000000*PR_001021000000+Q_111000000*PR_001021000100+Q_211000000*PR_001021000200);
ans_temp[ans_id*18+6]+=Pmtrx[4]*(Q_010001000*PR_001021000000+Q_010101000*PR_001021000010+Q_110001000*PR_001021000100+Q_110101000*PR_001021000110);
ans_temp[ans_id*18+6]+=Pmtrx[5]*(Q_010000001*PR_001021000000+Q_010000101*PR_001021000001+Q_110000001*PR_001021000100+Q_110000101*PR_001021000101);
ans_temp[ans_id*18+7]+=Pmtrx[3]*(Q_001010000*PR_001021000000+Q_001110000*PR_001021000010+Q_101010000*PR_001021000100+Q_101110000*PR_001021000110);
ans_temp[ans_id*18+7]+=Pmtrx[4]*(Q_000011000*PR_001021000000+Q_000111000*PR_001021000010+Q_000211000*PR_001021000020);
ans_temp[ans_id*18+7]+=Pmtrx[5]*(Q_000010001*PR_001021000000+Q_000010101*PR_001021000001+Q_000110001*PR_001021000010+Q_000110101*PR_001021000011);
ans_temp[ans_id*18+8]+=Pmtrx[3]*(Q_001000010*PR_001021000000+Q_001000110*PR_001021000001+Q_101000010*PR_001021000100+Q_101000110*PR_001021000101);
ans_temp[ans_id*18+8]+=Pmtrx[4]*(Q_000001010*PR_001021000000+Q_000001110*PR_001021000001+Q_000101010*PR_001021000010+Q_000101110*PR_001021000011);
ans_temp[ans_id*18+8]+=Pmtrx[5]*(Q_000000011*PR_001021000000+Q_000000111*PR_001021000001+Q_000000211*PR_001021000002);
ans_temp[ans_id*18+6]+=Pmtrx[6]*(Q_011000000*PR_000022000000+Q_111000000*PR_000022000100+Q_211000000*PR_000022000200);
ans_temp[ans_id*18+6]+=Pmtrx[7]*(Q_010001000*PR_000022000000+Q_010101000*PR_000022000010+Q_110001000*PR_000022000100+Q_110101000*PR_000022000110);
ans_temp[ans_id*18+6]+=Pmtrx[8]*(Q_010000001*PR_000022000000+Q_010000101*PR_000022000001+Q_110000001*PR_000022000100+Q_110000101*PR_000022000101);
ans_temp[ans_id*18+7]+=Pmtrx[6]*(Q_001010000*PR_000022000000+Q_001110000*PR_000022000010+Q_101010000*PR_000022000100+Q_101110000*PR_000022000110);
ans_temp[ans_id*18+7]+=Pmtrx[7]*(Q_000011000*PR_000022000000+Q_000111000*PR_000022000010+Q_000211000*PR_000022000020);
ans_temp[ans_id*18+7]+=Pmtrx[8]*(Q_000010001*PR_000022000000+Q_000010101*PR_000022000001+Q_000110001*PR_000022000010+Q_000110101*PR_000022000011);
ans_temp[ans_id*18+8]+=Pmtrx[6]*(Q_001000010*PR_000022000000+Q_001000110*PR_000022000001+Q_101000010*PR_000022000100+Q_101000110*PR_000022000101);
ans_temp[ans_id*18+8]+=Pmtrx[7]*(Q_000001010*PR_000022000000+Q_000001110*PR_000022000001+Q_000101010*PR_000022000010+Q_000101110*PR_000022000011);
ans_temp[ans_id*18+8]+=Pmtrx[8]*(Q_000000011*PR_000022000000+Q_000000111*PR_000022000001+Q_000000211*PR_000022000002);
ans_temp[ans_id*18+6]+=Pmtrx[9]*(Q_011000000*PR_001020001000+Q_111000000*PR_001020001100+Q_211000000*PR_001020001200);
ans_temp[ans_id*18+6]+=Pmtrx[10]*(Q_010001000*PR_001020001000+Q_010101000*PR_001020001010+Q_110001000*PR_001020001100+Q_110101000*PR_001020001110);
ans_temp[ans_id*18+6]+=Pmtrx[11]*(Q_010000001*PR_001020001000+Q_010000101*PR_001020001001+Q_110000001*PR_001020001100+Q_110000101*PR_001020001101);
ans_temp[ans_id*18+7]+=Pmtrx[9]*(Q_001010000*PR_001020001000+Q_001110000*PR_001020001010+Q_101010000*PR_001020001100+Q_101110000*PR_001020001110);
ans_temp[ans_id*18+7]+=Pmtrx[10]*(Q_000011000*PR_001020001000+Q_000111000*PR_001020001010+Q_000211000*PR_001020001020);
ans_temp[ans_id*18+7]+=Pmtrx[11]*(Q_000010001*PR_001020001000+Q_000010101*PR_001020001001+Q_000110001*PR_001020001010+Q_000110101*PR_001020001011);
ans_temp[ans_id*18+8]+=Pmtrx[9]*(Q_001000010*PR_001020001000+Q_001000110*PR_001020001001+Q_101000010*PR_001020001100+Q_101000110*PR_001020001101);
ans_temp[ans_id*18+8]+=Pmtrx[10]*(Q_000001010*PR_001020001000+Q_000001110*PR_001020001001+Q_000101010*PR_001020001010+Q_000101110*PR_001020001011);
ans_temp[ans_id*18+8]+=Pmtrx[11]*(Q_000000011*PR_001020001000+Q_000000111*PR_001020001001+Q_000000211*PR_001020001002);
ans_temp[ans_id*18+6]+=Pmtrx[12]*(Q_011000000*PR_000021001000+Q_111000000*PR_000021001100+Q_211000000*PR_000021001200);
ans_temp[ans_id*18+6]+=Pmtrx[13]*(Q_010001000*PR_000021001000+Q_010101000*PR_000021001010+Q_110001000*PR_000021001100+Q_110101000*PR_000021001110);
ans_temp[ans_id*18+6]+=Pmtrx[14]*(Q_010000001*PR_000021001000+Q_010000101*PR_000021001001+Q_110000001*PR_000021001100+Q_110000101*PR_000021001101);
ans_temp[ans_id*18+7]+=Pmtrx[12]*(Q_001010000*PR_000021001000+Q_001110000*PR_000021001010+Q_101010000*PR_000021001100+Q_101110000*PR_000021001110);
ans_temp[ans_id*18+7]+=Pmtrx[13]*(Q_000011000*PR_000021001000+Q_000111000*PR_000021001010+Q_000211000*PR_000021001020);
ans_temp[ans_id*18+7]+=Pmtrx[14]*(Q_000010001*PR_000021001000+Q_000010101*PR_000021001001+Q_000110001*PR_000021001010+Q_000110101*PR_000021001011);
ans_temp[ans_id*18+8]+=Pmtrx[12]*(Q_001000010*PR_000021001000+Q_001000110*PR_000021001001+Q_101000010*PR_000021001100+Q_101000110*PR_000021001101);
ans_temp[ans_id*18+8]+=Pmtrx[13]*(Q_000001010*PR_000021001000+Q_000001110*PR_000021001001+Q_000101010*PR_000021001010+Q_000101110*PR_000021001011);
ans_temp[ans_id*18+8]+=Pmtrx[14]*(Q_000000011*PR_000021001000+Q_000000111*PR_000021001001+Q_000000211*PR_000021001002);
ans_temp[ans_id*18+6]+=Pmtrx[15]*(Q_011000000*PR_000020002000+Q_111000000*PR_000020002100+Q_211000000*PR_000020002200);
ans_temp[ans_id*18+6]+=Pmtrx[16]*(Q_010001000*PR_000020002000+Q_010101000*PR_000020002010+Q_110001000*PR_000020002100+Q_110101000*PR_000020002110);
ans_temp[ans_id*18+6]+=Pmtrx[17]*(Q_010000001*PR_000020002000+Q_010000101*PR_000020002001+Q_110000001*PR_000020002100+Q_110000101*PR_000020002101);
ans_temp[ans_id*18+7]+=Pmtrx[15]*(Q_001010000*PR_000020002000+Q_001110000*PR_000020002010+Q_101010000*PR_000020002100+Q_101110000*PR_000020002110);
ans_temp[ans_id*18+7]+=Pmtrx[16]*(Q_000011000*PR_000020002000+Q_000111000*PR_000020002010+Q_000211000*PR_000020002020);
ans_temp[ans_id*18+7]+=Pmtrx[17]*(Q_000010001*PR_000020002000+Q_000010101*PR_000020002001+Q_000110001*PR_000020002010+Q_000110101*PR_000020002011);
ans_temp[ans_id*18+8]+=Pmtrx[15]*(Q_001000010*PR_000020002000+Q_001000110*PR_000020002001+Q_101000010*PR_000020002100+Q_101000110*PR_000020002101);
ans_temp[ans_id*18+8]+=Pmtrx[16]*(Q_000001010*PR_000020002000+Q_000001110*PR_000020002001+Q_000101010*PR_000020002010+Q_000101110*PR_000020002011);
ans_temp[ans_id*18+8]+=Pmtrx[17]*(Q_000000011*PR_000020002000+Q_000000111*PR_000020002001+Q_000000211*PR_000020002002);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(Q_011000000*PR_012000010000+Q_111000000*PR_012000010100+Q_211000000*PR_012000010200);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(Q_010001000*PR_012000010000+Q_010101000*PR_012000010010+Q_110001000*PR_012000010100+Q_110101000*PR_012000010110);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(Q_010000001*PR_012000010000+Q_010000101*PR_012000010001+Q_110000001*PR_012000010100+Q_110000101*PR_012000010101);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(Q_001010000*PR_012000010000+Q_001110000*PR_012000010010+Q_101010000*PR_012000010100+Q_101110000*PR_012000010110);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(Q_000011000*PR_012000010000+Q_000111000*PR_012000010010+Q_000211000*PR_012000010020);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(Q_000010001*PR_012000010000+Q_000010101*PR_012000010001+Q_000110001*PR_012000010010+Q_000110101*PR_012000010011);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(Q_001000010*PR_012000010000+Q_001000110*PR_012000010001+Q_101000010*PR_012000010100+Q_101000110*PR_012000010101);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(Q_000001010*PR_012000010000+Q_000001110*PR_012000010001+Q_000101010*PR_012000010010+Q_000101110*PR_012000010011);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(Q_000000011*PR_012000010000+Q_000000111*PR_012000010001+Q_000000211*PR_012000010002);
ans_temp[ans_id*18+9]+=Pmtrx[3]*(Q_011000000*PR_011001010000+Q_111000000*PR_011001010100+Q_211000000*PR_011001010200);
ans_temp[ans_id*18+9]+=Pmtrx[4]*(Q_010001000*PR_011001010000+Q_010101000*PR_011001010010+Q_110001000*PR_011001010100+Q_110101000*PR_011001010110);
ans_temp[ans_id*18+9]+=Pmtrx[5]*(Q_010000001*PR_011001010000+Q_010000101*PR_011001010001+Q_110000001*PR_011001010100+Q_110000101*PR_011001010101);
ans_temp[ans_id*18+10]+=Pmtrx[3]*(Q_001010000*PR_011001010000+Q_001110000*PR_011001010010+Q_101010000*PR_011001010100+Q_101110000*PR_011001010110);
ans_temp[ans_id*18+10]+=Pmtrx[4]*(Q_000011000*PR_011001010000+Q_000111000*PR_011001010010+Q_000211000*PR_011001010020);
ans_temp[ans_id*18+10]+=Pmtrx[5]*(Q_000010001*PR_011001010000+Q_000010101*PR_011001010001+Q_000110001*PR_011001010010+Q_000110101*PR_011001010011);
ans_temp[ans_id*18+11]+=Pmtrx[3]*(Q_001000010*PR_011001010000+Q_001000110*PR_011001010001+Q_101000010*PR_011001010100+Q_101000110*PR_011001010101);
ans_temp[ans_id*18+11]+=Pmtrx[4]*(Q_000001010*PR_011001010000+Q_000001110*PR_011001010001+Q_000101010*PR_011001010010+Q_000101110*PR_011001010011);
ans_temp[ans_id*18+11]+=Pmtrx[5]*(Q_000000011*PR_011001010000+Q_000000111*PR_011001010001+Q_000000211*PR_011001010002);
ans_temp[ans_id*18+9]+=Pmtrx[6]*(Q_011000000*PR_010002010000+Q_111000000*PR_010002010100+Q_211000000*PR_010002010200);
ans_temp[ans_id*18+9]+=Pmtrx[7]*(Q_010001000*PR_010002010000+Q_010101000*PR_010002010010+Q_110001000*PR_010002010100+Q_110101000*PR_010002010110);
ans_temp[ans_id*18+9]+=Pmtrx[8]*(Q_010000001*PR_010002010000+Q_010000101*PR_010002010001+Q_110000001*PR_010002010100+Q_110000101*PR_010002010101);
ans_temp[ans_id*18+10]+=Pmtrx[6]*(Q_001010000*PR_010002010000+Q_001110000*PR_010002010010+Q_101010000*PR_010002010100+Q_101110000*PR_010002010110);
ans_temp[ans_id*18+10]+=Pmtrx[7]*(Q_000011000*PR_010002010000+Q_000111000*PR_010002010010+Q_000211000*PR_010002010020);
ans_temp[ans_id*18+10]+=Pmtrx[8]*(Q_000010001*PR_010002010000+Q_000010101*PR_010002010001+Q_000110001*PR_010002010010+Q_000110101*PR_010002010011);
ans_temp[ans_id*18+11]+=Pmtrx[6]*(Q_001000010*PR_010002010000+Q_001000110*PR_010002010001+Q_101000010*PR_010002010100+Q_101000110*PR_010002010101);
ans_temp[ans_id*18+11]+=Pmtrx[7]*(Q_000001010*PR_010002010000+Q_000001110*PR_010002010001+Q_000101010*PR_010002010010+Q_000101110*PR_010002010011);
ans_temp[ans_id*18+11]+=Pmtrx[8]*(Q_000000011*PR_010002010000+Q_000000111*PR_010002010001+Q_000000211*PR_010002010002);
ans_temp[ans_id*18+9]+=Pmtrx[9]*(Q_011000000*PR_011000011000+Q_111000000*PR_011000011100+Q_211000000*PR_011000011200);
ans_temp[ans_id*18+9]+=Pmtrx[10]*(Q_010001000*PR_011000011000+Q_010101000*PR_011000011010+Q_110001000*PR_011000011100+Q_110101000*PR_011000011110);
ans_temp[ans_id*18+9]+=Pmtrx[11]*(Q_010000001*PR_011000011000+Q_010000101*PR_011000011001+Q_110000001*PR_011000011100+Q_110000101*PR_011000011101);
ans_temp[ans_id*18+10]+=Pmtrx[9]*(Q_001010000*PR_011000011000+Q_001110000*PR_011000011010+Q_101010000*PR_011000011100+Q_101110000*PR_011000011110);
ans_temp[ans_id*18+10]+=Pmtrx[10]*(Q_000011000*PR_011000011000+Q_000111000*PR_011000011010+Q_000211000*PR_011000011020);
ans_temp[ans_id*18+10]+=Pmtrx[11]*(Q_000010001*PR_011000011000+Q_000010101*PR_011000011001+Q_000110001*PR_011000011010+Q_000110101*PR_011000011011);
ans_temp[ans_id*18+11]+=Pmtrx[9]*(Q_001000010*PR_011000011000+Q_001000110*PR_011000011001+Q_101000010*PR_011000011100+Q_101000110*PR_011000011101);
ans_temp[ans_id*18+11]+=Pmtrx[10]*(Q_000001010*PR_011000011000+Q_000001110*PR_011000011001+Q_000101010*PR_011000011010+Q_000101110*PR_011000011011);
ans_temp[ans_id*18+11]+=Pmtrx[11]*(Q_000000011*PR_011000011000+Q_000000111*PR_011000011001+Q_000000211*PR_011000011002);
ans_temp[ans_id*18+9]+=Pmtrx[12]*(Q_011000000*PR_010001011000+Q_111000000*PR_010001011100+Q_211000000*PR_010001011200);
ans_temp[ans_id*18+9]+=Pmtrx[13]*(Q_010001000*PR_010001011000+Q_010101000*PR_010001011010+Q_110001000*PR_010001011100+Q_110101000*PR_010001011110);
ans_temp[ans_id*18+9]+=Pmtrx[14]*(Q_010000001*PR_010001011000+Q_010000101*PR_010001011001+Q_110000001*PR_010001011100+Q_110000101*PR_010001011101);
ans_temp[ans_id*18+10]+=Pmtrx[12]*(Q_001010000*PR_010001011000+Q_001110000*PR_010001011010+Q_101010000*PR_010001011100+Q_101110000*PR_010001011110);
ans_temp[ans_id*18+10]+=Pmtrx[13]*(Q_000011000*PR_010001011000+Q_000111000*PR_010001011010+Q_000211000*PR_010001011020);
ans_temp[ans_id*18+10]+=Pmtrx[14]*(Q_000010001*PR_010001011000+Q_000010101*PR_010001011001+Q_000110001*PR_010001011010+Q_000110101*PR_010001011011);
ans_temp[ans_id*18+11]+=Pmtrx[12]*(Q_001000010*PR_010001011000+Q_001000110*PR_010001011001+Q_101000010*PR_010001011100+Q_101000110*PR_010001011101);
ans_temp[ans_id*18+11]+=Pmtrx[13]*(Q_000001010*PR_010001011000+Q_000001110*PR_010001011001+Q_000101010*PR_010001011010+Q_000101110*PR_010001011011);
ans_temp[ans_id*18+11]+=Pmtrx[14]*(Q_000000011*PR_010001011000+Q_000000111*PR_010001011001+Q_000000211*PR_010001011002);
ans_temp[ans_id*18+9]+=Pmtrx[15]*(Q_011000000*PR_010000012000+Q_111000000*PR_010000012100+Q_211000000*PR_010000012200);
ans_temp[ans_id*18+9]+=Pmtrx[16]*(Q_010001000*PR_010000012000+Q_010101000*PR_010000012010+Q_110001000*PR_010000012100+Q_110101000*PR_010000012110);
ans_temp[ans_id*18+9]+=Pmtrx[17]*(Q_010000001*PR_010000012000+Q_010000101*PR_010000012001+Q_110000001*PR_010000012100+Q_110000101*PR_010000012101);
ans_temp[ans_id*18+10]+=Pmtrx[15]*(Q_001010000*PR_010000012000+Q_001110000*PR_010000012010+Q_101010000*PR_010000012100+Q_101110000*PR_010000012110);
ans_temp[ans_id*18+10]+=Pmtrx[16]*(Q_000011000*PR_010000012000+Q_000111000*PR_010000012010+Q_000211000*PR_010000012020);
ans_temp[ans_id*18+10]+=Pmtrx[17]*(Q_000010001*PR_010000012000+Q_000010101*PR_010000012001+Q_000110001*PR_010000012010+Q_000110101*PR_010000012011);
ans_temp[ans_id*18+11]+=Pmtrx[15]*(Q_001000010*PR_010000012000+Q_001000110*PR_010000012001+Q_101000010*PR_010000012100+Q_101000110*PR_010000012101);
ans_temp[ans_id*18+11]+=Pmtrx[16]*(Q_000001010*PR_010000012000+Q_000001110*PR_010000012001+Q_000101010*PR_010000012010+Q_000101110*PR_010000012011);
ans_temp[ans_id*18+11]+=Pmtrx[17]*(Q_000000011*PR_010000012000+Q_000000111*PR_010000012001+Q_000000211*PR_010000012002);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(Q_011000000*PR_002010010000+Q_111000000*PR_002010010100+Q_211000000*PR_002010010200);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(Q_010001000*PR_002010010000+Q_010101000*PR_002010010010+Q_110001000*PR_002010010100+Q_110101000*PR_002010010110);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(Q_010000001*PR_002010010000+Q_010000101*PR_002010010001+Q_110000001*PR_002010010100+Q_110000101*PR_002010010101);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(Q_001010000*PR_002010010000+Q_001110000*PR_002010010010+Q_101010000*PR_002010010100+Q_101110000*PR_002010010110);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(Q_000011000*PR_002010010000+Q_000111000*PR_002010010010+Q_000211000*PR_002010010020);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(Q_000010001*PR_002010010000+Q_000010101*PR_002010010001+Q_000110001*PR_002010010010+Q_000110101*PR_002010010011);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(Q_001000010*PR_002010010000+Q_001000110*PR_002010010001+Q_101000010*PR_002010010100+Q_101000110*PR_002010010101);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(Q_000001010*PR_002010010000+Q_000001110*PR_002010010001+Q_000101010*PR_002010010010+Q_000101110*PR_002010010011);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(Q_000000011*PR_002010010000+Q_000000111*PR_002010010001+Q_000000211*PR_002010010002);
ans_temp[ans_id*18+12]+=Pmtrx[3]*(Q_011000000*PR_001011010000+Q_111000000*PR_001011010100+Q_211000000*PR_001011010200);
ans_temp[ans_id*18+12]+=Pmtrx[4]*(Q_010001000*PR_001011010000+Q_010101000*PR_001011010010+Q_110001000*PR_001011010100+Q_110101000*PR_001011010110);
ans_temp[ans_id*18+12]+=Pmtrx[5]*(Q_010000001*PR_001011010000+Q_010000101*PR_001011010001+Q_110000001*PR_001011010100+Q_110000101*PR_001011010101);
ans_temp[ans_id*18+13]+=Pmtrx[3]*(Q_001010000*PR_001011010000+Q_001110000*PR_001011010010+Q_101010000*PR_001011010100+Q_101110000*PR_001011010110);
ans_temp[ans_id*18+13]+=Pmtrx[4]*(Q_000011000*PR_001011010000+Q_000111000*PR_001011010010+Q_000211000*PR_001011010020);
ans_temp[ans_id*18+13]+=Pmtrx[5]*(Q_000010001*PR_001011010000+Q_000010101*PR_001011010001+Q_000110001*PR_001011010010+Q_000110101*PR_001011010011);
ans_temp[ans_id*18+14]+=Pmtrx[3]*(Q_001000010*PR_001011010000+Q_001000110*PR_001011010001+Q_101000010*PR_001011010100+Q_101000110*PR_001011010101);
ans_temp[ans_id*18+14]+=Pmtrx[4]*(Q_000001010*PR_001011010000+Q_000001110*PR_001011010001+Q_000101010*PR_001011010010+Q_000101110*PR_001011010011);
ans_temp[ans_id*18+14]+=Pmtrx[5]*(Q_000000011*PR_001011010000+Q_000000111*PR_001011010001+Q_000000211*PR_001011010002);
ans_temp[ans_id*18+12]+=Pmtrx[6]*(Q_011000000*PR_000012010000+Q_111000000*PR_000012010100+Q_211000000*PR_000012010200);
ans_temp[ans_id*18+12]+=Pmtrx[7]*(Q_010001000*PR_000012010000+Q_010101000*PR_000012010010+Q_110001000*PR_000012010100+Q_110101000*PR_000012010110);
ans_temp[ans_id*18+12]+=Pmtrx[8]*(Q_010000001*PR_000012010000+Q_010000101*PR_000012010001+Q_110000001*PR_000012010100+Q_110000101*PR_000012010101);
ans_temp[ans_id*18+13]+=Pmtrx[6]*(Q_001010000*PR_000012010000+Q_001110000*PR_000012010010+Q_101010000*PR_000012010100+Q_101110000*PR_000012010110);
ans_temp[ans_id*18+13]+=Pmtrx[7]*(Q_000011000*PR_000012010000+Q_000111000*PR_000012010010+Q_000211000*PR_000012010020);
ans_temp[ans_id*18+13]+=Pmtrx[8]*(Q_000010001*PR_000012010000+Q_000010101*PR_000012010001+Q_000110001*PR_000012010010+Q_000110101*PR_000012010011);
ans_temp[ans_id*18+14]+=Pmtrx[6]*(Q_001000010*PR_000012010000+Q_001000110*PR_000012010001+Q_101000010*PR_000012010100+Q_101000110*PR_000012010101);
ans_temp[ans_id*18+14]+=Pmtrx[7]*(Q_000001010*PR_000012010000+Q_000001110*PR_000012010001+Q_000101010*PR_000012010010+Q_000101110*PR_000012010011);
ans_temp[ans_id*18+14]+=Pmtrx[8]*(Q_000000011*PR_000012010000+Q_000000111*PR_000012010001+Q_000000211*PR_000012010002);
ans_temp[ans_id*18+12]+=Pmtrx[9]*(Q_011000000*PR_001010011000+Q_111000000*PR_001010011100+Q_211000000*PR_001010011200);
ans_temp[ans_id*18+12]+=Pmtrx[10]*(Q_010001000*PR_001010011000+Q_010101000*PR_001010011010+Q_110001000*PR_001010011100+Q_110101000*PR_001010011110);
ans_temp[ans_id*18+12]+=Pmtrx[11]*(Q_010000001*PR_001010011000+Q_010000101*PR_001010011001+Q_110000001*PR_001010011100+Q_110000101*PR_001010011101);
ans_temp[ans_id*18+13]+=Pmtrx[9]*(Q_001010000*PR_001010011000+Q_001110000*PR_001010011010+Q_101010000*PR_001010011100+Q_101110000*PR_001010011110);
ans_temp[ans_id*18+13]+=Pmtrx[10]*(Q_000011000*PR_001010011000+Q_000111000*PR_001010011010+Q_000211000*PR_001010011020);
ans_temp[ans_id*18+13]+=Pmtrx[11]*(Q_000010001*PR_001010011000+Q_000010101*PR_001010011001+Q_000110001*PR_001010011010+Q_000110101*PR_001010011011);
ans_temp[ans_id*18+14]+=Pmtrx[9]*(Q_001000010*PR_001010011000+Q_001000110*PR_001010011001+Q_101000010*PR_001010011100+Q_101000110*PR_001010011101);
ans_temp[ans_id*18+14]+=Pmtrx[10]*(Q_000001010*PR_001010011000+Q_000001110*PR_001010011001+Q_000101010*PR_001010011010+Q_000101110*PR_001010011011);
ans_temp[ans_id*18+14]+=Pmtrx[11]*(Q_000000011*PR_001010011000+Q_000000111*PR_001010011001+Q_000000211*PR_001010011002);
ans_temp[ans_id*18+12]+=Pmtrx[12]*(Q_011000000*PR_000011011000+Q_111000000*PR_000011011100+Q_211000000*PR_000011011200);
ans_temp[ans_id*18+12]+=Pmtrx[13]*(Q_010001000*PR_000011011000+Q_010101000*PR_000011011010+Q_110001000*PR_000011011100+Q_110101000*PR_000011011110);
ans_temp[ans_id*18+12]+=Pmtrx[14]*(Q_010000001*PR_000011011000+Q_010000101*PR_000011011001+Q_110000001*PR_000011011100+Q_110000101*PR_000011011101);
ans_temp[ans_id*18+13]+=Pmtrx[12]*(Q_001010000*PR_000011011000+Q_001110000*PR_000011011010+Q_101010000*PR_000011011100+Q_101110000*PR_000011011110);
ans_temp[ans_id*18+13]+=Pmtrx[13]*(Q_000011000*PR_000011011000+Q_000111000*PR_000011011010+Q_000211000*PR_000011011020);
ans_temp[ans_id*18+13]+=Pmtrx[14]*(Q_000010001*PR_000011011000+Q_000010101*PR_000011011001+Q_000110001*PR_000011011010+Q_000110101*PR_000011011011);
ans_temp[ans_id*18+14]+=Pmtrx[12]*(Q_001000010*PR_000011011000+Q_001000110*PR_000011011001+Q_101000010*PR_000011011100+Q_101000110*PR_000011011101);
ans_temp[ans_id*18+14]+=Pmtrx[13]*(Q_000001010*PR_000011011000+Q_000001110*PR_000011011001+Q_000101010*PR_000011011010+Q_000101110*PR_000011011011);
ans_temp[ans_id*18+14]+=Pmtrx[14]*(Q_000000011*PR_000011011000+Q_000000111*PR_000011011001+Q_000000211*PR_000011011002);
ans_temp[ans_id*18+12]+=Pmtrx[15]*(Q_011000000*PR_000010012000+Q_111000000*PR_000010012100+Q_211000000*PR_000010012200);
ans_temp[ans_id*18+12]+=Pmtrx[16]*(Q_010001000*PR_000010012000+Q_010101000*PR_000010012010+Q_110001000*PR_000010012100+Q_110101000*PR_000010012110);
ans_temp[ans_id*18+12]+=Pmtrx[17]*(Q_010000001*PR_000010012000+Q_010000101*PR_000010012001+Q_110000001*PR_000010012100+Q_110000101*PR_000010012101);
ans_temp[ans_id*18+13]+=Pmtrx[15]*(Q_001010000*PR_000010012000+Q_001110000*PR_000010012010+Q_101010000*PR_000010012100+Q_101110000*PR_000010012110);
ans_temp[ans_id*18+13]+=Pmtrx[16]*(Q_000011000*PR_000010012000+Q_000111000*PR_000010012010+Q_000211000*PR_000010012020);
ans_temp[ans_id*18+13]+=Pmtrx[17]*(Q_000010001*PR_000010012000+Q_000010101*PR_000010012001+Q_000110001*PR_000010012010+Q_000110101*PR_000010012011);
ans_temp[ans_id*18+14]+=Pmtrx[15]*(Q_001000010*PR_000010012000+Q_001000110*PR_000010012001+Q_101000010*PR_000010012100+Q_101000110*PR_000010012101);
ans_temp[ans_id*18+14]+=Pmtrx[16]*(Q_000001010*PR_000010012000+Q_000001110*PR_000010012001+Q_000101010*PR_000010012010+Q_000101110*PR_000010012011);
ans_temp[ans_id*18+14]+=Pmtrx[17]*(Q_000000011*PR_000010012000+Q_000000111*PR_000010012001+Q_000000211*PR_000010012002);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(Q_011000000*PR_002000020000+Q_111000000*PR_002000020100+Q_211000000*PR_002000020200);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(Q_010001000*PR_002000020000+Q_010101000*PR_002000020010+Q_110001000*PR_002000020100+Q_110101000*PR_002000020110);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(Q_010000001*PR_002000020000+Q_010000101*PR_002000020001+Q_110000001*PR_002000020100+Q_110000101*PR_002000020101);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(Q_001010000*PR_002000020000+Q_001110000*PR_002000020010+Q_101010000*PR_002000020100+Q_101110000*PR_002000020110);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(Q_000011000*PR_002000020000+Q_000111000*PR_002000020010+Q_000211000*PR_002000020020);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(Q_000010001*PR_002000020000+Q_000010101*PR_002000020001+Q_000110001*PR_002000020010+Q_000110101*PR_002000020011);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(Q_001000010*PR_002000020000+Q_001000110*PR_002000020001+Q_101000010*PR_002000020100+Q_101000110*PR_002000020101);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(Q_000001010*PR_002000020000+Q_000001110*PR_002000020001+Q_000101010*PR_002000020010+Q_000101110*PR_002000020011);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(Q_000000011*PR_002000020000+Q_000000111*PR_002000020001+Q_000000211*PR_002000020002);
ans_temp[ans_id*18+15]+=Pmtrx[3]*(Q_011000000*PR_001001020000+Q_111000000*PR_001001020100+Q_211000000*PR_001001020200);
ans_temp[ans_id*18+15]+=Pmtrx[4]*(Q_010001000*PR_001001020000+Q_010101000*PR_001001020010+Q_110001000*PR_001001020100+Q_110101000*PR_001001020110);
ans_temp[ans_id*18+15]+=Pmtrx[5]*(Q_010000001*PR_001001020000+Q_010000101*PR_001001020001+Q_110000001*PR_001001020100+Q_110000101*PR_001001020101);
ans_temp[ans_id*18+16]+=Pmtrx[3]*(Q_001010000*PR_001001020000+Q_001110000*PR_001001020010+Q_101010000*PR_001001020100+Q_101110000*PR_001001020110);
ans_temp[ans_id*18+16]+=Pmtrx[4]*(Q_000011000*PR_001001020000+Q_000111000*PR_001001020010+Q_000211000*PR_001001020020);
ans_temp[ans_id*18+16]+=Pmtrx[5]*(Q_000010001*PR_001001020000+Q_000010101*PR_001001020001+Q_000110001*PR_001001020010+Q_000110101*PR_001001020011);
ans_temp[ans_id*18+17]+=Pmtrx[3]*(Q_001000010*PR_001001020000+Q_001000110*PR_001001020001+Q_101000010*PR_001001020100+Q_101000110*PR_001001020101);
ans_temp[ans_id*18+17]+=Pmtrx[4]*(Q_000001010*PR_001001020000+Q_000001110*PR_001001020001+Q_000101010*PR_001001020010+Q_000101110*PR_001001020011);
ans_temp[ans_id*18+17]+=Pmtrx[5]*(Q_000000011*PR_001001020000+Q_000000111*PR_001001020001+Q_000000211*PR_001001020002);
ans_temp[ans_id*18+15]+=Pmtrx[6]*(Q_011000000*PR_000002020000+Q_111000000*PR_000002020100+Q_211000000*PR_000002020200);
ans_temp[ans_id*18+15]+=Pmtrx[7]*(Q_010001000*PR_000002020000+Q_010101000*PR_000002020010+Q_110001000*PR_000002020100+Q_110101000*PR_000002020110);
ans_temp[ans_id*18+15]+=Pmtrx[8]*(Q_010000001*PR_000002020000+Q_010000101*PR_000002020001+Q_110000001*PR_000002020100+Q_110000101*PR_000002020101);
ans_temp[ans_id*18+16]+=Pmtrx[6]*(Q_001010000*PR_000002020000+Q_001110000*PR_000002020010+Q_101010000*PR_000002020100+Q_101110000*PR_000002020110);
ans_temp[ans_id*18+16]+=Pmtrx[7]*(Q_000011000*PR_000002020000+Q_000111000*PR_000002020010+Q_000211000*PR_000002020020);
ans_temp[ans_id*18+16]+=Pmtrx[8]*(Q_000010001*PR_000002020000+Q_000010101*PR_000002020001+Q_000110001*PR_000002020010+Q_000110101*PR_000002020011);
ans_temp[ans_id*18+17]+=Pmtrx[6]*(Q_001000010*PR_000002020000+Q_001000110*PR_000002020001+Q_101000010*PR_000002020100+Q_101000110*PR_000002020101);
ans_temp[ans_id*18+17]+=Pmtrx[7]*(Q_000001010*PR_000002020000+Q_000001110*PR_000002020001+Q_000101010*PR_000002020010+Q_000101110*PR_000002020011);
ans_temp[ans_id*18+17]+=Pmtrx[8]*(Q_000000011*PR_000002020000+Q_000000111*PR_000002020001+Q_000000211*PR_000002020002);
ans_temp[ans_id*18+15]+=Pmtrx[9]*(Q_011000000*PR_001000021000+Q_111000000*PR_001000021100+Q_211000000*PR_001000021200);
ans_temp[ans_id*18+15]+=Pmtrx[10]*(Q_010001000*PR_001000021000+Q_010101000*PR_001000021010+Q_110001000*PR_001000021100+Q_110101000*PR_001000021110);
ans_temp[ans_id*18+15]+=Pmtrx[11]*(Q_010000001*PR_001000021000+Q_010000101*PR_001000021001+Q_110000001*PR_001000021100+Q_110000101*PR_001000021101);
ans_temp[ans_id*18+16]+=Pmtrx[9]*(Q_001010000*PR_001000021000+Q_001110000*PR_001000021010+Q_101010000*PR_001000021100+Q_101110000*PR_001000021110);
ans_temp[ans_id*18+16]+=Pmtrx[10]*(Q_000011000*PR_001000021000+Q_000111000*PR_001000021010+Q_000211000*PR_001000021020);
ans_temp[ans_id*18+16]+=Pmtrx[11]*(Q_000010001*PR_001000021000+Q_000010101*PR_001000021001+Q_000110001*PR_001000021010+Q_000110101*PR_001000021011);
ans_temp[ans_id*18+17]+=Pmtrx[9]*(Q_001000010*PR_001000021000+Q_001000110*PR_001000021001+Q_101000010*PR_001000021100+Q_101000110*PR_001000021101);
ans_temp[ans_id*18+17]+=Pmtrx[10]*(Q_000001010*PR_001000021000+Q_000001110*PR_001000021001+Q_000101010*PR_001000021010+Q_000101110*PR_001000021011);
ans_temp[ans_id*18+17]+=Pmtrx[11]*(Q_000000011*PR_001000021000+Q_000000111*PR_001000021001+Q_000000211*PR_001000021002);
ans_temp[ans_id*18+15]+=Pmtrx[12]*(Q_011000000*PR_000001021000+Q_111000000*PR_000001021100+Q_211000000*PR_000001021200);
ans_temp[ans_id*18+15]+=Pmtrx[13]*(Q_010001000*PR_000001021000+Q_010101000*PR_000001021010+Q_110001000*PR_000001021100+Q_110101000*PR_000001021110);
ans_temp[ans_id*18+15]+=Pmtrx[14]*(Q_010000001*PR_000001021000+Q_010000101*PR_000001021001+Q_110000001*PR_000001021100+Q_110000101*PR_000001021101);
ans_temp[ans_id*18+16]+=Pmtrx[12]*(Q_001010000*PR_000001021000+Q_001110000*PR_000001021010+Q_101010000*PR_000001021100+Q_101110000*PR_000001021110);
ans_temp[ans_id*18+16]+=Pmtrx[13]*(Q_000011000*PR_000001021000+Q_000111000*PR_000001021010+Q_000211000*PR_000001021020);
ans_temp[ans_id*18+16]+=Pmtrx[14]*(Q_000010001*PR_000001021000+Q_000010101*PR_000001021001+Q_000110001*PR_000001021010+Q_000110101*PR_000001021011);
ans_temp[ans_id*18+17]+=Pmtrx[12]*(Q_001000010*PR_000001021000+Q_001000110*PR_000001021001+Q_101000010*PR_000001021100+Q_101000110*PR_000001021101);
ans_temp[ans_id*18+17]+=Pmtrx[13]*(Q_000001010*PR_000001021000+Q_000001110*PR_000001021001+Q_000101010*PR_000001021010+Q_000101110*PR_000001021011);
ans_temp[ans_id*18+17]+=Pmtrx[14]*(Q_000000011*PR_000001021000+Q_000000111*PR_000001021001+Q_000000211*PR_000001021002);
ans_temp[ans_id*18+15]+=Pmtrx[15]*(Q_011000000*PR_000000022000+Q_111000000*PR_000000022100+Q_211000000*PR_000000022200);
ans_temp[ans_id*18+15]+=Pmtrx[16]*(Q_010001000*PR_000000022000+Q_010101000*PR_000000022010+Q_110001000*PR_000000022100+Q_110101000*PR_000000022110);
ans_temp[ans_id*18+15]+=Pmtrx[17]*(Q_010000001*PR_000000022000+Q_010000101*PR_000000022001+Q_110000001*PR_000000022100+Q_110000101*PR_000000022101);
ans_temp[ans_id*18+16]+=Pmtrx[15]*(Q_001010000*PR_000000022000+Q_001110000*PR_000000022010+Q_101010000*PR_000000022100+Q_101110000*PR_000000022110);
ans_temp[ans_id*18+16]+=Pmtrx[16]*(Q_000011000*PR_000000022000+Q_000111000*PR_000000022010+Q_000211000*PR_000000022020);
ans_temp[ans_id*18+16]+=Pmtrx[17]*(Q_000010001*PR_000000022000+Q_000010101*PR_000000022001+Q_000110001*PR_000000022010+Q_000110101*PR_000000022011);
ans_temp[ans_id*18+17]+=Pmtrx[15]*(Q_001000010*PR_000000022000+Q_001000110*PR_000000022001+Q_101000010*PR_000000022100+Q_101000110*PR_000000022101);
ans_temp[ans_id*18+17]+=Pmtrx[16]*(Q_000001010*PR_000000022000+Q_000001110*PR_000000022001+Q_000101010*PR_000000022010+Q_000101110*PR_000000022011);
ans_temp[ans_id*18+17]+=Pmtrx[17]*(Q_000000011*PR_000000022000+Q_000000111*PR_000000022001+Q_000000211*PR_000000022002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kq_ddpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[7];
Ft_fs_6(6,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[6]*=64*alphaT*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[6];
double R_200[5];
double R_300[4];
double R_400[3];
double R_500[2];
double R_600[1];
double R_010[6];
double R_110[5];
double R_210[4];
double R_310[3];
double R_410[2];
double R_510[1];
double R_020[5];
double R_120[4];
double R_220[3];
double R_320[2];
double R_420[1];
double R_030[4];
double R_130[3];
double R_230[2];
double R_330[1];
double R_040[3];
double R_140[2];
double R_240[1];
double R_050[2];
double R_150[1];
double R_060[1];
double R_001[6];
double R_101[5];
double R_201[4];
double R_301[3];
double R_401[2];
double R_501[1];
double R_011[5];
double R_111[4];
double R_211[3];
double R_311[2];
double R_411[1];
double R_021[4];
double R_121[3];
double R_221[2];
double R_321[1];
double R_031[3];
double R_131[2];
double R_231[1];
double R_041[2];
double R_141[1];
double R_051[1];
double R_002[5];
double R_102[4];
double R_202[3];
double R_302[2];
double R_402[1];
double R_012[4];
double R_112[3];
double R_212[2];
double R_312[1];
double R_022[3];
double R_122[2];
double R_222[1];
double R_032[2];
double R_132[1];
double R_042[1];
double R_003[4];
double R_103[3];
double R_203[2];
double R_303[1];
double R_013[3];
double R_113[2];
double R_213[1];
double R_023[2];
double R_123[1];
double R_033[1];
double R_004[3];
double R_104[2];
double R_204[1];
double R_014[2];
double R_114[1];
double R_024[1];
double R_005[2];
double R_105[1];
double R_015[1];
double R_006[1];
for(int i=0;i<6;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<6;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<6;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<5;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<5;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<5;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<5;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<5;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<5;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<4;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<4;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<4;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<4;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<4;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<4;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<4;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<4;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<4;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<3;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<3;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<3;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<3;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<3;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<3;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<3;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<3;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<3;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<3;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<3;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<3;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<3;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<3;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<3;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<2;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<2;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<2;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<2;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<2;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<2;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<2;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<2;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<2;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<2;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<2;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<2;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<2;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<2;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<2;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<2;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<2;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<2;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<2;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<2;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<2;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
for(int i=0;i<1;i++){
R_600[i]=TX*R_500[i+1]+5*R_400[i+1];
}
for(int i=0;i<1;i++){
R_510[i]=TY*R_500[i+1];
}
for(int i=0;i<1;i++){
R_420[i]=TX*R_320[i+1]+3*R_220[i+1];
}
for(int i=0;i<1;i++){
R_330[i]=TX*R_230[i+1]+2*R_130[i+1];
}
for(int i=0;i<1;i++){
R_240[i]=TY*R_230[i+1]+3*R_220[i+1];
}
for(int i=0;i<1;i++){
R_150[i]=TX*R_050[i+1];
}
for(int i=0;i<1;i++){
R_060[i]=TY*R_050[i+1]+5*R_040[i+1];
}
for(int i=0;i<1;i++){
R_501[i]=TZ*R_500[i+1];
}
for(int i=0;i<1;i++){
R_411[i]=TY*R_401[i+1];
}
for(int i=0;i<1;i++){
R_321[i]=TZ*R_320[i+1];
}
for(int i=0;i<1;i++){
R_231[i]=TZ*R_230[i+1];
}
for(int i=0;i<1;i++){
R_141[i]=TX*R_041[i+1];
}
for(int i=0;i<1;i++){
R_051[i]=TZ*R_050[i+1];
}
for(int i=0;i<1;i++){
R_402[i]=TX*R_302[i+1]+3*R_202[i+1];
}
for(int i=0;i<1;i++){
R_312[i]=TY*R_302[i+1];
}
for(int i=0;i<1;i++){
R_222[i]=TX*R_122[i+1]+R_022[i+1];
}
for(int i=0;i<1;i++){
R_132[i]=TX*R_032[i+1];
}
for(int i=0;i<1;i++){
R_042[i]=TY*R_032[i+1]+3*R_022[i+1];
}
for(int i=0;i<1;i++){
R_303[i]=TX*R_203[i+1]+2*R_103[i+1];
}
for(int i=0;i<1;i++){
R_213[i]=TY*R_203[i+1];
}
for(int i=0;i<1;i++){
R_123[i]=TX*R_023[i+1];
}
for(int i=0;i<1;i++){
R_033[i]=TY*R_023[i+1]+2*R_013[i+1];
}
for(int i=0;i<1;i++){
R_204[i]=TZ*R_203[i+1]+3*R_202[i+1];
}
for(int i=0;i<1;i++){
R_114[i]=TX*R_014[i+1];
}
for(int i=0;i<1;i++){
R_024[i]=TZ*R_023[i+1]+3*R_022[i+1];
}
for(int i=0;i<1;i++){
R_105[i]=TX*R_005[i+1];
}
for(int i=0;i<1;i++){
R_015[i]=TY*R_005[i+1];
}
for(int i=0;i<1;i++){
R_006[i]=TZ*R_005[i+1]+5*R_004[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double QR_011000000003=Q_011000000*R_003[0]+-1*Q_111000000*R_103[0]+Q_211000000*R_203[0];
double QR_010001000003=Q_010001000*R_003[0]+-1*Q_010101000*R_013[0]+-1*Q_110001000*R_103[0]+Q_110101000*R_113[0];
double QR_010000001003=Q_010000001*R_003[0]+-1*Q_010000101*R_004[0]+-1*Q_110000001*R_103[0]+Q_110000101*R_104[0];
double QR_001010000003=Q_001010000*R_003[0]+-1*Q_001110000*R_013[0]+-1*Q_101010000*R_103[0]+Q_101110000*R_113[0];
double QR_000011000003=Q_000011000*R_003[0]+-1*Q_000111000*R_013[0]+Q_000211000*R_023[0];
double QR_000010001003=Q_000010001*R_003[0]+-1*Q_000010101*R_004[0]+-1*Q_000110001*R_013[0]+Q_000110101*R_014[0];
double QR_001000010003=Q_001000010*R_003[0]+-1*Q_001000110*R_004[0]+-1*Q_101000010*R_103[0]+Q_101000110*R_104[0];
double QR_000001010003=Q_000001010*R_003[0]+-1*Q_000001110*R_004[0]+-1*Q_000101010*R_013[0]+Q_000101110*R_014[0];
double QR_000000011003=Q_000000011*R_003[0]+-1*Q_000000111*R_004[0]+Q_000000211*R_005[0];
double QR_011000000012=Q_011000000*R_012[0]+-1*Q_111000000*R_112[0]+Q_211000000*R_212[0];
double QR_010001000012=Q_010001000*R_012[0]+-1*Q_010101000*R_022[0]+-1*Q_110001000*R_112[0]+Q_110101000*R_122[0];
double QR_010000001012=Q_010000001*R_012[0]+-1*Q_010000101*R_013[0]+-1*Q_110000001*R_112[0]+Q_110000101*R_113[0];
double QR_001010000012=Q_001010000*R_012[0]+-1*Q_001110000*R_022[0]+-1*Q_101010000*R_112[0]+Q_101110000*R_122[0];
double QR_000011000012=Q_000011000*R_012[0]+-1*Q_000111000*R_022[0]+Q_000211000*R_032[0];
double QR_000010001012=Q_000010001*R_012[0]+-1*Q_000010101*R_013[0]+-1*Q_000110001*R_022[0]+Q_000110101*R_023[0];
double QR_001000010012=Q_001000010*R_012[0]+-1*Q_001000110*R_013[0]+-1*Q_101000010*R_112[0]+Q_101000110*R_113[0];
double QR_000001010012=Q_000001010*R_012[0]+-1*Q_000001110*R_013[0]+-1*Q_000101010*R_022[0]+Q_000101110*R_023[0];
double QR_000000011012=Q_000000011*R_012[0]+-1*Q_000000111*R_013[0]+Q_000000211*R_014[0];
double QR_011000000021=Q_011000000*R_021[0]+-1*Q_111000000*R_121[0]+Q_211000000*R_221[0];
double QR_010001000021=Q_010001000*R_021[0]+-1*Q_010101000*R_031[0]+-1*Q_110001000*R_121[0]+Q_110101000*R_131[0];
double QR_010000001021=Q_010000001*R_021[0]+-1*Q_010000101*R_022[0]+-1*Q_110000001*R_121[0]+Q_110000101*R_122[0];
double QR_001010000021=Q_001010000*R_021[0]+-1*Q_001110000*R_031[0]+-1*Q_101010000*R_121[0]+Q_101110000*R_131[0];
double QR_000011000021=Q_000011000*R_021[0]+-1*Q_000111000*R_031[0]+Q_000211000*R_041[0];
double QR_000010001021=Q_000010001*R_021[0]+-1*Q_000010101*R_022[0]+-1*Q_000110001*R_031[0]+Q_000110101*R_032[0];
double QR_001000010021=Q_001000010*R_021[0]+-1*Q_001000110*R_022[0]+-1*Q_101000010*R_121[0]+Q_101000110*R_122[0];
double QR_000001010021=Q_000001010*R_021[0]+-1*Q_000001110*R_022[0]+-1*Q_000101010*R_031[0]+Q_000101110*R_032[0];
double QR_000000011021=Q_000000011*R_021[0]+-1*Q_000000111*R_022[0]+Q_000000211*R_023[0];
double QR_011000000030=Q_011000000*R_030[0]+-1*Q_111000000*R_130[0]+Q_211000000*R_230[0];
double QR_010001000030=Q_010001000*R_030[0]+-1*Q_010101000*R_040[0]+-1*Q_110001000*R_130[0]+Q_110101000*R_140[0];
double QR_010000001030=Q_010000001*R_030[0]+-1*Q_010000101*R_031[0]+-1*Q_110000001*R_130[0]+Q_110000101*R_131[0];
double QR_001010000030=Q_001010000*R_030[0]+-1*Q_001110000*R_040[0]+-1*Q_101010000*R_130[0]+Q_101110000*R_140[0];
double QR_000011000030=Q_000011000*R_030[0]+-1*Q_000111000*R_040[0]+Q_000211000*R_050[0];
double QR_000010001030=Q_000010001*R_030[0]+-1*Q_000010101*R_031[0]+-1*Q_000110001*R_040[0]+Q_000110101*R_041[0];
double QR_001000010030=Q_001000010*R_030[0]+-1*Q_001000110*R_031[0]+-1*Q_101000010*R_130[0]+Q_101000110*R_131[0];
double QR_000001010030=Q_000001010*R_030[0]+-1*Q_000001110*R_031[0]+-1*Q_000101010*R_040[0]+Q_000101110*R_041[0];
double QR_000000011030=Q_000000011*R_030[0]+-1*Q_000000111*R_031[0]+Q_000000211*R_032[0];
double QR_011000000102=Q_011000000*R_102[0]+-1*Q_111000000*R_202[0]+Q_211000000*R_302[0];
double QR_010001000102=Q_010001000*R_102[0]+-1*Q_010101000*R_112[0]+-1*Q_110001000*R_202[0]+Q_110101000*R_212[0];
double QR_010000001102=Q_010000001*R_102[0]+-1*Q_010000101*R_103[0]+-1*Q_110000001*R_202[0]+Q_110000101*R_203[0];
double QR_001010000102=Q_001010000*R_102[0]+-1*Q_001110000*R_112[0]+-1*Q_101010000*R_202[0]+Q_101110000*R_212[0];
double QR_000011000102=Q_000011000*R_102[0]+-1*Q_000111000*R_112[0]+Q_000211000*R_122[0];
double QR_000010001102=Q_000010001*R_102[0]+-1*Q_000010101*R_103[0]+-1*Q_000110001*R_112[0]+Q_000110101*R_113[0];
double QR_001000010102=Q_001000010*R_102[0]+-1*Q_001000110*R_103[0]+-1*Q_101000010*R_202[0]+Q_101000110*R_203[0];
double QR_000001010102=Q_000001010*R_102[0]+-1*Q_000001110*R_103[0]+-1*Q_000101010*R_112[0]+Q_000101110*R_113[0];
double QR_000000011102=Q_000000011*R_102[0]+-1*Q_000000111*R_103[0]+Q_000000211*R_104[0];
double QR_011000000111=Q_011000000*R_111[0]+-1*Q_111000000*R_211[0]+Q_211000000*R_311[0];
double QR_010001000111=Q_010001000*R_111[0]+-1*Q_010101000*R_121[0]+-1*Q_110001000*R_211[0]+Q_110101000*R_221[0];
double QR_010000001111=Q_010000001*R_111[0]+-1*Q_010000101*R_112[0]+-1*Q_110000001*R_211[0]+Q_110000101*R_212[0];
double QR_001010000111=Q_001010000*R_111[0]+-1*Q_001110000*R_121[0]+-1*Q_101010000*R_211[0]+Q_101110000*R_221[0];
double QR_000011000111=Q_000011000*R_111[0]+-1*Q_000111000*R_121[0]+Q_000211000*R_131[0];
double QR_000010001111=Q_000010001*R_111[0]+-1*Q_000010101*R_112[0]+-1*Q_000110001*R_121[0]+Q_000110101*R_122[0];
double QR_001000010111=Q_001000010*R_111[0]+-1*Q_001000110*R_112[0]+-1*Q_101000010*R_211[0]+Q_101000110*R_212[0];
double QR_000001010111=Q_000001010*R_111[0]+-1*Q_000001110*R_112[0]+-1*Q_000101010*R_121[0]+Q_000101110*R_122[0];
double QR_000000011111=Q_000000011*R_111[0]+-1*Q_000000111*R_112[0]+Q_000000211*R_113[0];
double QR_011000000120=Q_011000000*R_120[0]+-1*Q_111000000*R_220[0]+Q_211000000*R_320[0];
double QR_010001000120=Q_010001000*R_120[0]+-1*Q_010101000*R_130[0]+-1*Q_110001000*R_220[0]+Q_110101000*R_230[0];
double QR_010000001120=Q_010000001*R_120[0]+-1*Q_010000101*R_121[0]+-1*Q_110000001*R_220[0]+Q_110000101*R_221[0];
double QR_001010000120=Q_001010000*R_120[0]+-1*Q_001110000*R_130[0]+-1*Q_101010000*R_220[0]+Q_101110000*R_230[0];
double QR_000011000120=Q_000011000*R_120[0]+-1*Q_000111000*R_130[0]+Q_000211000*R_140[0];
double QR_000010001120=Q_000010001*R_120[0]+-1*Q_000010101*R_121[0]+-1*Q_000110001*R_130[0]+Q_000110101*R_131[0];
double QR_001000010120=Q_001000010*R_120[0]+-1*Q_001000110*R_121[0]+-1*Q_101000010*R_220[0]+Q_101000110*R_221[0];
double QR_000001010120=Q_000001010*R_120[0]+-1*Q_000001110*R_121[0]+-1*Q_000101010*R_130[0]+Q_000101110*R_131[0];
double QR_000000011120=Q_000000011*R_120[0]+-1*Q_000000111*R_121[0]+Q_000000211*R_122[0];
double QR_011000000201=Q_011000000*R_201[0]+-1*Q_111000000*R_301[0]+Q_211000000*R_401[0];
double QR_010001000201=Q_010001000*R_201[0]+-1*Q_010101000*R_211[0]+-1*Q_110001000*R_301[0]+Q_110101000*R_311[0];
double QR_010000001201=Q_010000001*R_201[0]+-1*Q_010000101*R_202[0]+-1*Q_110000001*R_301[0]+Q_110000101*R_302[0];
double QR_001010000201=Q_001010000*R_201[0]+-1*Q_001110000*R_211[0]+-1*Q_101010000*R_301[0]+Q_101110000*R_311[0];
double QR_000011000201=Q_000011000*R_201[0]+-1*Q_000111000*R_211[0]+Q_000211000*R_221[0];
double QR_000010001201=Q_000010001*R_201[0]+-1*Q_000010101*R_202[0]+-1*Q_000110001*R_211[0]+Q_000110101*R_212[0];
double QR_001000010201=Q_001000010*R_201[0]+-1*Q_001000110*R_202[0]+-1*Q_101000010*R_301[0]+Q_101000110*R_302[0];
double QR_000001010201=Q_000001010*R_201[0]+-1*Q_000001110*R_202[0]+-1*Q_000101010*R_211[0]+Q_000101110*R_212[0];
double QR_000000011201=Q_000000011*R_201[0]+-1*Q_000000111*R_202[0]+Q_000000211*R_203[0];
double QR_011000000210=Q_011000000*R_210[0]+-1*Q_111000000*R_310[0]+Q_211000000*R_410[0];
double QR_010001000210=Q_010001000*R_210[0]+-1*Q_010101000*R_220[0]+-1*Q_110001000*R_310[0]+Q_110101000*R_320[0];
double QR_010000001210=Q_010000001*R_210[0]+-1*Q_010000101*R_211[0]+-1*Q_110000001*R_310[0]+Q_110000101*R_311[0];
double QR_001010000210=Q_001010000*R_210[0]+-1*Q_001110000*R_220[0]+-1*Q_101010000*R_310[0]+Q_101110000*R_320[0];
double QR_000011000210=Q_000011000*R_210[0]+-1*Q_000111000*R_220[0]+Q_000211000*R_230[0];
double QR_000010001210=Q_000010001*R_210[0]+-1*Q_000010101*R_211[0]+-1*Q_000110001*R_220[0]+Q_000110101*R_221[0];
double QR_001000010210=Q_001000010*R_210[0]+-1*Q_001000110*R_211[0]+-1*Q_101000010*R_310[0]+Q_101000110*R_311[0];
double QR_000001010210=Q_000001010*R_210[0]+-1*Q_000001110*R_211[0]+-1*Q_000101010*R_220[0]+Q_000101110*R_221[0];
double QR_000000011210=Q_000000011*R_210[0]+-1*Q_000000111*R_211[0]+Q_000000211*R_212[0];
double QR_011000000300=Q_011000000*R_300[0]+-1*Q_111000000*R_400[0]+Q_211000000*R_500[0];
double QR_010001000300=Q_010001000*R_300[0]+-1*Q_010101000*R_310[0]+-1*Q_110001000*R_400[0]+Q_110101000*R_410[0];
double QR_010000001300=Q_010000001*R_300[0]+-1*Q_010000101*R_301[0]+-1*Q_110000001*R_400[0]+Q_110000101*R_401[0];
double QR_001010000300=Q_001010000*R_300[0]+-1*Q_001110000*R_310[0]+-1*Q_101010000*R_400[0]+Q_101110000*R_410[0];
double QR_000011000300=Q_000011000*R_300[0]+-1*Q_000111000*R_310[0]+Q_000211000*R_320[0];
double QR_000010001300=Q_000010001*R_300[0]+-1*Q_000010101*R_301[0]+-1*Q_000110001*R_310[0]+Q_000110101*R_311[0];
double QR_001000010300=Q_001000010*R_300[0]+-1*Q_001000110*R_301[0]+-1*Q_101000010*R_400[0]+Q_101000110*R_401[0];
double QR_000001010300=Q_000001010*R_300[0]+-1*Q_000001110*R_301[0]+-1*Q_000101010*R_310[0]+Q_000101110*R_311[0];
double QR_000000011300=Q_000000011*R_300[0]+-1*Q_000000111*R_301[0]+Q_000000211*R_302[0];
double QR_011000000004=Q_011000000*R_004[0]+-1*Q_111000000*R_104[0]+Q_211000000*R_204[0];
double QR_010001000004=Q_010001000*R_004[0]+-1*Q_010101000*R_014[0]+-1*Q_110001000*R_104[0]+Q_110101000*R_114[0];
double QR_010000001004=Q_010000001*R_004[0]+-1*Q_010000101*R_005[0]+-1*Q_110000001*R_104[0]+Q_110000101*R_105[0];
double QR_001010000004=Q_001010000*R_004[0]+-1*Q_001110000*R_014[0]+-1*Q_101010000*R_104[0]+Q_101110000*R_114[0];
double QR_000011000004=Q_000011000*R_004[0]+-1*Q_000111000*R_014[0]+Q_000211000*R_024[0];
double QR_000010001004=Q_000010001*R_004[0]+-1*Q_000010101*R_005[0]+-1*Q_000110001*R_014[0]+Q_000110101*R_015[0];
double QR_001000010004=Q_001000010*R_004[0]+-1*Q_001000110*R_005[0]+-1*Q_101000010*R_104[0]+Q_101000110*R_105[0];
double QR_000001010004=Q_000001010*R_004[0]+-1*Q_000001110*R_005[0]+-1*Q_000101010*R_014[0]+Q_000101110*R_015[0];
double QR_000000011004=Q_000000011*R_004[0]+-1*Q_000000111*R_005[0]+Q_000000211*R_006[0];
double QR_011000000013=Q_011000000*R_013[0]+-1*Q_111000000*R_113[0]+Q_211000000*R_213[0];
double QR_010001000013=Q_010001000*R_013[0]+-1*Q_010101000*R_023[0]+-1*Q_110001000*R_113[0]+Q_110101000*R_123[0];
double QR_010000001013=Q_010000001*R_013[0]+-1*Q_010000101*R_014[0]+-1*Q_110000001*R_113[0]+Q_110000101*R_114[0];
double QR_001010000013=Q_001010000*R_013[0]+-1*Q_001110000*R_023[0]+-1*Q_101010000*R_113[0]+Q_101110000*R_123[0];
double QR_000011000013=Q_000011000*R_013[0]+-1*Q_000111000*R_023[0]+Q_000211000*R_033[0];
double QR_000010001013=Q_000010001*R_013[0]+-1*Q_000010101*R_014[0]+-1*Q_000110001*R_023[0]+Q_000110101*R_024[0];
double QR_001000010013=Q_001000010*R_013[0]+-1*Q_001000110*R_014[0]+-1*Q_101000010*R_113[0]+Q_101000110*R_114[0];
double QR_000001010013=Q_000001010*R_013[0]+-1*Q_000001110*R_014[0]+-1*Q_000101010*R_023[0]+Q_000101110*R_024[0];
double QR_000000011013=Q_000000011*R_013[0]+-1*Q_000000111*R_014[0]+Q_000000211*R_015[0];
double QR_011000000022=Q_011000000*R_022[0]+-1*Q_111000000*R_122[0]+Q_211000000*R_222[0];
double QR_010001000022=Q_010001000*R_022[0]+-1*Q_010101000*R_032[0]+-1*Q_110001000*R_122[0]+Q_110101000*R_132[0];
double QR_010000001022=Q_010000001*R_022[0]+-1*Q_010000101*R_023[0]+-1*Q_110000001*R_122[0]+Q_110000101*R_123[0];
double QR_001010000022=Q_001010000*R_022[0]+-1*Q_001110000*R_032[0]+-1*Q_101010000*R_122[0]+Q_101110000*R_132[0];
double QR_000011000022=Q_000011000*R_022[0]+-1*Q_000111000*R_032[0]+Q_000211000*R_042[0];
double QR_000010001022=Q_000010001*R_022[0]+-1*Q_000010101*R_023[0]+-1*Q_000110001*R_032[0]+Q_000110101*R_033[0];
double QR_001000010022=Q_001000010*R_022[0]+-1*Q_001000110*R_023[0]+-1*Q_101000010*R_122[0]+Q_101000110*R_123[0];
double QR_000001010022=Q_000001010*R_022[0]+-1*Q_000001110*R_023[0]+-1*Q_000101010*R_032[0]+Q_000101110*R_033[0];
double QR_000000011022=Q_000000011*R_022[0]+-1*Q_000000111*R_023[0]+Q_000000211*R_024[0];
double QR_011000000031=Q_011000000*R_031[0]+-1*Q_111000000*R_131[0]+Q_211000000*R_231[0];
double QR_010001000031=Q_010001000*R_031[0]+-1*Q_010101000*R_041[0]+-1*Q_110001000*R_131[0]+Q_110101000*R_141[0];
double QR_010000001031=Q_010000001*R_031[0]+-1*Q_010000101*R_032[0]+-1*Q_110000001*R_131[0]+Q_110000101*R_132[0];
double QR_001010000031=Q_001010000*R_031[0]+-1*Q_001110000*R_041[0]+-1*Q_101010000*R_131[0]+Q_101110000*R_141[0];
double QR_000011000031=Q_000011000*R_031[0]+-1*Q_000111000*R_041[0]+Q_000211000*R_051[0];
double QR_000010001031=Q_000010001*R_031[0]+-1*Q_000010101*R_032[0]+-1*Q_000110001*R_041[0]+Q_000110101*R_042[0];
double QR_001000010031=Q_001000010*R_031[0]+-1*Q_001000110*R_032[0]+-1*Q_101000010*R_131[0]+Q_101000110*R_132[0];
double QR_000001010031=Q_000001010*R_031[0]+-1*Q_000001110*R_032[0]+-1*Q_000101010*R_041[0]+Q_000101110*R_042[0];
double QR_000000011031=Q_000000011*R_031[0]+-1*Q_000000111*R_032[0]+Q_000000211*R_033[0];
double QR_011000000040=Q_011000000*R_040[0]+-1*Q_111000000*R_140[0]+Q_211000000*R_240[0];
double QR_010001000040=Q_010001000*R_040[0]+-1*Q_010101000*R_050[0]+-1*Q_110001000*R_140[0]+Q_110101000*R_150[0];
double QR_010000001040=Q_010000001*R_040[0]+-1*Q_010000101*R_041[0]+-1*Q_110000001*R_140[0]+Q_110000101*R_141[0];
double QR_001010000040=Q_001010000*R_040[0]+-1*Q_001110000*R_050[0]+-1*Q_101010000*R_140[0]+Q_101110000*R_150[0];
double QR_000011000040=Q_000011000*R_040[0]+-1*Q_000111000*R_050[0]+Q_000211000*R_060[0];
double QR_000010001040=Q_000010001*R_040[0]+-1*Q_000010101*R_041[0]+-1*Q_000110001*R_050[0]+Q_000110101*R_051[0];
double QR_001000010040=Q_001000010*R_040[0]+-1*Q_001000110*R_041[0]+-1*Q_101000010*R_140[0]+Q_101000110*R_141[0];
double QR_000001010040=Q_000001010*R_040[0]+-1*Q_000001110*R_041[0]+-1*Q_000101010*R_050[0]+Q_000101110*R_051[0];
double QR_000000011040=Q_000000011*R_040[0]+-1*Q_000000111*R_041[0]+Q_000000211*R_042[0];
double QR_011000000103=Q_011000000*R_103[0]+-1*Q_111000000*R_203[0]+Q_211000000*R_303[0];
double QR_010001000103=Q_010001000*R_103[0]+-1*Q_010101000*R_113[0]+-1*Q_110001000*R_203[0]+Q_110101000*R_213[0];
double QR_010000001103=Q_010000001*R_103[0]+-1*Q_010000101*R_104[0]+-1*Q_110000001*R_203[0]+Q_110000101*R_204[0];
double QR_001010000103=Q_001010000*R_103[0]+-1*Q_001110000*R_113[0]+-1*Q_101010000*R_203[0]+Q_101110000*R_213[0];
double QR_000011000103=Q_000011000*R_103[0]+-1*Q_000111000*R_113[0]+Q_000211000*R_123[0];
double QR_000010001103=Q_000010001*R_103[0]+-1*Q_000010101*R_104[0]+-1*Q_000110001*R_113[0]+Q_000110101*R_114[0];
double QR_001000010103=Q_001000010*R_103[0]+-1*Q_001000110*R_104[0]+-1*Q_101000010*R_203[0]+Q_101000110*R_204[0];
double QR_000001010103=Q_000001010*R_103[0]+-1*Q_000001110*R_104[0]+-1*Q_000101010*R_113[0]+Q_000101110*R_114[0];
double QR_000000011103=Q_000000011*R_103[0]+-1*Q_000000111*R_104[0]+Q_000000211*R_105[0];
double QR_011000000112=Q_011000000*R_112[0]+-1*Q_111000000*R_212[0]+Q_211000000*R_312[0];
double QR_010001000112=Q_010001000*R_112[0]+-1*Q_010101000*R_122[0]+-1*Q_110001000*R_212[0]+Q_110101000*R_222[0];
double QR_010000001112=Q_010000001*R_112[0]+-1*Q_010000101*R_113[0]+-1*Q_110000001*R_212[0]+Q_110000101*R_213[0];
double QR_001010000112=Q_001010000*R_112[0]+-1*Q_001110000*R_122[0]+-1*Q_101010000*R_212[0]+Q_101110000*R_222[0];
double QR_000011000112=Q_000011000*R_112[0]+-1*Q_000111000*R_122[0]+Q_000211000*R_132[0];
double QR_000010001112=Q_000010001*R_112[0]+-1*Q_000010101*R_113[0]+-1*Q_000110001*R_122[0]+Q_000110101*R_123[0];
double QR_001000010112=Q_001000010*R_112[0]+-1*Q_001000110*R_113[0]+-1*Q_101000010*R_212[0]+Q_101000110*R_213[0];
double QR_000001010112=Q_000001010*R_112[0]+-1*Q_000001110*R_113[0]+-1*Q_000101010*R_122[0]+Q_000101110*R_123[0];
double QR_000000011112=Q_000000011*R_112[0]+-1*Q_000000111*R_113[0]+Q_000000211*R_114[0];
double QR_011000000121=Q_011000000*R_121[0]+-1*Q_111000000*R_221[0]+Q_211000000*R_321[0];
double QR_010001000121=Q_010001000*R_121[0]+-1*Q_010101000*R_131[0]+-1*Q_110001000*R_221[0]+Q_110101000*R_231[0];
double QR_010000001121=Q_010000001*R_121[0]+-1*Q_010000101*R_122[0]+-1*Q_110000001*R_221[0]+Q_110000101*R_222[0];
double QR_001010000121=Q_001010000*R_121[0]+-1*Q_001110000*R_131[0]+-1*Q_101010000*R_221[0]+Q_101110000*R_231[0];
double QR_000011000121=Q_000011000*R_121[0]+-1*Q_000111000*R_131[0]+Q_000211000*R_141[0];
double QR_000010001121=Q_000010001*R_121[0]+-1*Q_000010101*R_122[0]+-1*Q_000110001*R_131[0]+Q_000110101*R_132[0];
double QR_001000010121=Q_001000010*R_121[0]+-1*Q_001000110*R_122[0]+-1*Q_101000010*R_221[0]+Q_101000110*R_222[0];
double QR_000001010121=Q_000001010*R_121[0]+-1*Q_000001110*R_122[0]+-1*Q_000101010*R_131[0]+Q_000101110*R_132[0];
double QR_000000011121=Q_000000011*R_121[0]+-1*Q_000000111*R_122[0]+Q_000000211*R_123[0];
double QR_011000000130=Q_011000000*R_130[0]+-1*Q_111000000*R_230[0]+Q_211000000*R_330[0];
double QR_010001000130=Q_010001000*R_130[0]+-1*Q_010101000*R_140[0]+-1*Q_110001000*R_230[0]+Q_110101000*R_240[0];
double QR_010000001130=Q_010000001*R_130[0]+-1*Q_010000101*R_131[0]+-1*Q_110000001*R_230[0]+Q_110000101*R_231[0];
double QR_001010000130=Q_001010000*R_130[0]+-1*Q_001110000*R_140[0]+-1*Q_101010000*R_230[0]+Q_101110000*R_240[0];
double QR_000011000130=Q_000011000*R_130[0]+-1*Q_000111000*R_140[0]+Q_000211000*R_150[0];
double QR_000010001130=Q_000010001*R_130[0]+-1*Q_000010101*R_131[0]+-1*Q_000110001*R_140[0]+Q_000110101*R_141[0];
double QR_001000010130=Q_001000010*R_130[0]+-1*Q_001000110*R_131[0]+-1*Q_101000010*R_230[0]+Q_101000110*R_231[0];
double QR_000001010130=Q_000001010*R_130[0]+-1*Q_000001110*R_131[0]+-1*Q_000101010*R_140[0]+Q_000101110*R_141[0];
double QR_000000011130=Q_000000011*R_130[0]+-1*Q_000000111*R_131[0]+Q_000000211*R_132[0];
double QR_011000000202=Q_011000000*R_202[0]+-1*Q_111000000*R_302[0]+Q_211000000*R_402[0];
double QR_010001000202=Q_010001000*R_202[0]+-1*Q_010101000*R_212[0]+-1*Q_110001000*R_302[0]+Q_110101000*R_312[0];
double QR_010000001202=Q_010000001*R_202[0]+-1*Q_010000101*R_203[0]+-1*Q_110000001*R_302[0]+Q_110000101*R_303[0];
double QR_001010000202=Q_001010000*R_202[0]+-1*Q_001110000*R_212[0]+-1*Q_101010000*R_302[0]+Q_101110000*R_312[0];
double QR_000011000202=Q_000011000*R_202[0]+-1*Q_000111000*R_212[0]+Q_000211000*R_222[0];
double QR_000010001202=Q_000010001*R_202[0]+-1*Q_000010101*R_203[0]+-1*Q_000110001*R_212[0]+Q_000110101*R_213[0];
double QR_001000010202=Q_001000010*R_202[0]+-1*Q_001000110*R_203[0]+-1*Q_101000010*R_302[0]+Q_101000110*R_303[0];
double QR_000001010202=Q_000001010*R_202[0]+-1*Q_000001110*R_203[0]+-1*Q_000101010*R_212[0]+Q_000101110*R_213[0];
double QR_000000011202=Q_000000011*R_202[0]+-1*Q_000000111*R_203[0]+Q_000000211*R_204[0];
double QR_011000000211=Q_011000000*R_211[0]+-1*Q_111000000*R_311[0]+Q_211000000*R_411[0];
double QR_010001000211=Q_010001000*R_211[0]+-1*Q_010101000*R_221[0]+-1*Q_110001000*R_311[0]+Q_110101000*R_321[0];
double QR_010000001211=Q_010000001*R_211[0]+-1*Q_010000101*R_212[0]+-1*Q_110000001*R_311[0]+Q_110000101*R_312[0];
double QR_001010000211=Q_001010000*R_211[0]+-1*Q_001110000*R_221[0]+-1*Q_101010000*R_311[0]+Q_101110000*R_321[0];
double QR_000011000211=Q_000011000*R_211[0]+-1*Q_000111000*R_221[0]+Q_000211000*R_231[0];
double QR_000010001211=Q_000010001*R_211[0]+-1*Q_000010101*R_212[0]+-1*Q_000110001*R_221[0]+Q_000110101*R_222[0];
double QR_001000010211=Q_001000010*R_211[0]+-1*Q_001000110*R_212[0]+-1*Q_101000010*R_311[0]+Q_101000110*R_312[0];
double QR_000001010211=Q_000001010*R_211[0]+-1*Q_000001110*R_212[0]+-1*Q_000101010*R_221[0]+Q_000101110*R_222[0];
double QR_000000011211=Q_000000011*R_211[0]+-1*Q_000000111*R_212[0]+Q_000000211*R_213[0];
double QR_011000000220=Q_011000000*R_220[0]+-1*Q_111000000*R_320[0]+Q_211000000*R_420[0];
double QR_010001000220=Q_010001000*R_220[0]+-1*Q_010101000*R_230[0]+-1*Q_110001000*R_320[0]+Q_110101000*R_330[0];
double QR_010000001220=Q_010000001*R_220[0]+-1*Q_010000101*R_221[0]+-1*Q_110000001*R_320[0]+Q_110000101*R_321[0];
double QR_001010000220=Q_001010000*R_220[0]+-1*Q_001110000*R_230[0]+-1*Q_101010000*R_320[0]+Q_101110000*R_330[0];
double QR_000011000220=Q_000011000*R_220[0]+-1*Q_000111000*R_230[0]+Q_000211000*R_240[0];
double QR_000010001220=Q_000010001*R_220[0]+-1*Q_000010101*R_221[0]+-1*Q_000110001*R_230[0]+Q_000110101*R_231[0];
double QR_001000010220=Q_001000010*R_220[0]+-1*Q_001000110*R_221[0]+-1*Q_101000010*R_320[0]+Q_101000110*R_321[0];
double QR_000001010220=Q_000001010*R_220[0]+-1*Q_000001110*R_221[0]+-1*Q_000101010*R_230[0]+Q_000101110*R_231[0];
double QR_000000011220=Q_000000011*R_220[0]+-1*Q_000000111*R_221[0]+Q_000000211*R_222[0];
double QR_011000000301=Q_011000000*R_301[0]+-1*Q_111000000*R_401[0]+Q_211000000*R_501[0];
double QR_010001000301=Q_010001000*R_301[0]+-1*Q_010101000*R_311[0]+-1*Q_110001000*R_401[0]+Q_110101000*R_411[0];
double QR_010000001301=Q_010000001*R_301[0]+-1*Q_010000101*R_302[0]+-1*Q_110000001*R_401[0]+Q_110000101*R_402[0];
double QR_001010000301=Q_001010000*R_301[0]+-1*Q_001110000*R_311[0]+-1*Q_101010000*R_401[0]+Q_101110000*R_411[0];
double QR_000011000301=Q_000011000*R_301[0]+-1*Q_000111000*R_311[0]+Q_000211000*R_321[0];
double QR_000010001301=Q_000010001*R_301[0]+-1*Q_000010101*R_302[0]+-1*Q_000110001*R_311[0]+Q_000110101*R_312[0];
double QR_001000010301=Q_001000010*R_301[0]+-1*Q_001000110*R_302[0]+-1*Q_101000010*R_401[0]+Q_101000110*R_402[0];
double QR_000001010301=Q_000001010*R_301[0]+-1*Q_000001110*R_302[0]+-1*Q_000101010*R_311[0]+Q_000101110*R_312[0];
double QR_000000011301=Q_000000011*R_301[0]+-1*Q_000000111*R_302[0]+Q_000000211*R_303[0];
double QR_011000000310=Q_011000000*R_310[0]+-1*Q_111000000*R_410[0]+Q_211000000*R_510[0];
double QR_010001000310=Q_010001000*R_310[0]+-1*Q_010101000*R_320[0]+-1*Q_110001000*R_410[0]+Q_110101000*R_420[0];
double QR_010000001310=Q_010000001*R_310[0]+-1*Q_010000101*R_311[0]+-1*Q_110000001*R_410[0]+Q_110000101*R_411[0];
double QR_001010000310=Q_001010000*R_310[0]+-1*Q_001110000*R_320[0]+-1*Q_101010000*R_410[0]+Q_101110000*R_420[0];
double QR_000011000310=Q_000011000*R_310[0]+-1*Q_000111000*R_320[0]+Q_000211000*R_330[0];
double QR_000010001310=Q_000010001*R_310[0]+-1*Q_000010101*R_311[0]+-1*Q_000110001*R_320[0]+Q_000110101*R_321[0];
double QR_001000010310=Q_001000010*R_310[0]+-1*Q_001000110*R_311[0]+-1*Q_101000010*R_410[0]+Q_101000110*R_411[0];
double QR_000001010310=Q_000001010*R_310[0]+-1*Q_000001110*R_311[0]+-1*Q_000101010*R_320[0]+Q_000101110*R_321[0];
double QR_000000011310=Q_000000011*R_310[0]+-1*Q_000000111*R_311[0]+Q_000000211*R_312[0];
double QR_011000000400=Q_011000000*R_400[0]+-1*Q_111000000*R_500[0]+Q_211000000*R_600[0];
double QR_010001000400=Q_010001000*R_400[0]+-1*Q_010101000*R_410[0]+-1*Q_110001000*R_500[0]+Q_110101000*R_510[0];
double QR_010000001400=Q_010000001*R_400[0]+-1*Q_010000101*R_401[0]+-1*Q_110000001*R_500[0]+Q_110000101*R_501[0];
double QR_001010000400=Q_001010000*R_400[0]+-1*Q_001110000*R_410[0]+-1*Q_101010000*R_500[0]+Q_101110000*R_510[0];
double QR_000011000400=Q_000011000*R_400[0]+-1*Q_000111000*R_410[0]+Q_000211000*R_420[0];
double QR_000010001400=Q_000010001*R_400[0]+-1*Q_000010101*R_401[0]+-1*Q_000110001*R_410[0]+Q_000110101*R_411[0];
double QR_001000010400=Q_001000010*R_400[0]+-1*Q_001000110*R_401[0]+-1*Q_101000010*R_500[0]+Q_101000110*R_501[0];
double QR_000001010400=Q_000001010*R_400[0]+-1*Q_000001110*R_401[0]+-1*Q_000101010*R_410[0]+Q_000101110*R_411[0];
double QR_000000011400=Q_000000011*R_400[0]+-1*Q_000000111*R_401[0]+Q_000000211*R_402[0];
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_012[3];
double Pd_112[3];
double Pd_212[3];
double Pd_312[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
double Pd_021[3];
double Pd_121[3];
double Pd_221[3];
double Pd_321[3];
double Pd_022[3];
double Pd_122[3];
double Pd_222[3];
double Pd_322[3];
double Pd_422[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_012[i]=Pd_111[i]+Pd_001[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_112[i]=2*Pd_211[i]+Pd_001[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_212[i]=Pd_001[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_312[i]=aPin1*Pd_211[i];
}
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
for(int i=0;i<3;i++){
Pd_021[i]=Pd_111[i]+Pd_010[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_121[i]=2*Pd_211[i]+Pd_010[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_221[i]=Pd_010[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_321[i]=aPin1*Pd_211[i];
}
for(int i=0;i<3;i++){
Pd_022[i]=Pd_112[i]+Pd_010[i]*Pd_012[i];
}
for(int i=0;i<3;i++){
Pd_122[i]=2*Pd_212[i]+Pd_010[i]*Pd_112[i]+aPin1*Pd_012[i];
}
for(int i=0;i<3;i++){
Pd_222[i]=3*Pd_312[i]+Pd_010[i]*Pd_212[i]+aPin1*Pd_112[i];
}
for(int i=0;i<3;i++){
Pd_322[i]=Pd_010[i]*Pd_312[i]+aPin1*Pd_212[i];
}
for(int i=0;i<3;i++){
Pd_422[i]=aPin1*Pd_312[i];
}
double P_022000000=Pd_022[0];
double P_122000000=Pd_122[0];
double P_222000000=Pd_222[0];
double P_322000000=Pd_322[0];
double P_422000000=Pd_422[0];
double P_021001000=Pd_021[0]*Pd_001[1];
double P_021101000=Pd_021[0]*Pd_101[1];
double P_121001000=Pd_121[0]*Pd_001[1];
double P_121101000=Pd_121[0]*Pd_101[1];
double P_221001000=Pd_221[0]*Pd_001[1];
double P_221101000=Pd_221[0]*Pd_101[1];
double P_321001000=Pd_321[0]*Pd_001[1];
double P_321101000=Pd_321[0]*Pd_101[1];
double P_020002000=Pd_020[0]*Pd_002[1];
double P_020102000=Pd_020[0]*Pd_102[1];
double P_020202000=Pd_020[0]*Pd_202[1];
double P_120002000=Pd_120[0]*Pd_002[1];
double P_120102000=Pd_120[0]*Pd_102[1];
double P_120202000=Pd_120[0]*Pd_202[1];
double P_220002000=Pd_220[0]*Pd_002[1];
double P_220102000=Pd_220[0]*Pd_102[1];
double P_220202000=Pd_220[0]*Pd_202[1];
double P_021000001=Pd_021[0]*Pd_001[2];
double P_021000101=Pd_021[0]*Pd_101[2];
double P_121000001=Pd_121[0]*Pd_001[2];
double P_121000101=Pd_121[0]*Pd_101[2];
double P_221000001=Pd_221[0]*Pd_001[2];
double P_221000101=Pd_221[0]*Pd_101[2];
double P_321000001=Pd_321[0]*Pd_001[2];
double P_321000101=Pd_321[0]*Pd_101[2];
double P_020001001=Pd_020[0]*Pd_001[1]*Pd_001[2];
double P_020001101=Pd_020[0]*Pd_001[1]*Pd_101[2];
double P_020101001=Pd_020[0]*Pd_101[1]*Pd_001[2];
double P_020101101=Pd_020[0]*Pd_101[1]*Pd_101[2];
double P_120001001=Pd_120[0]*Pd_001[1]*Pd_001[2];
double P_120001101=Pd_120[0]*Pd_001[1]*Pd_101[2];
double P_120101001=Pd_120[0]*Pd_101[1]*Pd_001[2];
double P_120101101=Pd_120[0]*Pd_101[1]*Pd_101[2];
double P_220001001=Pd_220[0]*Pd_001[1]*Pd_001[2];
double P_220001101=Pd_220[0]*Pd_001[1]*Pd_101[2];
double P_220101001=Pd_220[0]*Pd_101[1]*Pd_001[2];
double P_220101101=Pd_220[0]*Pd_101[1]*Pd_101[2];
double P_020000002=Pd_020[0]*Pd_002[2];
double P_020000102=Pd_020[0]*Pd_102[2];
double P_020000202=Pd_020[0]*Pd_202[2];
double P_120000002=Pd_120[0]*Pd_002[2];
double P_120000102=Pd_120[0]*Pd_102[2];
double P_120000202=Pd_120[0]*Pd_202[2];
double P_220000002=Pd_220[0]*Pd_002[2];
double P_220000102=Pd_220[0]*Pd_102[2];
double P_220000202=Pd_220[0]*Pd_202[2];
double P_012010000=Pd_012[0]*Pd_010[1];
double P_012110000=Pd_012[0]*Pd_110[1];
double P_112010000=Pd_112[0]*Pd_010[1];
double P_112110000=Pd_112[0]*Pd_110[1];
double P_212010000=Pd_212[0]*Pd_010[1];
double P_212110000=Pd_212[0]*Pd_110[1];
double P_312010000=Pd_312[0]*Pd_010[1];
double P_312110000=Pd_312[0]*Pd_110[1];
double P_011011000=Pd_011[0]*Pd_011[1];
double P_011111000=Pd_011[0]*Pd_111[1];
/* Products of per-axis expansion coefficients for the bra charge distribution.
 * Each factor Pd_abc[d] indexes one Cartesian axis (d = 0,1,2 for x,y,z); the
 * name P_xxxyyyzzz concatenates the three per-axis digit triplets, and a
 * triplet of zeros means that axis contributes a factor of 1 and its Pd_*
 * factor is simply omitted from the product (e.g. P_000022000 = Pd_022[1]).
 * NOTE(review): this is machine-generated code (see the hipify banner at the
 * top of the file) — presumably McMurchie-Davidson Hermite expansion
 * coefficients; confirm against the generator before hand-editing any term. */
double P_011211000=Pd_011[0]*Pd_211[1];
double P_111011000=Pd_111[0]*Pd_011[1];
double P_111111000=Pd_111[0]*Pd_111[1];
double P_111211000=Pd_111[0]*Pd_211[1];
double P_211011000=Pd_211[0]*Pd_011[1];
double P_211111000=Pd_211[0]*Pd_111[1];
double P_211211000=Pd_211[0]*Pd_211[1];
double P_010012000=Pd_010[0]*Pd_012[1];
double P_010112000=Pd_010[0]*Pd_112[1];
double P_010212000=Pd_010[0]*Pd_212[1];
double P_010312000=Pd_010[0]*Pd_312[1];
double P_110012000=Pd_110[0]*Pd_012[1];
double P_110112000=Pd_110[0]*Pd_112[1];
double P_110212000=Pd_110[0]*Pd_212[1];
double P_110312000=Pd_110[0]*Pd_312[1];
double P_011010001=Pd_011[0]*Pd_010[1]*Pd_001[2];
double P_011010101=Pd_011[0]*Pd_010[1]*Pd_101[2];
double P_011110001=Pd_011[0]*Pd_110[1]*Pd_001[2];
double P_011110101=Pd_011[0]*Pd_110[1]*Pd_101[2];
double P_111010001=Pd_111[0]*Pd_010[1]*Pd_001[2];
double P_111010101=Pd_111[0]*Pd_010[1]*Pd_101[2];
double P_111110001=Pd_111[0]*Pd_110[1]*Pd_001[2];
double P_111110101=Pd_111[0]*Pd_110[1]*Pd_101[2];
double P_211010001=Pd_211[0]*Pd_010[1]*Pd_001[2];
double P_211010101=Pd_211[0]*Pd_010[1]*Pd_101[2];
double P_211110001=Pd_211[0]*Pd_110[1]*Pd_001[2];
double P_211110101=Pd_211[0]*Pd_110[1]*Pd_101[2];
double P_010011001=Pd_010[0]*Pd_011[1]*Pd_001[2];
double P_010011101=Pd_010[0]*Pd_011[1]*Pd_101[2];
double P_010111001=Pd_010[0]*Pd_111[1]*Pd_001[2];
double P_010111101=Pd_010[0]*Pd_111[1]*Pd_101[2];
double P_010211001=Pd_010[0]*Pd_211[1]*Pd_001[2];
double P_010211101=Pd_010[0]*Pd_211[1]*Pd_101[2];
double P_110011001=Pd_110[0]*Pd_011[1]*Pd_001[2];
double P_110011101=Pd_110[0]*Pd_011[1]*Pd_101[2];
double P_110111001=Pd_110[0]*Pd_111[1]*Pd_001[2];
double P_110111101=Pd_110[0]*Pd_111[1]*Pd_101[2];
double P_110211001=Pd_110[0]*Pd_211[1]*Pd_001[2];
double P_110211101=Pd_110[0]*Pd_211[1]*Pd_101[2];
double P_010010002=Pd_010[0]*Pd_010[1]*Pd_002[2];
double P_010010102=Pd_010[0]*Pd_010[1]*Pd_102[2];
double P_010010202=Pd_010[0]*Pd_010[1]*Pd_202[2];
double P_010110002=Pd_010[0]*Pd_110[1]*Pd_002[2];
double P_010110102=Pd_010[0]*Pd_110[1]*Pd_102[2];
double P_010110202=Pd_010[0]*Pd_110[1]*Pd_202[2];
double P_110010002=Pd_110[0]*Pd_010[1]*Pd_002[2];
double P_110010102=Pd_110[0]*Pd_010[1]*Pd_102[2];
double P_110010202=Pd_110[0]*Pd_010[1]*Pd_202[2];
double P_110110002=Pd_110[0]*Pd_110[1]*Pd_002[2];
double P_110110102=Pd_110[0]*Pd_110[1]*Pd_102[2];
double P_110110202=Pd_110[0]*Pd_110[1]*Pd_202[2];
/* x/y combinations with zero z-triplet (two-factor products). */
double P_002020000=Pd_002[0]*Pd_020[1];
double P_002120000=Pd_002[0]*Pd_120[1];
double P_002220000=Pd_002[0]*Pd_220[1];
double P_102020000=Pd_102[0]*Pd_020[1];
double P_102120000=Pd_102[0]*Pd_120[1];
double P_102220000=Pd_102[0]*Pd_220[1];
double P_202020000=Pd_202[0]*Pd_020[1];
double P_202120000=Pd_202[0]*Pd_120[1];
double P_202220000=Pd_202[0]*Pd_220[1];
double P_001021000=Pd_001[0]*Pd_021[1];
double P_001121000=Pd_001[0]*Pd_121[1];
double P_001221000=Pd_001[0]*Pd_221[1];
double P_001321000=Pd_001[0]*Pd_321[1];
double P_101021000=Pd_101[0]*Pd_021[1];
double P_101121000=Pd_101[0]*Pd_121[1];
double P_101221000=Pd_101[0]*Pd_221[1];
double P_101321000=Pd_101[0]*Pd_321[1];
/* Pure y-axis terms: x and z triplets are zero, so only Pd_*[1] survives. */
double P_000022000=Pd_022[1];
double P_000122000=Pd_122[1];
double P_000222000=Pd_222[1];
double P_000322000=Pd_322[1];
double P_000422000=Pd_422[1];
double P_001020001=Pd_001[0]*Pd_020[1]*Pd_001[2];
double P_001020101=Pd_001[0]*Pd_020[1]*Pd_101[2];
double P_001120001=Pd_001[0]*Pd_120[1]*Pd_001[2];
double P_001120101=Pd_001[0]*Pd_120[1]*Pd_101[2];
double P_001220001=Pd_001[0]*Pd_220[1]*Pd_001[2];
double P_001220101=Pd_001[0]*Pd_220[1]*Pd_101[2];
double P_101020001=Pd_101[0]*Pd_020[1]*Pd_001[2];
double P_101020101=Pd_101[0]*Pd_020[1]*Pd_101[2];
double P_101120001=Pd_101[0]*Pd_120[1]*Pd_001[2];
double P_101120101=Pd_101[0]*Pd_120[1]*Pd_101[2];
double P_101220001=Pd_101[0]*Pd_220[1]*Pd_001[2];
double P_101220101=Pd_101[0]*Pd_220[1]*Pd_101[2];
double P_000021001=Pd_021[1]*Pd_001[2];
double P_000021101=Pd_021[1]*Pd_101[2];
double P_000121001=Pd_121[1]*Pd_001[2];
double P_000121101=Pd_121[1]*Pd_101[2];
double P_000221001=Pd_221[1]*Pd_001[2];
double P_000221101=Pd_221[1]*Pd_101[2];
double P_000321001=Pd_321[1]*Pd_001[2];
double P_000321101=Pd_321[1]*Pd_101[2];
double P_000020002=Pd_020[1]*Pd_002[2];
double P_000020102=Pd_020[1]*Pd_102[2];
double P_000020202=Pd_020[1]*Pd_202[2];
double P_000120002=Pd_120[1]*Pd_002[2];
double P_000120102=Pd_120[1]*Pd_102[2];
double P_000120202=Pd_120[1]*Pd_202[2];
double P_000220002=Pd_220[1]*Pd_002[2];
double P_000220102=Pd_220[1]*Pd_102[2];
double P_000220202=Pd_220[1]*Pd_202[2];
/* Combinations involving the z axis (non-zero third triplet, Pd_*[2]). */
double P_012000010=Pd_012[0]*Pd_010[2];
double P_012000110=Pd_012[0]*Pd_110[2];
double P_112000010=Pd_112[0]*Pd_010[2];
double P_112000110=Pd_112[0]*Pd_110[2];
double P_212000010=Pd_212[0]*Pd_010[2];
double P_212000110=Pd_212[0]*Pd_110[2];
double P_312000010=Pd_312[0]*Pd_010[2];
double P_312000110=Pd_312[0]*Pd_110[2];
double P_011001010=Pd_011[0]*Pd_001[1]*Pd_010[2];
double P_011001110=Pd_011[0]*Pd_001[1]*Pd_110[2];
double P_011101010=Pd_011[0]*Pd_101[1]*Pd_010[2];
double P_011101110=Pd_011[0]*Pd_101[1]*Pd_110[2];
double P_111001010=Pd_111[0]*Pd_001[1]*Pd_010[2];
double P_111001110=Pd_111[0]*Pd_001[1]*Pd_110[2];
double P_111101010=Pd_111[0]*Pd_101[1]*Pd_010[2];
double P_111101110=Pd_111[0]*Pd_101[1]*Pd_110[2];
double P_211001010=Pd_211[0]*Pd_001[1]*Pd_010[2];
double P_211001110=Pd_211[0]*Pd_001[1]*Pd_110[2];
double P_211101010=Pd_211[0]*Pd_101[1]*Pd_010[2];
double P_211101110=Pd_211[0]*Pd_101[1]*Pd_110[2];
double P_010002010=Pd_010[0]*Pd_002[1]*Pd_010[2];
double P_010002110=Pd_010[0]*Pd_002[1]*Pd_110[2];
double P_010102010=Pd_010[0]*Pd_102[1]*Pd_010[2];
double P_010102110=Pd_010[0]*Pd_102[1]*Pd_110[2];
double P_010202010=Pd_010[0]*Pd_202[1]*Pd_010[2];
double P_010202110=Pd_010[0]*Pd_202[1]*Pd_110[2];
double P_110002010=Pd_110[0]*Pd_002[1]*Pd_010[2];
double P_110002110=Pd_110[0]*Pd_002[1]*Pd_110[2];
double P_110102010=Pd_110[0]*Pd_102[1]*Pd_010[2];
double P_110102110=Pd_110[0]*Pd_102[1]*Pd_110[2];
double P_110202010=Pd_110[0]*Pd_202[1]*Pd_010[2];
double P_110202110=Pd_110[0]*Pd_202[1]*Pd_110[2];
double P_011000011=Pd_011[0]*Pd_011[2];
double P_011000111=Pd_011[0]*Pd_111[2];
double P_011000211=Pd_011[0]*Pd_211[2];
double P_111000011=Pd_111[0]*Pd_011[2];
double P_111000111=Pd_111[0]*Pd_111[2];
double P_111000211=Pd_111[0]*Pd_211[2];
double P_211000011=Pd_211[0]*Pd_011[2];
double P_211000111=Pd_211[0]*Pd_111[2];
double P_211000211=Pd_211[0]*Pd_211[2];
double P_010001011=Pd_010[0]*Pd_001[1]*Pd_011[2];
double P_010001111=Pd_010[0]*Pd_001[1]*Pd_111[2];
double P_010001211=Pd_010[0]*Pd_001[1]*Pd_211[2];
double P_010101011=Pd_010[0]*Pd_101[1]*Pd_011[2];
double P_010101111=Pd_010[0]*Pd_101[1]*Pd_111[2];
double P_010101211=Pd_010[0]*Pd_101[1]*Pd_211[2];
double P_110001011=Pd_110[0]*Pd_001[1]*Pd_011[2];
double P_110001111=Pd_110[0]*Pd_001[1]*Pd_111[2];
double P_110001211=Pd_110[0]*Pd_001[1]*Pd_211[2];
double P_110101011=Pd_110[0]*Pd_101[1]*Pd_011[2];
double P_110101111=Pd_110[0]*Pd_101[1]*Pd_111[2];
double P_110101211=Pd_110[0]*Pd_101[1]*Pd_211[2];
double P_010000012=Pd_010[0]*Pd_012[2];
double P_010000112=Pd_010[0]*Pd_112[2];
double P_010000212=Pd_010[0]*Pd_212[2];
double P_010000312=Pd_010[0]*Pd_312[2];
double P_110000012=Pd_110[0]*Pd_012[2];
double P_110000112=Pd_110[0]*Pd_112[2];
double P_110000212=Pd_110[0]*Pd_212[2];
double P_110000312=Pd_110[0]*Pd_312[2];
double P_002010010=Pd_002[0]*Pd_010[1]*Pd_010[2];
double P_002010110=Pd_002[0]*Pd_010[1]*Pd_110[2];
double P_002110010=Pd_002[0]*Pd_110[1]*Pd_010[2];
double P_002110110=Pd_002[0]*Pd_110[1]*Pd_110[2];
double P_102010010=Pd_102[0]*Pd_010[1]*Pd_010[2];
double P_102010110=Pd_102[0]*Pd_010[1]*Pd_110[2];
double P_102110010=Pd_102[0]*Pd_110[1]*Pd_010[2];
double P_102110110=Pd_102[0]*Pd_110[1]*Pd_110[2];
double P_202010010=Pd_202[0]*Pd_010[1]*Pd_010[2];
double P_202010110=Pd_202[0]*Pd_010[1]*Pd_110[2];
double P_202110010=Pd_202[0]*Pd_110[1]*Pd_010[2];
double P_202110110=Pd_202[0]*Pd_110[1]*Pd_110[2];
double P_001011010=Pd_001[0]*Pd_011[1]*Pd_010[2];
double P_001011110=Pd_001[0]*Pd_011[1]*Pd_110[2];
double P_001111010=Pd_001[0]*Pd_111[1]*Pd_010[2];
double P_001111110=Pd_001[0]*Pd_111[1]*Pd_110[2];
double P_001211010=Pd_001[0]*Pd_211[1]*Pd_010[2];
double P_001211110=Pd_001[0]*Pd_211[1]*Pd_110[2];
double P_101011010=Pd_101[0]*Pd_011[1]*Pd_010[2];
double P_101011110=Pd_101[0]*Pd_011[1]*Pd_110[2];
double P_101111010=Pd_101[0]*Pd_111[1]*Pd_010[2];
double P_101111110=Pd_101[0]*Pd_111[1]*Pd_110[2];
double P_101211010=Pd_101[0]*Pd_211[1]*Pd_010[2];
double P_101211110=Pd_101[0]*Pd_211[1]*Pd_110[2];
double P_000012010=Pd_012[1]*Pd_010[2];
double P_000012110=Pd_012[1]*Pd_110[2];
double P_000112010=Pd_112[1]*Pd_010[2];
double P_000112110=Pd_112[1]*Pd_110[2];
double P_000212010=Pd_212[1]*Pd_010[2];
double P_000212110=Pd_212[1]*Pd_110[2];
double P_000312010=Pd_312[1]*Pd_010[2];
double P_000312110=Pd_312[1]*Pd_110[2];
double P_001010011=Pd_001[0]*Pd_010[1]*Pd_011[2];
double P_001010111=Pd_001[0]*Pd_010[1]*Pd_111[2];
double P_001010211=Pd_001[0]*Pd_010[1]*Pd_211[2];
double P_001110011=Pd_001[0]*Pd_110[1]*Pd_011[2];
double P_001110111=Pd_001[0]*Pd_110[1]*Pd_111[2];
double P_001110211=Pd_001[0]*Pd_110[1]*Pd_211[2];
double P_101010011=Pd_101[0]*Pd_010[1]*Pd_011[2];
double P_101010111=Pd_101[0]*Pd_010[1]*Pd_111[2];
double P_101010211=Pd_101[0]*Pd_010[1]*Pd_211[2];
double P_101110011=Pd_101[0]*Pd_110[1]*Pd_011[2];
double P_101110111=Pd_101[0]*Pd_110[1]*Pd_111[2];
double P_101110211=Pd_101[0]*Pd_110[1]*Pd_211[2];
double P_000011011=Pd_011[1]*Pd_011[2];
double P_000011111=Pd_011[1]*Pd_111[2];
double P_000011211=Pd_011[1]*Pd_211[2];
double P_000111011=Pd_111[1]*Pd_011[2];
double P_000111111=Pd_111[1]*Pd_111[2];
double P_000111211=Pd_111[1]*Pd_211[2];
double P_000211011=Pd_211[1]*Pd_011[2];
double P_000211111=Pd_211[1]*Pd_111[2];
double P_000211211=Pd_211[1]*Pd_211[2];
double P_000010012=Pd_010[1]*Pd_012[2];
double P_000010112=Pd_010[1]*Pd_112[2];
double P_000010212=Pd_010[1]*Pd_212[2];
double P_000010312=Pd_010[1]*Pd_312[2];
double P_000110012=Pd_110[1]*Pd_012[2];
double P_000110112=Pd_110[1]*Pd_112[2];
double P_000110212=Pd_110[1]*Pd_212[2];
double P_000110312=Pd_110[1]*Pd_312[2];
double P_002000020=Pd_002[0]*Pd_020[2];
double P_002000120=Pd_002[0]*Pd_120[2];
double P_002000220=Pd_002[0]*Pd_220[2];
double P_102000020=Pd_102[0]*Pd_020[2];
double P_102000120=Pd_102[0]*Pd_120[2];
double P_102000220=Pd_102[0]*Pd_220[2];
double P_202000020=Pd_202[0]*Pd_020[2];
double P_202000120=Pd_202[0]*Pd_120[2];
double P_202000220=Pd_202[0]*Pd_220[2];
double P_001001020=Pd_001[0]*Pd_001[1]*Pd_020[2];
double P_001001120=Pd_001[0]*Pd_001[1]*Pd_120[2];
double P_001001220=Pd_001[0]*Pd_001[1]*Pd_220[2];
double P_001101020=Pd_001[0]*Pd_101[1]*Pd_020[2];
double P_001101120=Pd_001[0]*Pd_101[1]*Pd_120[2];
double P_001101220=Pd_001[0]*Pd_101[1]*Pd_220[2];
double P_101001020=Pd_101[0]*Pd_001[1]*Pd_020[2];
double P_101001120=Pd_101[0]*Pd_001[1]*Pd_120[2];
double P_101001220=Pd_101[0]*Pd_001[1]*Pd_220[2];
double P_101101020=Pd_101[0]*Pd_101[1]*Pd_020[2];
double P_101101120=Pd_101[0]*Pd_101[1]*Pd_120[2];
double P_101101220=Pd_101[0]*Pd_101[1]*Pd_220[2];
double P_000002020=Pd_002[1]*Pd_020[2];
double P_000002120=Pd_002[1]*Pd_120[2];
double P_000002220=Pd_002[1]*Pd_220[2];
double P_000102020=Pd_102[1]*Pd_020[2];
double P_000102120=Pd_102[1]*Pd_120[2];
double P_000102220=Pd_102[1]*Pd_220[2];
double P_000202020=Pd_202[1]*Pd_020[2];
double P_000202120=Pd_202[1]*Pd_120[2];
double P_000202220=Pd_202[1]*Pd_220[2];
double P_001000021=Pd_001[0]*Pd_021[2];
double P_001000121=Pd_001[0]*Pd_121[2];
double P_001000221=Pd_001[0]*Pd_221[2];
double P_001000321=Pd_001[0]*Pd_321[2];
double P_101000021=Pd_101[0]*Pd_021[2];
double P_101000121=Pd_101[0]*Pd_121[2];
double P_101000221=Pd_101[0]*Pd_221[2];
double P_101000321=Pd_101[0]*Pd_321[2];
double P_000001021=Pd_001[1]*Pd_021[2];
double P_000001121=Pd_001[1]*Pd_121[2];
double P_000001221=Pd_001[1]*Pd_221[2];
double P_000001321=Pd_001[1]*Pd_321[2];
double P_000101021=Pd_101[1]*Pd_021[2];
double P_000101121=Pd_101[1]*Pd_121[2];
double P_000101221=Pd_101[1]*Pd_221[2];
double P_000101321=Pd_101[1]*Pd_321[2];
/* Pure z-axis terms: only Pd_*[2] survives. */
double P_000000022=Pd_022[2];
double P_000000122=Pd_122[2];
double P_000000222=Pd_222[2];
double P_000000322=Pd_322[2];
double P_000000422=Pd_422[2];
ans_temp[ans_id*18+0]+=Pmtrx[0]*(P_022000000*QR_011000000000+P_122000000*QR_011000000100+P_222000000*QR_011000000200+P_322000000*QR_011000000300+P_422000000*QR_011000000400);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(P_022000000*QR_010001000000+P_122000000*QR_010001000100+P_222000000*QR_010001000200+P_322000000*QR_010001000300+P_422000000*QR_010001000400);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(P_022000000*QR_010000001000+P_122000000*QR_010000001100+P_222000000*QR_010000001200+P_322000000*QR_010000001300+P_422000000*QR_010000001400);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(P_022000000*QR_001010000000+P_122000000*QR_001010000100+P_222000000*QR_001010000200+P_322000000*QR_001010000300+P_422000000*QR_001010000400);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(P_022000000*QR_000011000000+P_122000000*QR_000011000100+P_222000000*QR_000011000200+P_322000000*QR_000011000300+P_422000000*QR_000011000400);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(P_022000000*QR_000010001000+P_122000000*QR_000010001100+P_222000000*QR_000010001200+P_322000000*QR_000010001300+P_422000000*QR_000010001400);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(P_022000000*QR_001000010000+P_122000000*QR_001000010100+P_222000000*QR_001000010200+P_322000000*QR_001000010300+P_422000000*QR_001000010400);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(P_022000000*QR_000001010000+P_122000000*QR_000001010100+P_222000000*QR_000001010200+P_322000000*QR_000001010300+P_422000000*QR_000001010400);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(P_022000000*QR_000000011000+P_122000000*QR_000000011100+P_222000000*QR_000000011200+P_322000000*QR_000000011300+P_422000000*QR_000000011400);
ans_temp[ans_id*18+0]+=Pmtrx[3]*(P_021001000*QR_011000000000+P_021101000*QR_011000000010+P_121001000*QR_011000000100+P_121101000*QR_011000000110+P_221001000*QR_011000000200+P_221101000*QR_011000000210+P_321001000*QR_011000000300+P_321101000*QR_011000000310);
ans_temp[ans_id*18+0]+=Pmtrx[4]*(P_021001000*QR_010001000000+P_021101000*QR_010001000010+P_121001000*QR_010001000100+P_121101000*QR_010001000110+P_221001000*QR_010001000200+P_221101000*QR_010001000210+P_321001000*QR_010001000300+P_321101000*QR_010001000310);
ans_temp[ans_id*18+0]+=Pmtrx[5]*(P_021001000*QR_010000001000+P_021101000*QR_010000001010+P_121001000*QR_010000001100+P_121101000*QR_010000001110+P_221001000*QR_010000001200+P_221101000*QR_010000001210+P_321001000*QR_010000001300+P_321101000*QR_010000001310);
ans_temp[ans_id*18+1]+=Pmtrx[3]*(P_021001000*QR_001010000000+P_021101000*QR_001010000010+P_121001000*QR_001010000100+P_121101000*QR_001010000110+P_221001000*QR_001010000200+P_221101000*QR_001010000210+P_321001000*QR_001010000300+P_321101000*QR_001010000310);
ans_temp[ans_id*18+1]+=Pmtrx[4]*(P_021001000*QR_000011000000+P_021101000*QR_000011000010+P_121001000*QR_000011000100+P_121101000*QR_000011000110+P_221001000*QR_000011000200+P_221101000*QR_000011000210+P_321001000*QR_000011000300+P_321101000*QR_000011000310);
ans_temp[ans_id*18+1]+=Pmtrx[5]*(P_021001000*QR_000010001000+P_021101000*QR_000010001010+P_121001000*QR_000010001100+P_121101000*QR_000010001110+P_221001000*QR_000010001200+P_221101000*QR_000010001210+P_321001000*QR_000010001300+P_321101000*QR_000010001310);
ans_temp[ans_id*18+2]+=Pmtrx[3]*(P_021001000*QR_001000010000+P_021101000*QR_001000010010+P_121001000*QR_001000010100+P_121101000*QR_001000010110+P_221001000*QR_001000010200+P_221101000*QR_001000010210+P_321001000*QR_001000010300+P_321101000*QR_001000010310);
ans_temp[ans_id*18+2]+=Pmtrx[4]*(P_021001000*QR_000001010000+P_021101000*QR_000001010010+P_121001000*QR_000001010100+P_121101000*QR_000001010110+P_221001000*QR_000001010200+P_221101000*QR_000001010210+P_321001000*QR_000001010300+P_321101000*QR_000001010310);
ans_temp[ans_id*18+2]+=Pmtrx[5]*(P_021001000*QR_000000011000+P_021101000*QR_000000011010+P_121001000*QR_000000011100+P_121101000*QR_000000011110+P_221001000*QR_000000011200+P_221101000*QR_000000011210+P_321001000*QR_000000011300+P_321101000*QR_000000011310);
ans_temp[ans_id*18+0]+=Pmtrx[6]*(P_020002000*QR_011000000000+P_020102000*QR_011000000010+P_020202000*QR_011000000020+P_120002000*QR_011000000100+P_120102000*QR_011000000110+P_120202000*QR_011000000120+P_220002000*QR_011000000200+P_220102000*QR_011000000210+P_220202000*QR_011000000220);
ans_temp[ans_id*18+0]+=Pmtrx[7]*(P_020002000*QR_010001000000+P_020102000*QR_010001000010+P_020202000*QR_010001000020+P_120002000*QR_010001000100+P_120102000*QR_010001000110+P_120202000*QR_010001000120+P_220002000*QR_010001000200+P_220102000*QR_010001000210+P_220202000*QR_010001000220);
ans_temp[ans_id*18+0]+=Pmtrx[8]*(P_020002000*QR_010000001000+P_020102000*QR_010000001010+P_020202000*QR_010000001020+P_120002000*QR_010000001100+P_120102000*QR_010000001110+P_120202000*QR_010000001120+P_220002000*QR_010000001200+P_220102000*QR_010000001210+P_220202000*QR_010000001220);
ans_temp[ans_id*18+1]+=Pmtrx[6]*(P_020002000*QR_001010000000+P_020102000*QR_001010000010+P_020202000*QR_001010000020+P_120002000*QR_001010000100+P_120102000*QR_001010000110+P_120202000*QR_001010000120+P_220002000*QR_001010000200+P_220102000*QR_001010000210+P_220202000*QR_001010000220);
ans_temp[ans_id*18+1]+=Pmtrx[7]*(P_020002000*QR_000011000000+P_020102000*QR_000011000010+P_020202000*QR_000011000020+P_120002000*QR_000011000100+P_120102000*QR_000011000110+P_120202000*QR_000011000120+P_220002000*QR_000011000200+P_220102000*QR_000011000210+P_220202000*QR_000011000220);
ans_temp[ans_id*18+1]+=Pmtrx[8]*(P_020002000*QR_000010001000+P_020102000*QR_000010001010+P_020202000*QR_000010001020+P_120002000*QR_000010001100+P_120102000*QR_000010001110+P_120202000*QR_000010001120+P_220002000*QR_000010001200+P_220102000*QR_000010001210+P_220202000*QR_000010001220);
ans_temp[ans_id*18+2]+=Pmtrx[6]*(P_020002000*QR_001000010000+P_020102000*QR_001000010010+P_020202000*QR_001000010020+P_120002000*QR_001000010100+P_120102000*QR_001000010110+P_120202000*QR_001000010120+P_220002000*QR_001000010200+P_220102000*QR_001000010210+P_220202000*QR_001000010220);
ans_temp[ans_id*18+2]+=Pmtrx[7]*(P_020002000*QR_000001010000+P_020102000*QR_000001010010+P_020202000*QR_000001010020+P_120002000*QR_000001010100+P_120102000*QR_000001010110+P_120202000*QR_000001010120+P_220002000*QR_000001010200+P_220102000*QR_000001010210+P_220202000*QR_000001010220);
ans_temp[ans_id*18+2]+=Pmtrx[8]*(P_020002000*QR_000000011000+P_020102000*QR_000000011010+P_020202000*QR_000000011020+P_120002000*QR_000000011100+P_120102000*QR_000000011110+P_120202000*QR_000000011120+P_220002000*QR_000000011200+P_220102000*QR_000000011210+P_220202000*QR_000000011220);
ans_temp[ans_id*18+0]+=Pmtrx[9]*(P_021000001*QR_011000000000+P_021000101*QR_011000000001+P_121000001*QR_011000000100+P_121000101*QR_011000000101+P_221000001*QR_011000000200+P_221000101*QR_011000000201+P_321000001*QR_011000000300+P_321000101*QR_011000000301);
ans_temp[ans_id*18+0]+=Pmtrx[10]*(P_021000001*QR_010001000000+P_021000101*QR_010001000001+P_121000001*QR_010001000100+P_121000101*QR_010001000101+P_221000001*QR_010001000200+P_221000101*QR_010001000201+P_321000001*QR_010001000300+P_321000101*QR_010001000301);
ans_temp[ans_id*18+0]+=Pmtrx[11]*(P_021000001*QR_010000001000+P_021000101*QR_010000001001+P_121000001*QR_010000001100+P_121000101*QR_010000001101+P_221000001*QR_010000001200+P_221000101*QR_010000001201+P_321000001*QR_010000001300+P_321000101*QR_010000001301);
ans_temp[ans_id*18+1]+=Pmtrx[9]*(P_021000001*QR_001010000000+P_021000101*QR_001010000001+P_121000001*QR_001010000100+P_121000101*QR_001010000101+P_221000001*QR_001010000200+P_221000101*QR_001010000201+P_321000001*QR_001010000300+P_321000101*QR_001010000301);
ans_temp[ans_id*18+1]+=Pmtrx[10]*(P_021000001*QR_000011000000+P_021000101*QR_000011000001+P_121000001*QR_000011000100+P_121000101*QR_000011000101+P_221000001*QR_000011000200+P_221000101*QR_000011000201+P_321000001*QR_000011000300+P_321000101*QR_000011000301);
ans_temp[ans_id*18+1]+=Pmtrx[11]*(P_021000001*QR_000010001000+P_021000101*QR_000010001001+P_121000001*QR_000010001100+P_121000101*QR_000010001101+P_221000001*QR_000010001200+P_221000101*QR_000010001201+P_321000001*QR_000010001300+P_321000101*QR_000010001301);
ans_temp[ans_id*18+2]+=Pmtrx[9]*(P_021000001*QR_001000010000+P_021000101*QR_001000010001+P_121000001*QR_001000010100+P_121000101*QR_001000010101+P_221000001*QR_001000010200+P_221000101*QR_001000010201+P_321000001*QR_001000010300+P_321000101*QR_001000010301);
ans_temp[ans_id*18+2]+=Pmtrx[10]*(P_021000001*QR_000001010000+P_021000101*QR_000001010001+P_121000001*QR_000001010100+P_121000101*QR_000001010101+P_221000001*QR_000001010200+P_221000101*QR_000001010201+P_321000001*QR_000001010300+P_321000101*QR_000001010301);
ans_temp[ans_id*18+2]+=Pmtrx[11]*(P_021000001*QR_000000011000+P_021000101*QR_000000011001+P_121000001*QR_000000011100+P_121000101*QR_000000011101+P_221000001*QR_000000011200+P_221000101*QR_000000011201+P_321000001*QR_000000011300+P_321000101*QR_000000011301);
ans_temp[ans_id*18+0]+=Pmtrx[12]*(P_020001001*QR_011000000000+P_020001101*QR_011000000001+P_020101001*QR_011000000010+P_020101101*QR_011000000011+P_120001001*QR_011000000100+P_120001101*QR_011000000101+P_120101001*QR_011000000110+P_120101101*QR_011000000111+P_220001001*QR_011000000200+P_220001101*QR_011000000201+P_220101001*QR_011000000210+P_220101101*QR_011000000211);
ans_temp[ans_id*18+0]+=Pmtrx[13]*(P_020001001*QR_010001000000+P_020001101*QR_010001000001+P_020101001*QR_010001000010+P_020101101*QR_010001000011+P_120001001*QR_010001000100+P_120001101*QR_010001000101+P_120101001*QR_010001000110+P_120101101*QR_010001000111+P_220001001*QR_010001000200+P_220001101*QR_010001000201+P_220101001*QR_010001000210+P_220101101*QR_010001000211);
ans_temp[ans_id*18+0]+=Pmtrx[14]*(P_020001001*QR_010000001000+P_020001101*QR_010000001001+P_020101001*QR_010000001010+P_020101101*QR_010000001011+P_120001001*QR_010000001100+P_120001101*QR_010000001101+P_120101001*QR_010000001110+P_120101101*QR_010000001111+P_220001001*QR_010000001200+P_220001101*QR_010000001201+P_220101001*QR_010000001210+P_220101101*QR_010000001211);
ans_temp[ans_id*18+1]+=Pmtrx[12]*(P_020001001*QR_001010000000+P_020001101*QR_001010000001+P_020101001*QR_001010000010+P_020101101*QR_001010000011+P_120001001*QR_001010000100+P_120001101*QR_001010000101+P_120101001*QR_001010000110+P_120101101*QR_001010000111+P_220001001*QR_001010000200+P_220001101*QR_001010000201+P_220101001*QR_001010000210+P_220101101*QR_001010000211);
ans_temp[ans_id*18+1]+=Pmtrx[13]*(P_020001001*QR_000011000000+P_020001101*QR_000011000001+P_020101001*QR_000011000010+P_020101101*QR_000011000011+P_120001001*QR_000011000100+P_120001101*QR_000011000101+P_120101001*QR_000011000110+P_120101101*QR_000011000111+P_220001001*QR_000011000200+P_220001101*QR_000011000201+P_220101001*QR_000011000210+P_220101101*QR_000011000211);
ans_temp[ans_id*18+1]+=Pmtrx[14]*(P_020001001*QR_000010001000+P_020001101*QR_000010001001+P_020101001*QR_000010001010+P_020101101*QR_000010001011+P_120001001*QR_000010001100+P_120001101*QR_000010001101+P_120101001*QR_000010001110+P_120101101*QR_000010001111+P_220001001*QR_000010001200+P_220001101*QR_000010001201+P_220101001*QR_000010001210+P_220101101*QR_000010001211);
ans_temp[ans_id*18+2]+=Pmtrx[12]*(P_020001001*QR_001000010000+P_020001101*QR_001000010001+P_020101001*QR_001000010010+P_020101101*QR_001000010011+P_120001001*QR_001000010100+P_120001101*QR_001000010101+P_120101001*QR_001000010110+P_120101101*QR_001000010111+P_220001001*QR_001000010200+P_220001101*QR_001000010201+P_220101001*QR_001000010210+P_220101101*QR_001000010211);
ans_temp[ans_id*18+2]+=Pmtrx[13]*(P_020001001*QR_000001010000+P_020001101*QR_000001010001+P_020101001*QR_000001010010+P_020101101*QR_000001010011+P_120001001*QR_000001010100+P_120001101*QR_000001010101+P_120101001*QR_000001010110+P_120101101*QR_000001010111+P_220001001*QR_000001010200+P_220001101*QR_000001010201+P_220101001*QR_000001010210+P_220101101*QR_000001010211);
ans_temp[ans_id*18+2]+=Pmtrx[14]*(P_020001001*QR_000000011000+P_020001101*QR_000000011001+P_020101001*QR_000000011010+P_020101101*QR_000000011011+P_120001001*QR_000000011100+P_120001101*QR_000000011101+P_120101001*QR_000000011110+P_120101101*QR_000000011111+P_220001001*QR_000000011200+P_220001101*QR_000000011201+P_220101001*QR_000000011210+P_220101101*QR_000000011211);
ans_temp[ans_id*18+0]+=Pmtrx[15]*(P_020000002*QR_011000000000+P_020000102*QR_011000000001+P_020000202*QR_011000000002+P_120000002*QR_011000000100+P_120000102*QR_011000000101+P_120000202*QR_011000000102+P_220000002*QR_011000000200+P_220000102*QR_011000000201+P_220000202*QR_011000000202);
ans_temp[ans_id*18+0]+=Pmtrx[16]*(P_020000002*QR_010001000000+P_020000102*QR_010001000001+P_020000202*QR_010001000002+P_120000002*QR_010001000100+P_120000102*QR_010001000101+P_120000202*QR_010001000102+P_220000002*QR_010001000200+P_220000102*QR_010001000201+P_220000202*QR_010001000202);
ans_temp[ans_id*18+0]+=Pmtrx[17]*(P_020000002*QR_010000001000+P_020000102*QR_010000001001+P_020000202*QR_010000001002+P_120000002*QR_010000001100+P_120000102*QR_010000001101+P_120000202*QR_010000001102+P_220000002*QR_010000001200+P_220000102*QR_010000001201+P_220000202*QR_010000001202);
ans_temp[ans_id*18+1]+=Pmtrx[15]*(P_020000002*QR_001010000000+P_020000102*QR_001010000001+P_020000202*QR_001010000002+P_120000002*QR_001010000100+P_120000102*QR_001010000101+P_120000202*QR_001010000102+P_220000002*QR_001010000200+P_220000102*QR_001010000201+P_220000202*QR_001010000202);
ans_temp[ans_id*18+1]+=Pmtrx[16]*(P_020000002*QR_000011000000+P_020000102*QR_000011000001+P_020000202*QR_000011000002+P_120000002*QR_000011000100+P_120000102*QR_000011000101+P_120000202*QR_000011000102+P_220000002*QR_000011000200+P_220000102*QR_000011000201+P_220000202*QR_000011000202);
ans_temp[ans_id*18+1]+=Pmtrx[17]*(P_020000002*QR_000010001000+P_020000102*QR_000010001001+P_020000202*QR_000010001002+P_120000002*QR_000010001100+P_120000102*QR_000010001101+P_120000202*QR_000010001102+P_220000002*QR_000010001200+P_220000102*QR_000010001201+P_220000202*QR_000010001202);
ans_temp[ans_id*18+2]+=Pmtrx[15]*(P_020000002*QR_001000010000+P_020000102*QR_001000010001+P_020000202*QR_001000010002+P_120000002*QR_001000010100+P_120000102*QR_001000010101+P_120000202*QR_001000010102+P_220000002*QR_001000010200+P_220000102*QR_001000010201+P_220000202*QR_001000010202);
ans_temp[ans_id*18+2]+=Pmtrx[16]*(P_020000002*QR_000001010000+P_020000102*QR_000001010001+P_020000202*QR_000001010002+P_120000002*QR_000001010100+P_120000102*QR_000001010101+P_120000202*QR_000001010102+P_220000002*QR_000001010200+P_220000102*QR_000001010201+P_220000202*QR_000001010202);
ans_temp[ans_id*18+2]+=Pmtrx[17]*(P_020000002*QR_000000011000+P_020000102*QR_000000011001+P_020000202*QR_000000011002+P_120000002*QR_000000011100+P_120000102*QR_000000011101+P_120000202*QR_000000011102+P_220000002*QR_000000011200+P_220000102*QR_000000011201+P_220000202*QR_000000011202);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(P_012010000*QR_011000000000+P_012110000*QR_011000000010+P_112010000*QR_011000000100+P_112110000*QR_011000000110+P_212010000*QR_011000000200+P_212110000*QR_011000000210+P_312010000*QR_011000000300+P_312110000*QR_011000000310);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(P_012010000*QR_010001000000+P_012110000*QR_010001000010+P_112010000*QR_010001000100+P_112110000*QR_010001000110+P_212010000*QR_010001000200+P_212110000*QR_010001000210+P_312010000*QR_010001000300+P_312110000*QR_010001000310);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(P_012010000*QR_010000001000+P_012110000*QR_010000001010+P_112010000*QR_010000001100+P_112110000*QR_010000001110+P_212010000*QR_010000001200+P_212110000*QR_010000001210+P_312010000*QR_010000001300+P_312110000*QR_010000001310);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(P_012010000*QR_001010000000+P_012110000*QR_001010000010+P_112010000*QR_001010000100+P_112110000*QR_001010000110+P_212010000*QR_001010000200+P_212110000*QR_001010000210+P_312010000*QR_001010000300+P_312110000*QR_001010000310);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(P_012010000*QR_000011000000+P_012110000*QR_000011000010+P_112010000*QR_000011000100+P_112110000*QR_000011000110+P_212010000*QR_000011000200+P_212110000*QR_000011000210+P_312010000*QR_000011000300+P_312110000*QR_000011000310);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(P_012010000*QR_000010001000+P_012110000*QR_000010001010+P_112010000*QR_000010001100+P_112110000*QR_000010001110+P_212010000*QR_000010001200+P_212110000*QR_000010001210+P_312010000*QR_000010001300+P_312110000*QR_000010001310);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(P_012010000*QR_001000010000+P_012110000*QR_001000010010+P_112010000*QR_001000010100+P_112110000*QR_001000010110+P_212010000*QR_001000010200+P_212110000*QR_001000010210+P_312010000*QR_001000010300+P_312110000*QR_001000010310);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(P_012010000*QR_000001010000+P_012110000*QR_000001010010+P_112010000*QR_000001010100+P_112110000*QR_000001010110+P_212010000*QR_000001010200+P_212110000*QR_000001010210+P_312010000*QR_000001010300+P_312110000*QR_000001010310);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(P_012010000*QR_000000011000+P_012110000*QR_000000011010+P_112010000*QR_000000011100+P_112110000*QR_000000011110+P_212010000*QR_000000011200+P_212110000*QR_000000011210+P_312010000*QR_000000011300+P_312110000*QR_000000011310);
ans_temp[ans_id*18+3]+=Pmtrx[3]*(P_011011000*QR_011000000000+P_011111000*QR_011000000010+P_011211000*QR_011000000020+P_111011000*QR_011000000100+P_111111000*QR_011000000110+P_111211000*QR_011000000120+P_211011000*QR_011000000200+P_211111000*QR_011000000210+P_211211000*QR_011000000220);
ans_temp[ans_id*18+3]+=Pmtrx[4]*(P_011011000*QR_010001000000+P_011111000*QR_010001000010+P_011211000*QR_010001000020+P_111011000*QR_010001000100+P_111111000*QR_010001000110+P_111211000*QR_010001000120+P_211011000*QR_010001000200+P_211111000*QR_010001000210+P_211211000*QR_010001000220);
ans_temp[ans_id*18+3]+=Pmtrx[5]*(P_011011000*QR_010000001000+P_011111000*QR_010000001010+P_011211000*QR_010000001020+P_111011000*QR_010000001100+P_111111000*QR_010000001110+P_111211000*QR_010000001120+P_211011000*QR_010000001200+P_211111000*QR_010000001210+P_211211000*QR_010000001220);
ans_temp[ans_id*18+4]+=Pmtrx[3]*(P_011011000*QR_001010000000+P_011111000*QR_001010000010+P_011211000*QR_001010000020+P_111011000*QR_001010000100+P_111111000*QR_001010000110+P_111211000*QR_001010000120+P_211011000*QR_001010000200+P_211111000*QR_001010000210+P_211211000*QR_001010000220);
ans_temp[ans_id*18+4]+=Pmtrx[4]*(P_011011000*QR_000011000000+P_011111000*QR_000011000010+P_011211000*QR_000011000020+P_111011000*QR_000011000100+P_111111000*QR_000011000110+P_111211000*QR_000011000120+P_211011000*QR_000011000200+P_211111000*QR_000011000210+P_211211000*QR_000011000220);
ans_temp[ans_id*18+4]+=Pmtrx[5]*(P_011011000*QR_000010001000+P_011111000*QR_000010001010+P_011211000*QR_000010001020+P_111011000*QR_000010001100+P_111111000*QR_000010001110+P_111211000*QR_000010001120+P_211011000*QR_000010001200+P_211111000*QR_000010001210+P_211211000*QR_000010001220);
ans_temp[ans_id*18+5]+=Pmtrx[3]*(P_011011000*QR_001000010000+P_011111000*QR_001000010010+P_011211000*QR_001000010020+P_111011000*QR_001000010100+P_111111000*QR_001000010110+P_111211000*QR_001000010120+P_211011000*QR_001000010200+P_211111000*QR_001000010210+P_211211000*QR_001000010220);
ans_temp[ans_id*18+5]+=Pmtrx[4]*(P_011011000*QR_000001010000+P_011111000*QR_000001010010+P_011211000*QR_000001010020+P_111011000*QR_000001010100+P_111111000*QR_000001010110+P_111211000*QR_000001010120+P_211011000*QR_000001010200+P_211111000*QR_000001010210+P_211211000*QR_000001010220);
ans_temp[ans_id*18+5]+=Pmtrx[5]*(P_011011000*QR_000000011000+P_011111000*QR_000000011010+P_011211000*QR_000000011020+P_111011000*QR_000000011100+P_111111000*QR_000000011110+P_111211000*QR_000000011120+P_211011000*QR_000000011200+P_211111000*QR_000000011210+P_211211000*QR_000000011220);
ans_temp[ans_id*18+3]+=Pmtrx[6]*(P_010012000*QR_011000000000+P_010112000*QR_011000000010+P_010212000*QR_011000000020+P_010312000*QR_011000000030+P_110012000*QR_011000000100+P_110112000*QR_011000000110+P_110212000*QR_011000000120+P_110312000*QR_011000000130);
ans_temp[ans_id*18+3]+=Pmtrx[7]*(P_010012000*QR_010001000000+P_010112000*QR_010001000010+P_010212000*QR_010001000020+P_010312000*QR_010001000030+P_110012000*QR_010001000100+P_110112000*QR_010001000110+P_110212000*QR_010001000120+P_110312000*QR_010001000130);
ans_temp[ans_id*18+3]+=Pmtrx[8]*(P_010012000*QR_010000001000+P_010112000*QR_010000001010+P_010212000*QR_010000001020+P_010312000*QR_010000001030+P_110012000*QR_010000001100+P_110112000*QR_010000001110+P_110212000*QR_010000001120+P_110312000*QR_010000001130);
ans_temp[ans_id*18+4]+=Pmtrx[6]*(P_010012000*QR_001010000000+P_010112000*QR_001010000010+P_010212000*QR_001010000020+P_010312000*QR_001010000030+P_110012000*QR_001010000100+P_110112000*QR_001010000110+P_110212000*QR_001010000120+P_110312000*QR_001010000130);
ans_temp[ans_id*18+4]+=Pmtrx[7]*(P_010012000*QR_000011000000+P_010112000*QR_000011000010+P_010212000*QR_000011000020+P_010312000*QR_000011000030+P_110012000*QR_000011000100+P_110112000*QR_000011000110+P_110212000*QR_000011000120+P_110312000*QR_000011000130);
ans_temp[ans_id*18+4]+=Pmtrx[8]*(P_010012000*QR_000010001000+P_010112000*QR_000010001010+P_010212000*QR_000010001020+P_010312000*QR_000010001030+P_110012000*QR_000010001100+P_110112000*QR_000010001110+P_110212000*QR_000010001120+P_110312000*QR_000010001130);
ans_temp[ans_id*18+5]+=Pmtrx[6]*(P_010012000*QR_001000010000+P_010112000*QR_001000010010+P_010212000*QR_001000010020+P_010312000*QR_001000010030+P_110012000*QR_001000010100+P_110112000*QR_001000010110+P_110212000*QR_001000010120+P_110312000*QR_001000010130);
ans_temp[ans_id*18+5]+=Pmtrx[7]*(P_010012000*QR_000001010000+P_010112000*QR_000001010010+P_010212000*QR_000001010020+P_010312000*QR_000001010030+P_110012000*QR_000001010100+P_110112000*QR_000001010110+P_110212000*QR_000001010120+P_110312000*QR_000001010130);
ans_temp[ans_id*18+5]+=Pmtrx[8]*(P_010012000*QR_000000011000+P_010112000*QR_000000011010+P_010212000*QR_000000011020+P_010312000*QR_000000011030+P_110012000*QR_000000011100+P_110112000*QR_000000011110+P_110212000*QR_000000011120+P_110312000*QR_000000011130);
ans_temp[ans_id*18+3]+=Pmtrx[9]*(P_011010001*QR_011000000000+P_011010101*QR_011000000001+P_011110001*QR_011000000010+P_011110101*QR_011000000011+P_111010001*QR_011000000100+P_111010101*QR_011000000101+P_111110001*QR_011000000110+P_111110101*QR_011000000111+P_211010001*QR_011000000200+P_211010101*QR_011000000201+P_211110001*QR_011000000210+P_211110101*QR_011000000211);
ans_temp[ans_id*18+3]+=Pmtrx[10]*(P_011010001*QR_010001000000+P_011010101*QR_010001000001+P_011110001*QR_010001000010+P_011110101*QR_010001000011+P_111010001*QR_010001000100+P_111010101*QR_010001000101+P_111110001*QR_010001000110+P_111110101*QR_010001000111+P_211010001*QR_010001000200+P_211010101*QR_010001000201+P_211110001*QR_010001000210+P_211110101*QR_010001000211);
ans_temp[ans_id*18+3]+=Pmtrx[11]*(P_011010001*QR_010000001000+P_011010101*QR_010000001001+P_011110001*QR_010000001010+P_011110101*QR_010000001011+P_111010001*QR_010000001100+P_111010101*QR_010000001101+P_111110001*QR_010000001110+P_111110101*QR_010000001111+P_211010001*QR_010000001200+P_211010101*QR_010000001201+P_211110001*QR_010000001210+P_211110101*QR_010000001211);
ans_temp[ans_id*18+4]+=Pmtrx[9]*(P_011010001*QR_001010000000+P_011010101*QR_001010000001+P_011110001*QR_001010000010+P_011110101*QR_001010000011+P_111010001*QR_001010000100+P_111010101*QR_001010000101+P_111110001*QR_001010000110+P_111110101*QR_001010000111+P_211010001*QR_001010000200+P_211010101*QR_001010000201+P_211110001*QR_001010000210+P_211110101*QR_001010000211);
ans_temp[ans_id*18+4]+=Pmtrx[10]*(P_011010001*QR_000011000000+P_011010101*QR_000011000001+P_011110001*QR_000011000010+P_011110101*QR_000011000011+P_111010001*QR_000011000100+P_111010101*QR_000011000101+P_111110001*QR_000011000110+P_111110101*QR_000011000111+P_211010001*QR_000011000200+P_211010101*QR_000011000201+P_211110001*QR_000011000210+P_211110101*QR_000011000211);
ans_temp[ans_id*18+4]+=Pmtrx[11]*(P_011010001*QR_000010001000+P_011010101*QR_000010001001+P_011110001*QR_000010001010+P_011110101*QR_000010001011+P_111010001*QR_000010001100+P_111010101*QR_000010001101+P_111110001*QR_000010001110+P_111110101*QR_000010001111+P_211010001*QR_000010001200+P_211010101*QR_000010001201+P_211110001*QR_000010001210+P_211110101*QR_000010001211);
ans_temp[ans_id*18+5]+=Pmtrx[9]*(P_011010001*QR_001000010000+P_011010101*QR_001000010001+P_011110001*QR_001000010010+P_011110101*QR_001000010011+P_111010001*QR_001000010100+P_111010101*QR_001000010101+P_111110001*QR_001000010110+P_111110101*QR_001000010111+P_211010001*QR_001000010200+P_211010101*QR_001000010201+P_211110001*QR_001000010210+P_211110101*QR_001000010211);
ans_temp[ans_id*18+5]+=Pmtrx[10]*(P_011010001*QR_000001010000+P_011010101*QR_000001010001+P_011110001*QR_000001010010+P_011110101*QR_000001010011+P_111010001*QR_000001010100+P_111010101*QR_000001010101+P_111110001*QR_000001010110+P_111110101*QR_000001010111+P_211010001*QR_000001010200+P_211010101*QR_000001010201+P_211110001*QR_000001010210+P_211110101*QR_000001010211);
ans_temp[ans_id*18+5]+=Pmtrx[11]*(P_011010001*QR_000000011000+P_011010101*QR_000000011001+P_011110001*QR_000000011010+P_011110101*QR_000000011011+P_111010001*QR_000000011100+P_111010101*QR_000000011101+P_111110001*QR_000000011110+P_111110101*QR_000000011111+P_211010001*QR_000000011200+P_211010101*QR_000000011201+P_211110001*QR_000000011210+P_211110101*QR_000000011211);
ans_temp[ans_id*18+3]+=Pmtrx[12]*(P_010011001*QR_011000000000+P_010011101*QR_011000000001+P_010111001*QR_011000000010+P_010111101*QR_011000000011+P_010211001*QR_011000000020+P_010211101*QR_011000000021+P_110011001*QR_011000000100+P_110011101*QR_011000000101+P_110111001*QR_011000000110+P_110111101*QR_011000000111+P_110211001*QR_011000000120+P_110211101*QR_011000000121);
ans_temp[ans_id*18+3]+=Pmtrx[13]*(P_010011001*QR_010001000000+P_010011101*QR_010001000001+P_010111001*QR_010001000010+P_010111101*QR_010001000011+P_010211001*QR_010001000020+P_010211101*QR_010001000021+P_110011001*QR_010001000100+P_110011101*QR_010001000101+P_110111001*QR_010001000110+P_110111101*QR_010001000111+P_110211001*QR_010001000120+P_110211101*QR_010001000121);
ans_temp[ans_id*18+3]+=Pmtrx[14]*(P_010011001*QR_010000001000+P_010011101*QR_010000001001+P_010111001*QR_010000001010+P_010111101*QR_010000001011+P_010211001*QR_010000001020+P_010211101*QR_010000001021+P_110011001*QR_010000001100+P_110011101*QR_010000001101+P_110111001*QR_010000001110+P_110111101*QR_010000001111+P_110211001*QR_010000001120+P_110211101*QR_010000001121);
ans_temp[ans_id*18+4]+=Pmtrx[12]*(P_010011001*QR_001010000000+P_010011101*QR_001010000001+P_010111001*QR_001010000010+P_010111101*QR_001010000011+P_010211001*QR_001010000020+P_010211101*QR_001010000021+P_110011001*QR_001010000100+P_110011101*QR_001010000101+P_110111001*QR_001010000110+P_110111101*QR_001010000111+P_110211001*QR_001010000120+P_110211101*QR_001010000121);
ans_temp[ans_id*18+4]+=Pmtrx[13]*(P_010011001*QR_000011000000+P_010011101*QR_000011000001+P_010111001*QR_000011000010+P_010111101*QR_000011000011+P_010211001*QR_000011000020+P_010211101*QR_000011000021+P_110011001*QR_000011000100+P_110011101*QR_000011000101+P_110111001*QR_000011000110+P_110111101*QR_000011000111+P_110211001*QR_000011000120+P_110211101*QR_000011000121);
ans_temp[ans_id*18+4]+=Pmtrx[14]*(P_010011001*QR_000010001000+P_010011101*QR_000010001001+P_010111001*QR_000010001010+P_010111101*QR_000010001011+P_010211001*QR_000010001020+P_010211101*QR_000010001021+P_110011001*QR_000010001100+P_110011101*QR_000010001101+P_110111001*QR_000010001110+P_110111101*QR_000010001111+P_110211001*QR_000010001120+P_110211101*QR_000010001121);
ans_temp[ans_id*18+5]+=Pmtrx[12]*(P_010011001*QR_001000010000+P_010011101*QR_001000010001+P_010111001*QR_001000010010+P_010111101*QR_001000010011+P_010211001*QR_001000010020+P_010211101*QR_001000010021+P_110011001*QR_001000010100+P_110011101*QR_001000010101+P_110111001*QR_001000010110+P_110111101*QR_001000010111+P_110211001*QR_001000010120+P_110211101*QR_001000010121);
ans_temp[ans_id*18+5]+=Pmtrx[13]*(P_010011001*QR_000001010000+P_010011101*QR_000001010001+P_010111001*QR_000001010010+P_010111101*QR_000001010011+P_010211001*QR_000001010020+P_010211101*QR_000001010021+P_110011001*QR_000001010100+P_110011101*QR_000001010101+P_110111001*QR_000001010110+P_110111101*QR_000001010111+P_110211001*QR_000001010120+P_110211101*QR_000001010121);
ans_temp[ans_id*18+5]+=Pmtrx[14]*(P_010011001*QR_000000011000+P_010011101*QR_000000011001+P_010111001*QR_000000011010+P_010111101*QR_000000011011+P_010211001*QR_000000011020+P_010211101*QR_000000011021+P_110011001*QR_000000011100+P_110011101*QR_000000011101+P_110111001*QR_000000011110+P_110111101*QR_000000011111+P_110211001*QR_000000011120+P_110211101*QR_000000011121);
ans_temp[ans_id*18+3]+=Pmtrx[15]*(P_010010002*QR_011000000000+P_010010102*QR_011000000001+P_010010202*QR_011000000002+P_010110002*QR_011000000010+P_010110102*QR_011000000011+P_010110202*QR_011000000012+P_110010002*QR_011000000100+P_110010102*QR_011000000101+P_110010202*QR_011000000102+P_110110002*QR_011000000110+P_110110102*QR_011000000111+P_110110202*QR_011000000112);
ans_temp[ans_id*18+3]+=Pmtrx[16]*(P_010010002*QR_010001000000+P_010010102*QR_010001000001+P_010010202*QR_010001000002+P_010110002*QR_010001000010+P_010110102*QR_010001000011+P_010110202*QR_010001000012+P_110010002*QR_010001000100+P_110010102*QR_010001000101+P_110010202*QR_010001000102+P_110110002*QR_010001000110+P_110110102*QR_010001000111+P_110110202*QR_010001000112);
ans_temp[ans_id*18+3]+=Pmtrx[17]*(P_010010002*QR_010000001000+P_010010102*QR_010000001001+P_010010202*QR_010000001002+P_010110002*QR_010000001010+P_010110102*QR_010000001011+P_010110202*QR_010000001012+P_110010002*QR_010000001100+P_110010102*QR_010000001101+P_110010202*QR_010000001102+P_110110002*QR_010000001110+P_110110102*QR_010000001111+P_110110202*QR_010000001112);
ans_temp[ans_id*18+4]+=Pmtrx[15]*(P_010010002*QR_001010000000+P_010010102*QR_001010000001+P_010010202*QR_001010000002+P_010110002*QR_001010000010+P_010110102*QR_001010000011+P_010110202*QR_001010000012+P_110010002*QR_001010000100+P_110010102*QR_001010000101+P_110010202*QR_001010000102+P_110110002*QR_001010000110+P_110110102*QR_001010000111+P_110110202*QR_001010000112);
ans_temp[ans_id*18+4]+=Pmtrx[16]*(P_010010002*QR_000011000000+P_010010102*QR_000011000001+P_010010202*QR_000011000002+P_010110002*QR_000011000010+P_010110102*QR_000011000011+P_010110202*QR_000011000012+P_110010002*QR_000011000100+P_110010102*QR_000011000101+P_110010202*QR_000011000102+P_110110002*QR_000011000110+P_110110102*QR_000011000111+P_110110202*QR_000011000112);
ans_temp[ans_id*18+4]+=Pmtrx[17]*(P_010010002*QR_000010001000+P_010010102*QR_000010001001+P_010010202*QR_000010001002+P_010110002*QR_000010001010+P_010110102*QR_000010001011+P_010110202*QR_000010001012+P_110010002*QR_000010001100+P_110010102*QR_000010001101+P_110010202*QR_000010001102+P_110110002*QR_000010001110+P_110110102*QR_000010001111+P_110110202*QR_000010001112);
ans_temp[ans_id*18+5]+=Pmtrx[15]*(P_010010002*QR_001000010000+P_010010102*QR_001000010001+P_010010202*QR_001000010002+P_010110002*QR_001000010010+P_010110102*QR_001000010011+P_010110202*QR_001000010012+P_110010002*QR_001000010100+P_110010102*QR_001000010101+P_110010202*QR_001000010102+P_110110002*QR_001000010110+P_110110102*QR_001000010111+P_110110202*QR_001000010112);
ans_temp[ans_id*18+5]+=Pmtrx[16]*(P_010010002*QR_000001010000+P_010010102*QR_000001010001+P_010010202*QR_000001010002+P_010110002*QR_000001010010+P_010110102*QR_000001010011+P_010110202*QR_000001010012+P_110010002*QR_000001010100+P_110010102*QR_000001010101+P_110010202*QR_000001010102+P_110110002*QR_000001010110+P_110110102*QR_000001010111+P_110110202*QR_000001010112);
ans_temp[ans_id*18+5]+=Pmtrx[17]*(P_010010002*QR_000000011000+P_010010102*QR_000000011001+P_010010202*QR_000000011002+P_010110002*QR_000000011010+P_010110102*QR_000000011011+P_010110202*QR_000000011012+P_110010002*QR_000000011100+P_110010102*QR_000000011101+P_110010202*QR_000000011102+P_110110002*QR_000000011110+P_110110102*QR_000000011111+P_110110202*QR_000000011112);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(P_002020000*QR_011000000000+P_002120000*QR_011000000010+P_002220000*QR_011000000020+P_102020000*QR_011000000100+P_102120000*QR_011000000110+P_102220000*QR_011000000120+P_202020000*QR_011000000200+P_202120000*QR_011000000210+P_202220000*QR_011000000220);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(P_002020000*QR_010001000000+P_002120000*QR_010001000010+P_002220000*QR_010001000020+P_102020000*QR_010001000100+P_102120000*QR_010001000110+P_102220000*QR_010001000120+P_202020000*QR_010001000200+P_202120000*QR_010001000210+P_202220000*QR_010001000220);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(P_002020000*QR_010000001000+P_002120000*QR_010000001010+P_002220000*QR_010000001020+P_102020000*QR_010000001100+P_102120000*QR_010000001110+P_102220000*QR_010000001120+P_202020000*QR_010000001200+P_202120000*QR_010000001210+P_202220000*QR_010000001220);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(P_002020000*QR_001010000000+P_002120000*QR_001010000010+P_002220000*QR_001010000020+P_102020000*QR_001010000100+P_102120000*QR_001010000110+P_102220000*QR_001010000120+P_202020000*QR_001010000200+P_202120000*QR_001010000210+P_202220000*QR_001010000220);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(P_002020000*QR_000011000000+P_002120000*QR_000011000010+P_002220000*QR_000011000020+P_102020000*QR_000011000100+P_102120000*QR_000011000110+P_102220000*QR_000011000120+P_202020000*QR_000011000200+P_202120000*QR_000011000210+P_202220000*QR_000011000220);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(P_002020000*QR_000010001000+P_002120000*QR_000010001010+P_002220000*QR_000010001020+P_102020000*QR_000010001100+P_102120000*QR_000010001110+P_102220000*QR_000010001120+P_202020000*QR_000010001200+P_202120000*QR_000010001210+P_202220000*QR_000010001220);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(P_002020000*QR_001000010000+P_002120000*QR_001000010010+P_002220000*QR_001000010020+P_102020000*QR_001000010100+P_102120000*QR_001000010110+P_102220000*QR_001000010120+P_202020000*QR_001000010200+P_202120000*QR_001000010210+P_202220000*QR_001000010220);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(P_002020000*QR_000001010000+P_002120000*QR_000001010010+P_002220000*QR_000001010020+P_102020000*QR_000001010100+P_102120000*QR_000001010110+P_102220000*QR_000001010120+P_202020000*QR_000001010200+P_202120000*QR_000001010210+P_202220000*QR_000001010220);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(P_002020000*QR_000000011000+P_002120000*QR_000000011010+P_002220000*QR_000000011020+P_102020000*QR_000000011100+P_102120000*QR_000000011110+P_102220000*QR_000000011120+P_202020000*QR_000000011200+P_202120000*QR_000000011210+P_202220000*QR_000000011220);
ans_temp[ans_id*18+6]+=Pmtrx[3]*(P_001021000*QR_011000000000+P_001121000*QR_011000000010+P_001221000*QR_011000000020+P_001321000*QR_011000000030+P_101021000*QR_011000000100+P_101121000*QR_011000000110+P_101221000*QR_011000000120+P_101321000*QR_011000000130);
ans_temp[ans_id*18+6]+=Pmtrx[4]*(P_001021000*QR_010001000000+P_001121000*QR_010001000010+P_001221000*QR_010001000020+P_001321000*QR_010001000030+P_101021000*QR_010001000100+P_101121000*QR_010001000110+P_101221000*QR_010001000120+P_101321000*QR_010001000130);
ans_temp[ans_id*18+6]+=Pmtrx[5]*(P_001021000*QR_010000001000+P_001121000*QR_010000001010+P_001221000*QR_010000001020+P_001321000*QR_010000001030+P_101021000*QR_010000001100+P_101121000*QR_010000001110+P_101221000*QR_010000001120+P_101321000*QR_010000001130);
ans_temp[ans_id*18+7]+=Pmtrx[3]*(P_001021000*QR_001010000000+P_001121000*QR_001010000010+P_001221000*QR_001010000020+P_001321000*QR_001010000030+P_101021000*QR_001010000100+P_101121000*QR_001010000110+P_101221000*QR_001010000120+P_101321000*QR_001010000130);
ans_temp[ans_id*18+7]+=Pmtrx[4]*(P_001021000*QR_000011000000+P_001121000*QR_000011000010+P_001221000*QR_000011000020+P_001321000*QR_000011000030+P_101021000*QR_000011000100+P_101121000*QR_000011000110+P_101221000*QR_000011000120+P_101321000*QR_000011000130);
ans_temp[ans_id*18+7]+=Pmtrx[5]*(P_001021000*QR_000010001000+P_001121000*QR_000010001010+P_001221000*QR_000010001020+P_001321000*QR_000010001030+P_101021000*QR_000010001100+P_101121000*QR_000010001110+P_101221000*QR_000010001120+P_101321000*QR_000010001130);
ans_temp[ans_id*18+8]+=Pmtrx[3]*(P_001021000*QR_001000010000+P_001121000*QR_001000010010+P_001221000*QR_001000010020+P_001321000*QR_001000010030+P_101021000*QR_001000010100+P_101121000*QR_001000010110+P_101221000*QR_001000010120+P_101321000*QR_001000010130);
ans_temp[ans_id*18+8]+=Pmtrx[4]*(P_001021000*QR_000001010000+P_001121000*QR_000001010010+P_001221000*QR_000001010020+P_001321000*QR_000001010030+P_101021000*QR_000001010100+P_101121000*QR_000001010110+P_101221000*QR_000001010120+P_101321000*QR_000001010130);
ans_temp[ans_id*18+8]+=Pmtrx[5]*(P_001021000*QR_000000011000+P_001121000*QR_000000011010+P_001221000*QR_000000011020+P_001321000*QR_000000011030+P_101021000*QR_000000011100+P_101121000*QR_000000011110+P_101221000*QR_000000011120+P_101321000*QR_000000011130);
ans_temp[ans_id*18+6]+=Pmtrx[6]*(P_000022000*QR_011000000000+P_000122000*QR_011000000010+P_000222000*QR_011000000020+P_000322000*QR_011000000030+P_000422000*QR_011000000040);
ans_temp[ans_id*18+6]+=Pmtrx[7]*(P_000022000*QR_010001000000+P_000122000*QR_010001000010+P_000222000*QR_010001000020+P_000322000*QR_010001000030+P_000422000*QR_010001000040);
ans_temp[ans_id*18+6]+=Pmtrx[8]*(P_000022000*QR_010000001000+P_000122000*QR_010000001010+P_000222000*QR_010000001020+P_000322000*QR_010000001030+P_000422000*QR_010000001040);
ans_temp[ans_id*18+7]+=Pmtrx[6]*(P_000022000*QR_001010000000+P_000122000*QR_001010000010+P_000222000*QR_001010000020+P_000322000*QR_001010000030+P_000422000*QR_001010000040);
ans_temp[ans_id*18+7]+=Pmtrx[7]*(P_000022000*QR_000011000000+P_000122000*QR_000011000010+P_000222000*QR_000011000020+P_000322000*QR_000011000030+P_000422000*QR_000011000040);
ans_temp[ans_id*18+7]+=Pmtrx[8]*(P_000022000*QR_000010001000+P_000122000*QR_000010001010+P_000222000*QR_000010001020+P_000322000*QR_000010001030+P_000422000*QR_000010001040);
ans_temp[ans_id*18+8]+=Pmtrx[6]*(P_000022000*QR_001000010000+P_000122000*QR_001000010010+P_000222000*QR_001000010020+P_000322000*QR_001000010030+P_000422000*QR_001000010040);
ans_temp[ans_id*18+8]+=Pmtrx[7]*(P_000022000*QR_000001010000+P_000122000*QR_000001010010+P_000222000*QR_000001010020+P_000322000*QR_000001010030+P_000422000*QR_000001010040);
ans_temp[ans_id*18+8]+=Pmtrx[8]*(P_000022000*QR_000000011000+P_000122000*QR_000000011010+P_000222000*QR_000000011020+P_000322000*QR_000000011030+P_000422000*QR_000000011040);
ans_temp[ans_id*18+6]+=Pmtrx[9]*(P_001020001*QR_011000000000+P_001020101*QR_011000000001+P_001120001*QR_011000000010+P_001120101*QR_011000000011+P_001220001*QR_011000000020+P_001220101*QR_011000000021+P_101020001*QR_011000000100+P_101020101*QR_011000000101+P_101120001*QR_011000000110+P_101120101*QR_011000000111+P_101220001*QR_011000000120+P_101220101*QR_011000000121);
ans_temp[ans_id*18+6]+=Pmtrx[10]*(P_001020001*QR_010001000000+P_001020101*QR_010001000001+P_001120001*QR_010001000010+P_001120101*QR_010001000011+P_001220001*QR_010001000020+P_001220101*QR_010001000021+P_101020001*QR_010001000100+P_101020101*QR_010001000101+P_101120001*QR_010001000110+P_101120101*QR_010001000111+P_101220001*QR_010001000120+P_101220101*QR_010001000121);
ans_temp[ans_id*18+6]+=Pmtrx[11]*(P_001020001*QR_010000001000+P_001020101*QR_010000001001+P_001120001*QR_010000001010+P_001120101*QR_010000001011+P_001220001*QR_010000001020+P_001220101*QR_010000001021+P_101020001*QR_010000001100+P_101020101*QR_010000001101+P_101120001*QR_010000001110+P_101120101*QR_010000001111+P_101220001*QR_010000001120+P_101220101*QR_010000001121);
ans_temp[ans_id*18+7]+=Pmtrx[9]*(P_001020001*QR_001010000000+P_001020101*QR_001010000001+P_001120001*QR_001010000010+P_001120101*QR_001010000011+P_001220001*QR_001010000020+P_001220101*QR_001010000021+P_101020001*QR_001010000100+P_101020101*QR_001010000101+P_101120001*QR_001010000110+P_101120101*QR_001010000111+P_101220001*QR_001010000120+P_101220101*QR_001010000121);
ans_temp[ans_id*18+7]+=Pmtrx[10]*(P_001020001*QR_000011000000+P_001020101*QR_000011000001+P_001120001*QR_000011000010+P_001120101*QR_000011000011+P_001220001*QR_000011000020+P_001220101*QR_000011000021+P_101020001*QR_000011000100+P_101020101*QR_000011000101+P_101120001*QR_000011000110+P_101120101*QR_000011000111+P_101220001*QR_000011000120+P_101220101*QR_000011000121);
ans_temp[ans_id*18+7]+=Pmtrx[11]*(P_001020001*QR_000010001000+P_001020101*QR_000010001001+P_001120001*QR_000010001010+P_001120101*QR_000010001011+P_001220001*QR_000010001020+P_001220101*QR_000010001021+P_101020001*QR_000010001100+P_101020101*QR_000010001101+P_101120001*QR_000010001110+P_101120101*QR_000010001111+P_101220001*QR_000010001120+P_101220101*QR_000010001121);
ans_temp[ans_id*18+8]+=Pmtrx[9]*(P_001020001*QR_001000010000+P_001020101*QR_001000010001+P_001120001*QR_001000010010+P_001120101*QR_001000010011+P_001220001*QR_001000010020+P_001220101*QR_001000010021+P_101020001*QR_001000010100+P_101020101*QR_001000010101+P_101120001*QR_001000010110+P_101120101*QR_001000010111+P_101220001*QR_001000010120+P_101220101*QR_001000010121);
ans_temp[ans_id*18+8]+=Pmtrx[10]*(P_001020001*QR_000001010000+P_001020101*QR_000001010001+P_001120001*QR_000001010010+P_001120101*QR_000001010011+P_001220001*QR_000001010020+P_001220101*QR_000001010021+P_101020001*QR_000001010100+P_101020101*QR_000001010101+P_101120001*QR_000001010110+P_101120101*QR_000001010111+P_101220001*QR_000001010120+P_101220101*QR_000001010121);
ans_temp[ans_id*18+8]+=Pmtrx[11]*(P_001020001*QR_000000011000+P_001020101*QR_000000011001+P_001120001*QR_000000011010+P_001120101*QR_000000011011+P_001220001*QR_000000011020+P_001220101*QR_000000011021+P_101020001*QR_000000011100+P_101020101*QR_000000011101+P_101120001*QR_000000011110+P_101120101*QR_000000011111+P_101220001*QR_000000011120+P_101220101*QR_000000011121);
ans_temp[ans_id*18+6]+=Pmtrx[12]*(P_000021001*QR_011000000000+P_000021101*QR_011000000001+P_000121001*QR_011000000010+P_000121101*QR_011000000011+P_000221001*QR_011000000020+P_000221101*QR_011000000021+P_000321001*QR_011000000030+P_000321101*QR_011000000031);
ans_temp[ans_id*18+6]+=Pmtrx[13]*(P_000021001*QR_010001000000+P_000021101*QR_010001000001+P_000121001*QR_010001000010+P_000121101*QR_010001000011+P_000221001*QR_010001000020+P_000221101*QR_010001000021+P_000321001*QR_010001000030+P_000321101*QR_010001000031);
ans_temp[ans_id*18+6]+=Pmtrx[14]*(P_000021001*QR_010000001000+P_000021101*QR_010000001001+P_000121001*QR_010000001010+P_000121101*QR_010000001011+P_000221001*QR_010000001020+P_000221101*QR_010000001021+P_000321001*QR_010000001030+P_000321101*QR_010000001031);
ans_temp[ans_id*18+7]+=Pmtrx[12]*(P_000021001*QR_001010000000+P_000021101*QR_001010000001+P_000121001*QR_001010000010+P_000121101*QR_001010000011+P_000221001*QR_001010000020+P_000221101*QR_001010000021+P_000321001*QR_001010000030+P_000321101*QR_001010000031);
ans_temp[ans_id*18+7]+=Pmtrx[13]*(P_000021001*QR_000011000000+P_000021101*QR_000011000001+P_000121001*QR_000011000010+P_000121101*QR_000011000011+P_000221001*QR_000011000020+P_000221101*QR_000011000021+P_000321001*QR_000011000030+P_000321101*QR_000011000031);
ans_temp[ans_id*18+7]+=Pmtrx[14]*(P_000021001*QR_000010001000+P_000021101*QR_000010001001+P_000121001*QR_000010001010+P_000121101*QR_000010001011+P_000221001*QR_000010001020+P_000221101*QR_000010001021+P_000321001*QR_000010001030+P_000321101*QR_000010001031);
ans_temp[ans_id*18+8]+=Pmtrx[12]*(P_000021001*QR_001000010000+P_000021101*QR_001000010001+P_000121001*QR_001000010010+P_000121101*QR_001000010011+P_000221001*QR_001000010020+P_000221101*QR_001000010021+P_000321001*QR_001000010030+P_000321101*QR_001000010031);
ans_temp[ans_id*18+8]+=Pmtrx[13]*(P_000021001*QR_000001010000+P_000021101*QR_000001010001+P_000121001*QR_000001010010+P_000121101*QR_000001010011+P_000221001*QR_000001010020+P_000221101*QR_000001010021+P_000321001*QR_000001010030+P_000321101*QR_000001010031);
ans_temp[ans_id*18+8]+=Pmtrx[14]*(P_000021001*QR_000000011000+P_000021101*QR_000000011001+P_000121001*QR_000000011010+P_000121101*QR_000000011011+P_000221001*QR_000000011020+P_000221101*QR_000000011021+P_000321001*QR_000000011030+P_000321101*QR_000000011031);
ans_temp[ans_id*18+6]+=Pmtrx[15]*(P_000020002*QR_011000000000+P_000020102*QR_011000000001+P_000020202*QR_011000000002+P_000120002*QR_011000000010+P_000120102*QR_011000000011+P_000120202*QR_011000000012+P_000220002*QR_011000000020+P_000220102*QR_011000000021+P_000220202*QR_011000000022);
ans_temp[ans_id*18+6]+=Pmtrx[16]*(P_000020002*QR_010001000000+P_000020102*QR_010001000001+P_000020202*QR_010001000002+P_000120002*QR_010001000010+P_000120102*QR_010001000011+P_000120202*QR_010001000012+P_000220002*QR_010001000020+P_000220102*QR_010001000021+P_000220202*QR_010001000022);
ans_temp[ans_id*18+6]+=Pmtrx[17]*(P_000020002*QR_010000001000+P_000020102*QR_010000001001+P_000020202*QR_010000001002+P_000120002*QR_010000001010+P_000120102*QR_010000001011+P_000120202*QR_010000001012+P_000220002*QR_010000001020+P_000220102*QR_010000001021+P_000220202*QR_010000001022);
ans_temp[ans_id*18+7]+=Pmtrx[15]*(P_000020002*QR_001010000000+P_000020102*QR_001010000001+P_000020202*QR_001010000002+P_000120002*QR_001010000010+P_000120102*QR_001010000011+P_000120202*QR_001010000012+P_000220002*QR_001010000020+P_000220102*QR_001010000021+P_000220202*QR_001010000022);
ans_temp[ans_id*18+7]+=Pmtrx[16]*(P_000020002*QR_000011000000+P_000020102*QR_000011000001+P_000020202*QR_000011000002+P_000120002*QR_000011000010+P_000120102*QR_000011000011+P_000120202*QR_000011000012+P_000220002*QR_000011000020+P_000220102*QR_000011000021+P_000220202*QR_000011000022);
ans_temp[ans_id*18+7]+=Pmtrx[17]*(P_000020002*QR_000010001000+P_000020102*QR_000010001001+P_000020202*QR_000010001002+P_000120002*QR_000010001010+P_000120102*QR_000010001011+P_000120202*QR_000010001012+P_000220002*QR_000010001020+P_000220102*QR_000010001021+P_000220202*QR_000010001022);
ans_temp[ans_id*18+8]+=Pmtrx[15]*(P_000020002*QR_001000010000+P_000020102*QR_001000010001+P_000020202*QR_001000010002+P_000120002*QR_001000010010+P_000120102*QR_001000010011+P_000120202*QR_001000010012+P_000220002*QR_001000010020+P_000220102*QR_001000010021+P_000220202*QR_001000010022);
ans_temp[ans_id*18+8]+=Pmtrx[16]*(P_000020002*QR_000001010000+P_000020102*QR_000001010001+P_000020202*QR_000001010002+P_000120002*QR_000001010010+P_000120102*QR_000001010011+P_000120202*QR_000001010012+P_000220002*QR_000001010020+P_000220102*QR_000001010021+P_000220202*QR_000001010022);
ans_temp[ans_id*18+8]+=Pmtrx[17]*(P_000020002*QR_000000011000+P_000020102*QR_000000011001+P_000020202*QR_000000011002+P_000120002*QR_000000011010+P_000120102*QR_000000011011+P_000120202*QR_000000011012+P_000220002*QR_000000011020+P_000220102*QR_000000011021+P_000220202*QR_000000011022);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(P_012000010*QR_011000000000+P_012000110*QR_011000000001+P_112000010*QR_011000000100+P_112000110*QR_011000000101+P_212000010*QR_011000000200+P_212000110*QR_011000000201+P_312000010*QR_011000000300+P_312000110*QR_011000000301);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(P_012000010*QR_010001000000+P_012000110*QR_010001000001+P_112000010*QR_010001000100+P_112000110*QR_010001000101+P_212000010*QR_010001000200+P_212000110*QR_010001000201+P_312000010*QR_010001000300+P_312000110*QR_010001000301);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(P_012000010*QR_010000001000+P_012000110*QR_010000001001+P_112000010*QR_010000001100+P_112000110*QR_010000001101+P_212000010*QR_010000001200+P_212000110*QR_010000001201+P_312000010*QR_010000001300+P_312000110*QR_010000001301);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(P_012000010*QR_001010000000+P_012000110*QR_001010000001+P_112000010*QR_001010000100+P_112000110*QR_001010000101+P_212000010*QR_001010000200+P_212000110*QR_001010000201+P_312000010*QR_001010000300+P_312000110*QR_001010000301);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(P_012000010*QR_000011000000+P_012000110*QR_000011000001+P_112000010*QR_000011000100+P_112000110*QR_000011000101+P_212000010*QR_000011000200+P_212000110*QR_000011000201+P_312000010*QR_000011000300+P_312000110*QR_000011000301);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(P_012000010*QR_000010001000+P_012000110*QR_000010001001+P_112000010*QR_000010001100+P_112000110*QR_000010001101+P_212000010*QR_000010001200+P_212000110*QR_000010001201+P_312000010*QR_000010001300+P_312000110*QR_000010001301);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(P_012000010*QR_001000010000+P_012000110*QR_001000010001+P_112000010*QR_001000010100+P_112000110*QR_001000010101+P_212000010*QR_001000010200+P_212000110*QR_001000010201+P_312000010*QR_001000010300+P_312000110*QR_001000010301);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(P_012000010*QR_000001010000+P_012000110*QR_000001010001+P_112000010*QR_000001010100+P_112000110*QR_000001010101+P_212000010*QR_000001010200+P_212000110*QR_000001010201+P_312000010*QR_000001010300+P_312000110*QR_000001010301);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(P_012000010*QR_000000011000+P_012000110*QR_000000011001+P_112000010*QR_000000011100+P_112000110*QR_000000011101+P_212000010*QR_000000011200+P_212000110*QR_000000011201+P_312000010*QR_000000011300+P_312000110*QR_000000011301);
ans_temp[ans_id*18+9]+=Pmtrx[3]*(P_011001010*QR_011000000000+P_011001110*QR_011000000001+P_011101010*QR_011000000010+P_011101110*QR_011000000011+P_111001010*QR_011000000100+P_111001110*QR_011000000101+P_111101010*QR_011000000110+P_111101110*QR_011000000111+P_211001010*QR_011000000200+P_211001110*QR_011000000201+P_211101010*QR_011000000210+P_211101110*QR_011000000211);
ans_temp[ans_id*18+9]+=Pmtrx[4]*(P_011001010*QR_010001000000+P_011001110*QR_010001000001+P_011101010*QR_010001000010+P_011101110*QR_010001000011+P_111001010*QR_010001000100+P_111001110*QR_010001000101+P_111101010*QR_010001000110+P_111101110*QR_010001000111+P_211001010*QR_010001000200+P_211001110*QR_010001000201+P_211101010*QR_010001000210+P_211101110*QR_010001000211);
ans_temp[ans_id*18+9]+=Pmtrx[5]*(P_011001010*QR_010000001000+P_011001110*QR_010000001001+P_011101010*QR_010000001010+P_011101110*QR_010000001011+P_111001010*QR_010000001100+P_111001110*QR_010000001101+P_111101010*QR_010000001110+P_111101110*QR_010000001111+P_211001010*QR_010000001200+P_211001110*QR_010000001201+P_211101010*QR_010000001210+P_211101110*QR_010000001211);
ans_temp[ans_id*18+10]+=Pmtrx[3]*(P_011001010*QR_001010000000+P_011001110*QR_001010000001+P_011101010*QR_001010000010+P_011101110*QR_001010000011+P_111001010*QR_001010000100+P_111001110*QR_001010000101+P_111101010*QR_001010000110+P_111101110*QR_001010000111+P_211001010*QR_001010000200+P_211001110*QR_001010000201+P_211101010*QR_001010000210+P_211101110*QR_001010000211);
ans_temp[ans_id*18+10]+=Pmtrx[4]*(P_011001010*QR_000011000000+P_011001110*QR_000011000001+P_011101010*QR_000011000010+P_011101110*QR_000011000011+P_111001010*QR_000011000100+P_111001110*QR_000011000101+P_111101010*QR_000011000110+P_111101110*QR_000011000111+P_211001010*QR_000011000200+P_211001110*QR_000011000201+P_211101010*QR_000011000210+P_211101110*QR_000011000211);
ans_temp[ans_id*18+10]+=Pmtrx[5]*(P_011001010*QR_000010001000+P_011001110*QR_000010001001+P_011101010*QR_000010001010+P_011101110*QR_000010001011+P_111001010*QR_000010001100+P_111001110*QR_000010001101+P_111101010*QR_000010001110+P_111101110*QR_000010001111+P_211001010*QR_000010001200+P_211001110*QR_000010001201+P_211101010*QR_000010001210+P_211101110*QR_000010001211);
ans_temp[ans_id*18+11]+=Pmtrx[3]*(P_011001010*QR_001000010000+P_011001110*QR_001000010001+P_011101010*QR_001000010010+P_011101110*QR_001000010011+P_111001010*QR_001000010100+P_111001110*QR_001000010101+P_111101010*QR_001000010110+P_111101110*QR_001000010111+P_211001010*QR_001000010200+P_211001110*QR_001000010201+P_211101010*QR_001000010210+P_211101110*QR_001000010211);
ans_temp[ans_id*18+11]+=Pmtrx[4]*(P_011001010*QR_000001010000+P_011001110*QR_000001010001+P_011101010*QR_000001010010+P_011101110*QR_000001010011+P_111001010*QR_000001010100+P_111001110*QR_000001010101+P_111101010*QR_000001010110+P_111101110*QR_000001010111+P_211001010*QR_000001010200+P_211001110*QR_000001010201+P_211101010*QR_000001010210+P_211101110*QR_000001010211);
ans_temp[ans_id*18+11]+=Pmtrx[5]*(P_011001010*QR_000000011000+P_011001110*QR_000000011001+P_011101010*QR_000000011010+P_011101110*QR_000000011011+P_111001010*QR_000000011100+P_111001110*QR_000000011101+P_111101010*QR_000000011110+P_111101110*QR_000000011111+P_211001010*QR_000000011200+P_211001110*QR_000000011201+P_211101010*QR_000000011210+P_211101110*QR_000000011211);
ans_temp[ans_id*18+9]+=Pmtrx[6]*(P_010002010*QR_011000000000+P_010002110*QR_011000000001+P_010102010*QR_011000000010+P_010102110*QR_011000000011+P_010202010*QR_011000000020+P_010202110*QR_011000000021+P_110002010*QR_011000000100+P_110002110*QR_011000000101+P_110102010*QR_011000000110+P_110102110*QR_011000000111+P_110202010*QR_011000000120+P_110202110*QR_011000000121);
ans_temp[ans_id*18+9]+=Pmtrx[7]*(P_010002010*QR_010001000000+P_010002110*QR_010001000001+P_010102010*QR_010001000010+P_010102110*QR_010001000011+P_010202010*QR_010001000020+P_010202110*QR_010001000021+P_110002010*QR_010001000100+P_110002110*QR_010001000101+P_110102010*QR_010001000110+P_110102110*QR_010001000111+P_110202010*QR_010001000120+P_110202110*QR_010001000121);
ans_temp[ans_id*18+9]+=Pmtrx[8]*(P_010002010*QR_010000001000+P_010002110*QR_010000001001+P_010102010*QR_010000001010+P_010102110*QR_010000001011+P_010202010*QR_010000001020+P_010202110*QR_010000001021+P_110002010*QR_010000001100+P_110002110*QR_010000001101+P_110102010*QR_010000001110+P_110102110*QR_010000001111+P_110202010*QR_010000001120+P_110202110*QR_010000001121);
ans_temp[ans_id*18+10]+=Pmtrx[6]*(P_010002010*QR_001010000000+P_010002110*QR_001010000001+P_010102010*QR_001010000010+P_010102110*QR_001010000011+P_010202010*QR_001010000020+P_010202110*QR_001010000021+P_110002010*QR_001010000100+P_110002110*QR_001010000101+P_110102010*QR_001010000110+P_110102110*QR_001010000111+P_110202010*QR_001010000120+P_110202110*QR_001010000121);
ans_temp[ans_id*18+10]+=Pmtrx[7]*(P_010002010*QR_000011000000+P_010002110*QR_000011000001+P_010102010*QR_000011000010+P_010102110*QR_000011000011+P_010202010*QR_000011000020+P_010202110*QR_000011000021+P_110002010*QR_000011000100+P_110002110*QR_000011000101+P_110102010*QR_000011000110+P_110102110*QR_000011000111+P_110202010*QR_000011000120+P_110202110*QR_000011000121);
ans_temp[ans_id*18+10]+=Pmtrx[8]*(P_010002010*QR_000010001000+P_010002110*QR_000010001001+P_010102010*QR_000010001010+P_010102110*QR_000010001011+P_010202010*QR_000010001020+P_010202110*QR_000010001021+P_110002010*QR_000010001100+P_110002110*QR_000010001101+P_110102010*QR_000010001110+P_110102110*QR_000010001111+P_110202010*QR_000010001120+P_110202110*QR_000010001121);
ans_temp[ans_id*18+11]+=Pmtrx[6]*(P_010002010*QR_001000010000+P_010002110*QR_001000010001+P_010102010*QR_001000010010+P_010102110*QR_001000010011+P_010202010*QR_001000010020+P_010202110*QR_001000010021+P_110002010*QR_001000010100+P_110002110*QR_001000010101+P_110102010*QR_001000010110+P_110102110*QR_001000010111+P_110202010*QR_001000010120+P_110202110*QR_001000010121);
ans_temp[ans_id*18+11]+=Pmtrx[7]*(P_010002010*QR_000001010000+P_010002110*QR_000001010001+P_010102010*QR_000001010010+P_010102110*QR_000001010011+P_010202010*QR_000001010020+P_010202110*QR_000001010021+P_110002010*QR_000001010100+P_110002110*QR_000001010101+P_110102010*QR_000001010110+P_110102110*QR_000001010111+P_110202010*QR_000001010120+P_110202110*QR_000001010121);
ans_temp[ans_id*18+11]+=Pmtrx[8]*(P_010002010*QR_000000011000+P_010002110*QR_000000011001+P_010102010*QR_000000011010+P_010102110*QR_000000011011+P_010202010*QR_000000011020+P_010202110*QR_000000011021+P_110002010*QR_000000011100+P_110002110*QR_000000011101+P_110102010*QR_000000011110+P_110102110*QR_000000011111+P_110202010*QR_000000011120+P_110202110*QR_000000011121);
ans_temp[ans_id*18+9]+=Pmtrx[9]*(P_011000011*QR_011000000000+P_011000111*QR_011000000001+P_011000211*QR_011000000002+P_111000011*QR_011000000100+P_111000111*QR_011000000101+P_111000211*QR_011000000102+P_211000011*QR_011000000200+P_211000111*QR_011000000201+P_211000211*QR_011000000202);
ans_temp[ans_id*18+9]+=Pmtrx[10]*(P_011000011*QR_010001000000+P_011000111*QR_010001000001+P_011000211*QR_010001000002+P_111000011*QR_010001000100+P_111000111*QR_010001000101+P_111000211*QR_010001000102+P_211000011*QR_010001000200+P_211000111*QR_010001000201+P_211000211*QR_010001000202);
ans_temp[ans_id*18+9]+=Pmtrx[11]*(P_011000011*QR_010000001000+P_011000111*QR_010000001001+P_011000211*QR_010000001002+P_111000011*QR_010000001100+P_111000111*QR_010000001101+P_111000211*QR_010000001102+P_211000011*QR_010000001200+P_211000111*QR_010000001201+P_211000211*QR_010000001202);
ans_temp[ans_id*18+10]+=Pmtrx[9]*(P_011000011*QR_001010000000+P_011000111*QR_001010000001+P_011000211*QR_001010000002+P_111000011*QR_001010000100+P_111000111*QR_001010000101+P_111000211*QR_001010000102+P_211000011*QR_001010000200+P_211000111*QR_001010000201+P_211000211*QR_001010000202);
ans_temp[ans_id*18+10]+=Pmtrx[10]*(P_011000011*QR_000011000000+P_011000111*QR_000011000001+P_011000211*QR_000011000002+P_111000011*QR_000011000100+P_111000111*QR_000011000101+P_111000211*QR_000011000102+P_211000011*QR_000011000200+P_211000111*QR_000011000201+P_211000211*QR_000011000202);
ans_temp[ans_id*18+10]+=Pmtrx[11]*(P_011000011*QR_000010001000+P_011000111*QR_000010001001+P_011000211*QR_000010001002+P_111000011*QR_000010001100+P_111000111*QR_000010001101+P_111000211*QR_000010001102+P_211000011*QR_000010001200+P_211000111*QR_000010001201+P_211000211*QR_000010001202);
ans_temp[ans_id*18+11]+=Pmtrx[9]*(P_011000011*QR_001000010000+P_011000111*QR_001000010001+P_011000211*QR_001000010002+P_111000011*QR_001000010100+P_111000111*QR_001000010101+P_111000211*QR_001000010102+P_211000011*QR_001000010200+P_211000111*QR_001000010201+P_211000211*QR_001000010202);
ans_temp[ans_id*18+11]+=Pmtrx[10]*(P_011000011*QR_000001010000+P_011000111*QR_000001010001+P_011000211*QR_000001010002+P_111000011*QR_000001010100+P_111000111*QR_000001010101+P_111000211*QR_000001010102+P_211000011*QR_000001010200+P_211000111*QR_000001010201+P_211000211*QR_000001010202);
ans_temp[ans_id*18+11]+=Pmtrx[11]*(P_011000011*QR_000000011000+P_011000111*QR_000000011001+P_011000211*QR_000000011002+P_111000011*QR_000000011100+P_111000111*QR_000000011101+P_111000211*QR_000000011102+P_211000011*QR_000000011200+P_211000111*QR_000000011201+P_211000211*QR_000000011202);
ans_temp[ans_id*18+9]+=Pmtrx[12]*(P_010001011*QR_011000000000+P_010001111*QR_011000000001+P_010001211*QR_011000000002+P_010101011*QR_011000000010+P_010101111*QR_011000000011+P_010101211*QR_011000000012+P_110001011*QR_011000000100+P_110001111*QR_011000000101+P_110001211*QR_011000000102+P_110101011*QR_011000000110+P_110101111*QR_011000000111+P_110101211*QR_011000000112);
ans_temp[ans_id*18+9]+=Pmtrx[13]*(P_010001011*QR_010001000000+P_010001111*QR_010001000001+P_010001211*QR_010001000002+P_010101011*QR_010001000010+P_010101111*QR_010001000011+P_010101211*QR_010001000012+P_110001011*QR_010001000100+P_110001111*QR_010001000101+P_110001211*QR_010001000102+P_110101011*QR_010001000110+P_110101111*QR_010001000111+P_110101211*QR_010001000112);
ans_temp[ans_id*18+9]+=Pmtrx[14]*(P_010001011*QR_010000001000+P_010001111*QR_010000001001+P_010001211*QR_010000001002+P_010101011*QR_010000001010+P_010101111*QR_010000001011+P_010101211*QR_010000001012+P_110001011*QR_010000001100+P_110001111*QR_010000001101+P_110001211*QR_010000001102+P_110101011*QR_010000001110+P_110101111*QR_010000001111+P_110101211*QR_010000001112);
ans_temp[ans_id*18+10]+=Pmtrx[12]*(P_010001011*QR_001010000000+P_010001111*QR_001010000001+P_010001211*QR_001010000002+P_010101011*QR_001010000010+P_010101111*QR_001010000011+P_010101211*QR_001010000012+P_110001011*QR_001010000100+P_110001111*QR_001010000101+P_110001211*QR_001010000102+P_110101011*QR_001010000110+P_110101111*QR_001010000111+P_110101211*QR_001010000112);
ans_temp[ans_id*18+10]+=Pmtrx[13]*(P_010001011*QR_000011000000+P_010001111*QR_000011000001+P_010001211*QR_000011000002+P_010101011*QR_000011000010+P_010101111*QR_000011000011+P_010101211*QR_000011000012+P_110001011*QR_000011000100+P_110001111*QR_000011000101+P_110001211*QR_000011000102+P_110101011*QR_000011000110+P_110101111*QR_000011000111+P_110101211*QR_000011000112);
ans_temp[ans_id*18+10]+=Pmtrx[14]*(P_010001011*QR_000010001000+P_010001111*QR_000010001001+P_010001211*QR_000010001002+P_010101011*QR_000010001010+P_010101111*QR_000010001011+P_010101211*QR_000010001012+P_110001011*QR_000010001100+P_110001111*QR_000010001101+P_110001211*QR_000010001102+P_110101011*QR_000010001110+P_110101111*QR_000010001111+P_110101211*QR_000010001112);
ans_temp[ans_id*18+11]+=Pmtrx[12]*(P_010001011*QR_001000010000+P_010001111*QR_001000010001+P_010001211*QR_001000010002+P_010101011*QR_001000010010+P_010101111*QR_001000010011+P_010101211*QR_001000010012+P_110001011*QR_001000010100+P_110001111*QR_001000010101+P_110001211*QR_001000010102+P_110101011*QR_001000010110+P_110101111*QR_001000010111+P_110101211*QR_001000010112);
ans_temp[ans_id*18+11]+=Pmtrx[13]*(P_010001011*QR_000001010000+P_010001111*QR_000001010001+P_010001211*QR_000001010002+P_010101011*QR_000001010010+P_010101111*QR_000001010011+P_010101211*QR_000001010012+P_110001011*QR_000001010100+P_110001111*QR_000001010101+P_110001211*QR_000001010102+P_110101011*QR_000001010110+P_110101111*QR_000001010111+P_110101211*QR_000001010112);
ans_temp[ans_id*18+11]+=Pmtrx[14]*(P_010001011*QR_000000011000+P_010001111*QR_000000011001+P_010001211*QR_000000011002+P_010101011*QR_000000011010+P_010101111*QR_000000011011+P_010101211*QR_000000011012+P_110001011*QR_000000011100+P_110001111*QR_000000011101+P_110001211*QR_000000011102+P_110101011*QR_000000011110+P_110101111*QR_000000011111+P_110101211*QR_000000011112);
ans_temp[ans_id*18+9]+=Pmtrx[15]*(P_010000012*QR_011000000000+P_010000112*QR_011000000001+P_010000212*QR_011000000002+P_010000312*QR_011000000003+P_110000012*QR_011000000100+P_110000112*QR_011000000101+P_110000212*QR_011000000102+P_110000312*QR_011000000103);
ans_temp[ans_id*18+9]+=Pmtrx[16]*(P_010000012*QR_010001000000+P_010000112*QR_010001000001+P_010000212*QR_010001000002+P_010000312*QR_010001000003+P_110000012*QR_010001000100+P_110000112*QR_010001000101+P_110000212*QR_010001000102+P_110000312*QR_010001000103);
ans_temp[ans_id*18+9]+=Pmtrx[17]*(P_010000012*QR_010000001000+P_010000112*QR_010000001001+P_010000212*QR_010000001002+P_010000312*QR_010000001003+P_110000012*QR_010000001100+P_110000112*QR_010000001101+P_110000212*QR_010000001102+P_110000312*QR_010000001103);
ans_temp[ans_id*18+10]+=Pmtrx[15]*(P_010000012*QR_001010000000+P_010000112*QR_001010000001+P_010000212*QR_001010000002+P_010000312*QR_001010000003+P_110000012*QR_001010000100+P_110000112*QR_001010000101+P_110000212*QR_001010000102+P_110000312*QR_001010000103);
ans_temp[ans_id*18+10]+=Pmtrx[16]*(P_010000012*QR_000011000000+P_010000112*QR_000011000001+P_010000212*QR_000011000002+P_010000312*QR_000011000003+P_110000012*QR_000011000100+P_110000112*QR_000011000101+P_110000212*QR_000011000102+P_110000312*QR_000011000103);
ans_temp[ans_id*18+10]+=Pmtrx[17]*(P_010000012*QR_000010001000+P_010000112*QR_000010001001+P_010000212*QR_000010001002+P_010000312*QR_000010001003+P_110000012*QR_000010001100+P_110000112*QR_000010001101+P_110000212*QR_000010001102+P_110000312*QR_000010001103);
ans_temp[ans_id*18+11]+=Pmtrx[15]*(P_010000012*QR_001000010000+P_010000112*QR_001000010001+P_010000212*QR_001000010002+P_010000312*QR_001000010003+P_110000012*QR_001000010100+P_110000112*QR_001000010101+P_110000212*QR_001000010102+P_110000312*QR_001000010103);
ans_temp[ans_id*18+11]+=Pmtrx[16]*(P_010000012*QR_000001010000+P_010000112*QR_000001010001+P_010000212*QR_000001010002+P_010000312*QR_000001010003+P_110000012*QR_000001010100+P_110000112*QR_000001010101+P_110000212*QR_000001010102+P_110000312*QR_000001010103);
ans_temp[ans_id*18+11]+=Pmtrx[17]*(P_010000012*QR_000000011000+P_010000112*QR_000000011001+P_010000212*QR_000000011002+P_010000312*QR_000000011003+P_110000012*QR_000000011100+P_110000112*QR_000000011101+P_110000212*QR_000000011102+P_110000312*QR_000000011103);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(P_002010010*QR_011000000000+P_002010110*QR_011000000001+P_002110010*QR_011000000010+P_002110110*QR_011000000011+P_102010010*QR_011000000100+P_102010110*QR_011000000101+P_102110010*QR_011000000110+P_102110110*QR_011000000111+P_202010010*QR_011000000200+P_202010110*QR_011000000201+P_202110010*QR_011000000210+P_202110110*QR_011000000211);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(P_002010010*QR_010001000000+P_002010110*QR_010001000001+P_002110010*QR_010001000010+P_002110110*QR_010001000011+P_102010010*QR_010001000100+P_102010110*QR_010001000101+P_102110010*QR_010001000110+P_102110110*QR_010001000111+P_202010010*QR_010001000200+P_202010110*QR_010001000201+P_202110010*QR_010001000210+P_202110110*QR_010001000211);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(P_002010010*QR_010000001000+P_002010110*QR_010000001001+P_002110010*QR_010000001010+P_002110110*QR_010000001011+P_102010010*QR_010000001100+P_102010110*QR_010000001101+P_102110010*QR_010000001110+P_102110110*QR_010000001111+P_202010010*QR_010000001200+P_202010110*QR_010000001201+P_202110010*QR_010000001210+P_202110110*QR_010000001211);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(P_002010010*QR_001010000000+P_002010110*QR_001010000001+P_002110010*QR_001010000010+P_002110110*QR_001010000011+P_102010010*QR_001010000100+P_102010110*QR_001010000101+P_102110010*QR_001010000110+P_102110110*QR_001010000111+P_202010010*QR_001010000200+P_202010110*QR_001010000201+P_202110010*QR_001010000210+P_202110110*QR_001010000211);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(P_002010010*QR_000011000000+P_002010110*QR_000011000001+P_002110010*QR_000011000010+P_002110110*QR_000011000011+P_102010010*QR_000011000100+P_102010110*QR_000011000101+P_102110010*QR_000011000110+P_102110110*QR_000011000111+P_202010010*QR_000011000200+P_202010110*QR_000011000201+P_202110010*QR_000011000210+P_202110110*QR_000011000211);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(P_002010010*QR_000010001000+P_002010110*QR_000010001001+P_002110010*QR_000010001010+P_002110110*QR_000010001011+P_102010010*QR_000010001100+P_102010110*QR_000010001101+P_102110010*QR_000010001110+P_102110110*QR_000010001111+P_202010010*QR_000010001200+P_202010110*QR_000010001201+P_202110010*QR_000010001210+P_202110110*QR_000010001211);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(P_002010010*QR_001000010000+P_002010110*QR_001000010001+P_002110010*QR_001000010010+P_002110110*QR_001000010011+P_102010010*QR_001000010100+P_102010110*QR_001000010101+P_102110010*QR_001000010110+P_102110110*QR_001000010111+P_202010010*QR_001000010200+P_202010110*QR_001000010201+P_202110010*QR_001000010210+P_202110110*QR_001000010211);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(P_002010010*QR_000001010000+P_002010110*QR_000001010001+P_002110010*QR_000001010010+P_002110110*QR_000001010011+P_102010010*QR_000001010100+P_102010110*QR_000001010101+P_102110010*QR_000001010110+P_102110110*QR_000001010111+P_202010010*QR_000001010200+P_202010110*QR_000001010201+P_202110010*QR_000001010210+P_202110110*QR_000001010211);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(P_002010010*QR_000000011000+P_002010110*QR_000000011001+P_002110010*QR_000000011010+P_002110110*QR_000000011011+P_102010010*QR_000000011100+P_102010110*QR_000000011101+P_102110010*QR_000000011110+P_102110110*QR_000000011111+P_202010010*QR_000000011200+P_202010110*QR_000000011201+P_202110010*QR_000000011210+P_202110110*QR_000000011211);
ans_temp[ans_id*18+12]+=Pmtrx[3]*(P_001011010*QR_011000000000+P_001011110*QR_011000000001+P_001111010*QR_011000000010+P_001111110*QR_011000000011+P_001211010*QR_011000000020+P_001211110*QR_011000000021+P_101011010*QR_011000000100+P_101011110*QR_011000000101+P_101111010*QR_011000000110+P_101111110*QR_011000000111+P_101211010*QR_011000000120+P_101211110*QR_011000000121);
ans_temp[ans_id*18+12]+=Pmtrx[4]*(P_001011010*QR_010001000000+P_001011110*QR_010001000001+P_001111010*QR_010001000010+P_001111110*QR_010001000011+P_001211010*QR_010001000020+P_001211110*QR_010001000021+P_101011010*QR_010001000100+P_101011110*QR_010001000101+P_101111010*QR_010001000110+P_101111110*QR_010001000111+P_101211010*QR_010001000120+P_101211110*QR_010001000121);
ans_temp[ans_id*18+12]+=Pmtrx[5]*(P_001011010*QR_010000001000+P_001011110*QR_010000001001+P_001111010*QR_010000001010+P_001111110*QR_010000001011+P_001211010*QR_010000001020+P_001211110*QR_010000001021+P_101011010*QR_010000001100+P_101011110*QR_010000001101+P_101111010*QR_010000001110+P_101111110*QR_010000001111+P_101211010*QR_010000001120+P_101211110*QR_010000001121);
ans_temp[ans_id*18+13]+=Pmtrx[3]*(P_001011010*QR_001010000000+P_001011110*QR_001010000001+P_001111010*QR_001010000010+P_001111110*QR_001010000011+P_001211010*QR_001010000020+P_001211110*QR_001010000021+P_101011010*QR_001010000100+P_101011110*QR_001010000101+P_101111010*QR_001010000110+P_101111110*QR_001010000111+P_101211010*QR_001010000120+P_101211110*QR_001010000121);
ans_temp[ans_id*18+13]+=Pmtrx[4]*(P_001011010*QR_000011000000+P_001011110*QR_000011000001+P_001111010*QR_000011000010+P_001111110*QR_000011000011+P_001211010*QR_000011000020+P_001211110*QR_000011000021+P_101011010*QR_000011000100+P_101011110*QR_000011000101+P_101111010*QR_000011000110+P_101111110*QR_000011000111+P_101211010*QR_000011000120+P_101211110*QR_000011000121);
ans_temp[ans_id*18+13]+=Pmtrx[5]*(P_001011010*QR_000010001000+P_001011110*QR_000010001001+P_001111010*QR_000010001010+P_001111110*QR_000010001011+P_001211010*QR_000010001020+P_001211110*QR_000010001021+P_101011010*QR_000010001100+P_101011110*QR_000010001101+P_101111010*QR_000010001110+P_101111110*QR_000010001111+P_101211010*QR_000010001120+P_101211110*QR_000010001121);
ans_temp[ans_id*18+14]+=Pmtrx[3]*(P_001011010*QR_001000010000+P_001011110*QR_001000010001+P_001111010*QR_001000010010+P_001111110*QR_001000010011+P_001211010*QR_001000010020+P_001211110*QR_001000010021+P_101011010*QR_001000010100+P_101011110*QR_001000010101+P_101111010*QR_001000010110+P_101111110*QR_001000010111+P_101211010*QR_001000010120+P_101211110*QR_001000010121);
ans_temp[ans_id*18+14]+=Pmtrx[4]*(P_001011010*QR_000001010000+P_001011110*QR_000001010001+P_001111010*QR_000001010010+P_001111110*QR_000001010011+P_001211010*QR_000001010020+P_001211110*QR_000001010021+P_101011010*QR_000001010100+P_101011110*QR_000001010101+P_101111010*QR_000001010110+P_101111110*QR_000001010111+P_101211010*QR_000001010120+P_101211110*QR_000001010121);
ans_temp[ans_id*18+14]+=Pmtrx[5]*(P_001011010*QR_000000011000+P_001011110*QR_000000011001+P_001111010*QR_000000011010+P_001111110*QR_000000011011+P_001211010*QR_000000011020+P_001211110*QR_000000011021+P_101011010*QR_000000011100+P_101011110*QR_000000011101+P_101111010*QR_000000011110+P_101111110*QR_000000011111+P_101211010*QR_000000011120+P_101211110*QR_000000011121);
ans_temp[ans_id*18+12]+=Pmtrx[6]*(P_000012010*QR_011000000000+P_000012110*QR_011000000001+P_000112010*QR_011000000010+P_000112110*QR_011000000011+P_000212010*QR_011000000020+P_000212110*QR_011000000021+P_000312010*QR_011000000030+P_000312110*QR_011000000031);
ans_temp[ans_id*18+12]+=Pmtrx[7]*(P_000012010*QR_010001000000+P_000012110*QR_010001000001+P_000112010*QR_010001000010+P_000112110*QR_010001000011+P_000212010*QR_010001000020+P_000212110*QR_010001000021+P_000312010*QR_010001000030+P_000312110*QR_010001000031);
ans_temp[ans_id*18+12]+=Pmtrx[8]*(P_000012010*QR_010000001000+P_000012110*QR_010000001001+P_000112010*QR_010000001010+P_000112110*QR_010000001011+P_000212010*QR_010000001020+P_000212110*QR_010000001021+P_000312010*QR_010000001030+P_000312110*QR_010000001031);
ans_temp[ans_id*18+13]+=Pmtrx[6]*(P_000012010*QR_001010000000+P_000012110*QR_001010000001+P_000112010*QR_001010000010+P_000112110*QR_001010000011+P_000212010*QR_001010000020+P_000212110*QR_001010000021+P_000312010*QR_001010000030+P_000312110*QR_001010000031);
ans_temp[ans_id*18+13]+=Pmtrx[7]*(P_000012010*QR_000011000000+P_000012110*QR_000011000001+P_000112010*QR_000011000010+P_000112110*QR_000011000011+P_000212010*QR_000011000020+P_000212110*QR_000011000021+P_000312010*QR_000011000030+P_000312110*QR_000011000031);
ans_temp[ans_id*18+13]+=Pmtrx[8]*(P_000012010*QR_000010001000+P_000012110*QR_000010001001+P_000112010*QR_000010001010+P_000112110*QR_000010001011+P_000212010*QR_000010001020+P_000212110*QR_000010001021+P_000312010*QR_000010001030+P_000312110*QR_000010001031);
ans_temp[ans_id*18+14]+=Pmtrx[6]*(P_000012010*QR_001000010000+P_000012110*QR_001000010001+P_000112010*QR_001000010010+P_000112110*QR_001000010011+P_000212010*QR_001000010020+P_000212110*QR_001000010021+P_000312010*QR_001000010030+P_000312110*QR_001000010031);
ans_temp[ans_id*18+14]+=Pmtrx[7]*(P_000012010*QR_000001010000+P_000012110*QR_000001010001+P_000112010*QR_000001010010+P_000112110*QR_000001010011+P_000212010*QR_000001010020+P_000212110*QR_000001010021+P_000312010*QR_000001010030+P_000312110*QR_000001010031);
ans_temp[ans_id*18+14]+=Pmtrx[8]*(P_000012010*QR_000000011000+P_000012110*QR_000000011001+P_000112010*QR_000000011010+P_000112110*QR_000000011011+P_000212010*QR_000000011020+P_000212110*QR_000000011021+P_000312010*QR_000000011030+P_000312110*QR_000000011031);
ans_temp[ans_id*18+12]+=Pmtrx[9]*(P_001010011*QR_011000000000+P_001010111*QR_011000000001+P_001010211*QR_011000000002+P_001110011*QR_011000000010+P_001110111*QR_011000000011+P_001110211*QR_011000000012+P_101010011*QR_011000000100+P_101010111*QR_011000000101+P_101010211*QR_011000000102+P_101110011*QR_011000000110+P_101110111*QR_011000000111+P_101110211*QR_011000000112);
ans_temp[ans_id*18+12]+=Pmtrx[10]*(P_001010011*QR_010001000000+P_001010111*QR_010001000001+P_001010211*QR_010001000002+P_001110011*QR_010001000010+P_001110111*QR_010001000011+P_001110211*QR_010001000012+P_101010011*QR_010001000100+P_101010111*QR_010001000101+P_101010211*QR_010001000102+P_101110011*QR_010001000110+P_101110111*QR_010001000111+P_101110211*QR_010001000112);
ans_temp[ans_id*18+12]+=Pmtrx[11]*(P_001010011*QR_010000001000+P_001010111*QR_010000001001+P_001010211*QR_010000001002+P_001110011*QR_010000001010+P_001110111*QR_010000001011+P_001110211*QR_010000001012+P_101010011*QR_010000001100+P_101010111*QR_010000001101+P_101010211*QR_010000001102+P_101110011*QR_010000001110+P_101110111*QR_010000001111+P_101110211*QR_010000001112);
ans_temp[ans_id*18+13]+=Pmtrx[9]*(P_001010011*QR_001010000000+P_001010111*QR_001010000001+P_001010211*QR_001010000002+P_001110011*QR_001010000010+P_001110111*QR_001010000011+P_001110211*QR_001010000012+P_101010011*QR_001010000100+P_101010111*QR_001010000101+P_101010211*QR_001010000102+P_101110011*QR_001010000110+P_101110111*QR_001010000111+P_101110211*QR_001010000112);
ans_temp[ans_id*18+13]+=Pmtrx[10]*(P_001010011*QR_000011000000+P_001010111*QR_000011000001+P_001010211*QR_000011000002+P_001110011*QR_000011000010+P_001110111*QR_000011000011+P_001110211*QR_000011000012+P_101010011*QR_000011000100+P_101010111*QR_000011000101+P_101010211*QR_000011000102+P_101110011*QR_000011000110+P_101110111*QR_000011000111+P_101110211*QR_000011000112);
ans_temp[ans_id*18+13]+=Pmtrx[11]*(P_001010011*QR_000010001000+P_001010111*QR_000010001001+P_001010211*QR_000010001002+P_001110011*QR_000010001010+P_001110111*QR_000010001011+P_001110211*QR_000010001012+P_101010011*QR_000010001100+P_101010111*QR_000010001101+P_101010211*QR_000010001102+P_101110011*QR_000010001110+P_101110111*QR_000010001111+P_101110211*QR_000010001112);
ans_temp[ans_id*18+14]+=Pmtrx[9]*(P_001010011*QR_001000010000+P_001010111*QR_001000010001+P_001010211*QR_001000010002+P_001110011*QR_001000010010+P_001110111*QR_001000010011+P_001110211*QR_001000010012+P_101010011*QR_001000010100+P_101010111*QR_001000010101+P_101010211*QR_001000010102+P_101110011*QR_001000010110+P_101110111*QR_001000010111+P_101110211*QR_001000010112);
ans_temp[ans_id*18+14]+=Pmtrx[10]*(P_001010011*QR_000001010000+P_001010111*QR_000001010001+P_001010211*QR_000001010002+P_001110011*QR_000001010010+P_001110111*QR_000001010011+P_001110211*QR_000001010012+P_101010011*QR_000001010100+P_101010111*QR_000001010101+P_101010211*QR_000001010102+P_101110011*QR_000001010110+P_101110111*QR_000001010111+P_101110211*QR_000001010112);
ans_temp[ans_id*18+14]+=Pmtrx[11]*(P_001010011*QR_000000011000+P_001010111*QR_000000011001+P_001010211*QR_000000011002+P_001110011*QR_000000011010+P_001110111*QR_000000011011+P_001110211*QR_000000011012+P_101010011*QR_000000011100+P_101010111*QR_000000011101+P_101010211*QR_000000011102+P_101110011*QR_000000011110+P_101110111*QR_000000011111+P_101110211*QR_000000011112);
ans_temp[ans_id*18+12]+=Pmtrx[12]*(P_000011011*QR_011000000000+P_000011111*QR_011000000001+P_000011211*QR_011000000002+P_000111011*QR_011000000010+P_000111111*QR_011000000011+P_000111211*QR_011000000012+P_000211011*QR_011000000020+P_000211111*QR_011000000021+P_000211211*QR_011000000022);
ans_temp[ans_id*18+12]+=Pmtrx[13]*(P_000011011*QR_010001000000+P_000011111*QR_010001000001+P_000011211*QR_010001000002+P_000111011*QR_010001000010+P_000111111*QR_010001000011+P_000111211*QR_010001000012+P_000211011*QR_010001000020+P_000211111*QR_010001000021+P_000211211*QR_010001000022);
ans_temp[ans_id*18+12]+=Pmtrx[14]*(P_000011011*QR_010000001000+P_000011111*QR_010000001001+P_000011211*QR_010000001002+P_000111011*QR_010000001010+P_000111111*QR_010000001011+P_000111211*QR_010000001012+P_000211011*QR_010000001020+P_000211111*QR_010000001021+P_000211211*QR_010000001022);
ans_temp[ans_id*18+13]+=Pmtrx[12]*(P_000011011*QR_001010000000+P_000011111*QR_001010000001+P_000011211*QR_001010000002+P_000111011*QR_001010000010+P_000111111*QR_001010000011+P_000111211*QR_001010000012+P_000211011*QR_001010000020+P_000211111*QR_001010000021+P_000211211*QR_001010000022);
ans_temp[ans_id*18+13]+=Pmtrx[13]*(P_000011011*QR_000011000000+P_000011111*QR_000011000001+P_000011211*QR_000011000002+P_000111011*QR_000011000010+P_000111111*QR_000011000011+P_000111211*QR_000011000012+P_000211011*QR_000011000020+P_000211111*QR_000011000021+P_000211211*QR_000011000022);
ans_temp[ans_id*18+13]+=Pmtrx[14]*(P_000011011*QR_000010001000+P_000011111*QR_000010001001+P_000011211*QR_000010001002+P_000111011*QR_000010001010+P_000111111*QR_000010001011+P_000111211*QR_000010001012+P_000211011*QR_000010001020+P_000211111*QR_000010001021+P_000211211*QR_000010001022);
ans_temp[ans_id*18+14]+=Pmtrx[12]*(P_000011011*QR_001000010000+P_000011111*QR_001000010001+P_000011211*QR_001000010002+P_000111011*QR_001000010010+P_000111111*QR_001000010011+P_000111211*QR_001000010012+P_000211011*QR_001000010020+P_000211111*QR_001000010021+P_000211211*QR_001000010022);
ans_temp[ans_id*18+14]+=Pmtrx[13]*(P_000011011*QR_000001010000+P_000011111*QR_000001010001+P_000011211*QR_000001010002+P_000111011*QR_000001010010+P_000111111*QR_000001010011+P_000111211*QR_000001010012+P_000211011*QR_000001010020+P_000211111*QR_000001010021+P_000211211*QR_000001010022);
ans_temp[ans_id*18+14]+=Pmtrx[14]*(P_000011011*QR_000000011000+P_000011111*QR_000000011001+P_000011211*QR_000000011002+P_000111011*QR_000000011010+P_000111111*QR_000000011011+P_000111211*QR_000000011012+P_000211011*QR_000000011020+P_000211111*QR_000000011021+P_000211211*QR_000000011022);
ans_temp[ans_id*18+12]+=Pmtrx[15]*(P_000010012*QR_011000000000+P_000010112*QR_011000000001+P_000010212*QR_011000000002+P_000010312*QR_011000000003+P_000110012*QR_011000000010+P_000110112*QR_011000000011+P_000110212*QR_011000000012+P_000110312*QR_011000000013);
ans_temp[ans_id*18+12]+=Pmtrx[16]*(P_000010012*QR_010001000000+P_000010112*QR_010001000001+P_000010212*QR_010001000002+P_000010312*QR_010001000003+P_000110012*QR_010001000010+P_000110112*QR_010001000011+P_000110212*QR_010001000012+P_000110312*QR_010001000013);
ans_temp[ans_id*18+12]+=Pmtrx[17]*(P_000010012*QR_010000001000+P_000010112*QR_010000001001+P_000010212*QR_010000001002+P_000010312*QR_010000001003+P_000110012*QR_010000001010+P_000110112*QR_010000001011+P_000110212*QR_010000001012+P_000110312*QR_010000001013);
ans_temp[ans_id*18+13]+=Pmtrx[15]*(P_000010012*QR_001010000000+P_000010112*QR_001010000001+P_000010212*QR_001010000002+P_000010312*QR_001010000003+P_000110012*QR_001010000010+P_000110112*QR_001010000011+P_000110212*QR_001010000012+P_000110312*QR_001010000013);
ans_temp[ans_id*18+13]+=Pmtrx[16]*(P_000010012*QR_000011000000+P_000010112*QR_000011000001+P_000010212*QR_000011000002+P_000010312*QR_000011000003+P_000110012*QR_000011000010+P_000110112*QR_000011000011+P_000110212*QR_000011000012+P_000110312*QR_000011000013);
ans_temp[ans_id*18+13]+=Pmtrx[17]*(P_000010012*QR_000010001000+P_000010112*QR_000010001001+P_000010212*QR_000010001002+P_000010312*QR_000010001003+P_000110012*QR_000010001010+P_000110112*QR_000010001011+P_000110212*QR_000010001012+P_000110312*QR_000010001013);
ans_temp[ans_id*18+14]+=Pmtrx[15]*(P_000010012*QR_001000010000+P_000010112*QR_001000010001+P_000010212*QR_001000010002+P_000010312*QR_001000010003+P_000110012*QR_001000010010+P_000110112*QR_001000010011+P_000110212*QR_001000010012+P_000110312*QR_001000010013);
ans_temp[ans_id*18+14]+=Pmtrx[16]*(P_000010012*QR_000001010000+P_000010112*QR_000001010001+P_000010212*QR_000001010002+P_000010312*QR_000001010003+P_000110012*QR_000001010010+P_000110112*QR_000001010011+P_000110212*QR_000001010012+P_000110312*QR_000001010013);
ans_temp[ans_id*18+14]+=Pmtrx[17]*(P_000010012*QR_000000011000+P_000010112*QR_000000011001+P_000010212*QR_000000011002+P_000010312*QR_000000011003+P_000110012*QR_000000011010+P_000110112*QR_000000011011+P_000110212*QR_000000011012+P_000110312*QR_000000011013);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(P_002000020*QR_011000000000+P_002000120*QR_011000000001+P_002000220*QR_011000000002+P_102000020*QR_011000000100+P_102000120*QR_011000000101+P_102000220*QR_011000000102+P_202000020*QR_011000000200+P_202000120*QR_011000000201+P_202000220*QR_011000000202);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(P_002000020*QR_010001000000+P_002000120*QR_010001000001+P_002000220*QR_010001000002+P_102000020*QR_010001000100+P_102000120*QR_010001000101+P_102000220*QR_010001000102+P_202000020*QR_010001000200+P_202000120*QR_010001000201+P_202000220*QR_010001000202);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(P_002000020*QR_010000001000+P_002000120*QR_010000001001+P_002000220*QR_010000001002+P_102000020*QR_010000001100+P_102000120*QR_010000001101+P_102000220*QR_010000001102+P_202000020*QR_010000001200+P_202000120*QR_010000001201+P_202000220*QR_010000001202);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(P_002000020*QR_001010000000+P_002000120*QR_001010000001+P_002000220*QR_001010000002+P_102000020*QR_001010000100+P_102000120*QR_001010000101+P_102000220*QR_001010000102+P_202000020*QR_001010000200+P_202000120*QR_001010000201+P_202000220*QR_001010000202);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(P_002000020*QR_000011000000+P_002000120*QR_000011000001+P_002000220*QR_000011000002+P_102000020*QR_000011000100+P_102000120*QR_000011000101+P_102000220*QR_000011000102+P_202000020*QR_000011000200+P_202000120*QR_000011000201+P_202000220*QR_000011000202);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(P_002000020*QR_000010001000+P_002000120*QR_000010001001+P_002000220*QR_000010001002+P_102000020*QR_000010001100+P_102000120*QR_000010001101+P_102000220*QR_000010001102+P_202000020*QR_000010001200+P_202000120*QR_000010001201+P_202000220*QR_000010001202);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(P_002000020*QR_001000010000+P_002000120*QR_001000010001+P_002000220*QR_001000010002+P_102000020*QR_001000010100+P_102000120*QR_001000010101+P_102000220*QR_001000010102+P_202000020*QR_001000010200+P_202000120*QR_001000010201+P_202000220*QR_001000010202);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(P_002000020*QR_000001010000+P_002000120*QR_000001010001+P_002000220*QR_000001010002+P_102000020*QR_000001010100+P_102000120*QR_000001010101+P_102000220*QR_000001010102+P_202000020*QR_000001010200+P_202000120*QR_000001010201+P_202000220*QR_000001010202);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(P_002000020*QR_000000011000+P_002000120*QR_000000011001+P_002000220*QR_000000011002+P_102000020*QR_000000011100+P_102000120*QR_000000011101+P_102000220*QR_000000011102+P_202000020*QR_000000011200+P_202000120*QR_000000011201+P_202000220*QR_000000011202);
ans_temp[ans_id*18+15]+=Pmtrx[3]*(P_001001020*QR_011000000000+P_001001120*QR_011000000001+P_001001220*QR_011000000002+P_001101020*QR_011000000010+P_001101120*QR_011000000011+P_001101220*QR_011000000012+P_101001020*QR_011000000100+P_101001120*QR_011000000101+P_101001220*QR_011000000102+P_101101020*QR_011000000110+P_101101120*QR_011000000111+P_101101220*QR_011000000112);
ans_temp[ans_id*18+15]+=Pmtrx[4]*(P_001001020*QR_010001000000+P_001001120*QR_010001000001+P_001001220*QR_010001000002+P_001101020*QR_010001000010+P_001101120*QR_010001000011+P_001101220*QR_010001000012+P_101001020*QR_010001000100+P_101001120*QR_010001000101+P_101001220*QR_010001000102+P_101101020*QR_010001000110+P_101101120*QR_010001000111+P_101101220*QR_010001000112);
ans_temp[ans_id*18+15]+=Pmtrx[5]*(P_001001020*QR_010000001000+P_001001120*QR_010000001001+P_001001220*QR_010000001002+P_001101020*QR_010000001010+P_001101120*QR_010000001011+P_001101220*QR_010000001012+P_101001020*QR_010000001100+P_101001120*QR_010000001101+P_101001220*QR_010000001102+P_101101020*QR_010000001110+P_101101120*QR_010000001111+P_101101220*QR_010000001112);
ans_temp[ans_id*18+16]+=Pmtrx[3]*(P_001001020*QR_001010000000+P_001001120*QR_001010000001+P_001001220*QR_001010000002+P_001101020*QR_001010000010+P_001101120*QR_001010000011+P_001101220*QR_001010000012+P_101001020*QR_001010000100+P_101001120*QR_001010000101+P_101001220*QR_001010000102+P_101101020*QR_001010000110+P_101101120*QR_001010000111+P_101101220*QR_001010000112);
ans_temp[ans_id*18+16]+=Pmtrx[4]*(P_001001020*QR_000011000000+P_001001120*QR_000011000001+P_001001220*QR_000011000002+P_001101020*QR_000011000010+P_001101120*QR_000011000011+P_001101220*QR_000011000012+P_101001020*QR_000011000100+P_101001120*QR_000011000101+P_101001220*QR_000011000102+P_101101020*QR_000011000110+P_101101120*QR_000011000111+P_101101220*QR_000011000112);
ans_temp[ans_id*18+16]+=Pmtrx[5]*(P_001001020*QR_000010001000+P_001001120*QR_000010001001+P_001001220*QR_000010001002+P_001101020*QR_000010001010+P_001101120*QR_000010001011+P_001101220*QR_000010001012+P_101001020*QR_000010001100+P_101001120*QR_000010001101+P_101001220*QR_000010001102+P_101101020*QR_000010001110+P_101101120*QR_000010001111+P_101101220*QR_000010001112);
ans_temp[ans_id*18+17]+=Pmtrx[3]*(P_001001020*QR_001000010000+P_001001120*QR_001000010001+P_001001220*QR_001000010002+P_001101020*QR_001000010010+P_001101120*QR_001000010011+P_001101220*QR_001000010012+P_101001020*QR_001000010100+P_101001120*QR_001000010101+P_101001220*QR_001000010102+P_101101020*QR_001000010110+P_101101120*QR_001000010111+P_101101220*QR_001000010112);
ans_temp[ans_id*18+17]+=Pmtrx[4]*(P_001001020*QR_000001010000+P_001001120*QR_000001010001+P_001001220*QR_000001010002+P_001101020*QR_000001010010+P_001101120*QR_000001010011+P_001101220*QR_000001010012+P_101001020*QR_000001010100+P_101001120*QR_000001010101+P_101001220*QR_000001010102+P_101101020*QR_000001010110+P_101101120*QR_000001010111+P_101101220*QR_000001010112);
ans_temp[ans_id*18+17]+=Pmtrx[5]*(P_001001020*QR_000000011000+P_001001120*QR_000000011001+P_001001220*QR_000000011002+P_001101020*QR_000000011010+P_001101120*QR_000000011011+P_001101220*QR_000000011012+P_101001020*QR_000000011100+P_101001120*QR_000000011101+P_101001220*QR_000000011102+P_101101020*QR_000000011110+P_101101120*QR_000000011111+P_101101220*QR_000000011112);
ans_temp[ans_id*18+15]+=Pmtrx[6]*(P_000002020*QR_011000000000+P_000002120*QR_011000000001+P_000002220*QR_011000000002+P_000102020*QR_011000000010+P_000102120*QR_011000000011+P_000102220*QR_011000000012+P_000202020*QR_011000000020+P_000202120*QR_011000000021+P_000202220*QR_011000000022);
ans_temp[ans_id*18+15]+=Pmtrx[7]*(P_000002020*QR_010001000000+P_000002120*QR_010001000001+P_000002220*QR_010001000002+P_000102020*QR_010001000010+P_000102120*QR_010001000011+P_000102220*QR_010001000012+P_000202020*QR_010001000020+P_000202120*QR_010001000021+P_000202220*QR_010001000022);
ans_temp[ans_id*18+15]+=Pmtrx[8]*(P_000002020*QR_010000001000+P_000002120*QR_010000001001+P_000002220*QR_010000001002+P_000102020*QR_010000001010+P_000102120*QR_010000001011+P_000102220*QR_010000001012+P_000202020*QR_010000001020+P_000202120*QR_010000001021+P_000202220*QR_010000001022);
ans_temp[ans_id*18+16]+=Pmtrx[6]*(P_000002020*QR_001010000000+P_000002120*QR_001010000001+P_000002220*QR_001010000002+P_000102020*QR_001010000010+P_000102120*QR_001010000011+P_000102220*QR_001010000012+P_000202020*QR_001010000020+P_000202120*QR_001010000021+P_000202220*QR_001010000022);
ans_temp[ans_id*18+16]+=Pmtrx[7]*(P_000002020*QR_000011000000+P_000002120*QR_000011000001+P_000002220*QR_000011000002+P_000102020*QR_000011000010+P_000102120*QR_000011000011+P_000102220*QR_000011000012+P_000202020*QR_000011000020+P_000202120*QR_000011000021+P_000202220*QR_000011000022);
ans_temp[ans_id*18+16]+=Pmtrx[8]*(P_000002020*QR_000010001000+P_000002120*QR_000010001001+P_000002220*QR_000010001002+P_000102020*QR_000010001010+P_000102120*QR_000010001011+P_000102220*QR_000010001012+P_000202020*QR_000010001020+P_000202120*QR_000010001021+P_000202220*QR_000010001022);
ans_temp[ans_id*18+17]+=Pmtrx[6]*(P_000002020*QR_001000010000+P_000002120*QR_001000010001+P_000002220*QR_001000010002+P_000102020*QR_001000010010+P_000102120*QR_001000010011+P_000102220*QR_001000010012+P_000202020*QR_001000010020+P_000202120*QR_001000010021+P_000202220*QR_001000010022);
ans_temp[ans_id*18+17]+=Pmtrx[7]*(P_000002020*QR_000001010000+P_000002120*QR_000001010001+P_000002220*QR_000001010002+P_000102020*QR_000001010010+P_000102120*QR_000001010011+P_000102220*QR_000001010012+P_000202020*QR_000001010020+P_000202120*QR_000001010021+P_000202220*QR_000001010022);
ans_temp[ans_id*18+17]+=Pmtrx[8]*(P_000002020*QR_000000011000+P_000002120*QR_000000011001+P_000002220*QR_000000011002+P_000102020*QR_000000011010+P_000102120*QR_000000011011+P_000102220*QR_000000011012+P_000202020*QR_000000011020+P_000202120*QR_000000011021+P_000202220*QR_000000011022);
ans_temp[ans_id*18+15]+=Pmtrx[9]*(P_001000021*QR_011000000000+P_001000121*QR_011000000001+P_001000221*QR_011000000002+P_001000321*QR_011000000003+P_101000021*QR_011000000100+P_101000121*QR_011000000101+P_101000221*QR_011000000102+P_101000321*QR_011000000103);
ans_temp[ans_id*18+15]+=Pmtrx[10]*(P_001000021*QR_010001000000+P_001000121*QR_010001000001+P_001000221*QR_010001000002+P_001000321*QR_010001000003+P_101000021*QR_010001000100+P_101000121*QR_010001000101+P_101000221*QR_010001000102+P_101000321*QR_010001000103);
ans_temp[ans_id*18+15]+=Pmtrx[11]*(P_001000021*QR_010000001000+P_001000121*QR_010000001001+P_001000221*QR_010000001002+P_001000321*QR_010000001003+P_101000021*QR_010000001100+P_101000121*QR_010000001101+P_101000221*QR_010000001102+P_101000321*QR_010000001103);
ans_temp[ans_id*18+16]+=Pmtrx[9]*(P_001000021*QR_001010000000+P_001000121*QR_001010000001+P_001000221*QR_001010000002+P_001000321*QR_001010000003+P_101000021*QR_001010000100+P_101000121*QR_001010000101+P_101000221*QR_001010000102+P_101000321*QR_001010000103);
ans_temp[ans_id*18+16]+=Pmtrx[10]*(P_001000021*QR_000011000000+P_001000121*QR_000011000001+P_001000221*QR_000011000002+P_001000321*QR_000011000003+P_101000021*QR_000011000100+P_101000121*QR_000011000101+P_101000221*QR_000011000102+P_101000321*QR_000011000103);
ans_temp[ans_id*18+16]+=Pmtrx[11]*(P_001000021*QR_000010001000+P_001000121*QR_000010001001+P_001000221*QR_000010001002+P_001000321*QR_000010001003+P_101000021*QR_000010001100+P_101000121*QR_000010001101+P_101000221*QR_000010001102+P_101000321*QR_000010001103);
ans_temp[ans_id*18+17]+=Pmtrx[9]*(P_001000021*QR_001000010000+P_001000121*QR_001000010001+P_001000221*QR_001000010002+P_001000321*QR_001000010003+P_101000021*QR_001000010100+P_101000121*QR_001000010101+P_101000221*QR_001000010102+P_101000321*QR_001000010103);
ans_temp[ans_id*18+17]+=Pmtrx[10]*(P_001000021*QR_000001010000+P_001000121*QR_000001010001+P_001000221*QR_000001010002+P_001000321*QR_000001010003+P_101000021*QR_000001010100+P_101000121*QR_000001010101+P_101000221*QR_000001010102+P_101000321*QR_000001010103);
ans_temp[ans_id*18+17]+=Pmtrx[11]*(P_001000021*QR_000000011000+P_001000121*QR_000000011001+P_001000221*QR_000000011002+P_001000321*QR_000000011003+P_101000021*QR_000000011100+P_101000121*QR_000000011101+P_101000221*QR_000000011102+P_101000321*QR_000000011103);
ans_temp[ans_id*18+15]+=Pmtrx[12]*(P_000001021*QR_011000000000+P_000001121*QR_011000000001+P_000001221*QR_011000000002+P_000001321*QR_011000000003+P_000101021*QR_011000000010+P_000101121*QR_011000000011+P_000101221*QR_011000000012+P_000101321*QR_011000000013);
ans_temp[ans_id*18+15]+=Pmtrx[13]*(P_000001021*QR_010001000000+P_000001121*QR_010001000001+P_000001221*QR_010001000002+P_000001321*QR_010001000003+P_000101021*QR_010001000010+P_000101121*QR_010001000011+P_000101221*QR_010001000012+P_000101321*QR_010001000013);
ans_temp[ans_id*18+15]+=Pmtrx[14]*(P_000001021*QR_010000001000+P_000001121*QR_010000001001+P_000001221*QR_010000001002+P_000001321*QR_010000001003+P_000101021*QR_010000001010+P_000101121*QR_010000001011+P_000101221*QR_010000001012+P_000101321*QR_010000001013);
ans_temp[ans_id*18+16]+=Pmtrx[12]*(P_000001021*QR_001010000000+P_000001121*QR_001010000001+P_000001221*QR_001010000002+P_000001321*QR_001010000003+P_000101021*QR_001010000010+P_000101121*QR_001010000011+P_000101221*QR_001010000012+P_000101321*QR_001010000013);
ans_temp[ans_id*18+16]+=Pmtrx[13]*(P_000001021*QR_000011000000+P_000001121*QR_000011000001+P_000001221*QR_000011000002+P_000001321*QR_000011000003+P_000101021*QR_000011000010+P_000101121*QR_000011000011+P_000101221*QR_000011000012+P_000101321*QR_000011000013);
ans_temp[ans_id*18+16]+=Pmtrx[14]*(P_000001021*QR_000010001000+P_000001121*QR_000010001001+P_000001221*QR_000010001002+P_000001321*QR_000010001003+P_000101021*QR_000010001010+P_000101121*QR_000010001011+P_000101221*QR_000010001012+P_000101321*QR_000010001013);
ans_temp[ans_id*18+17]+=Pmtrx[12]*(P_000001021*QR_001000010000+P_000001121*QR_001000010001+P_000001221*QR_001000010002+P_000001321*QR_001000010003+P_000101021*QR_001000010010+P_000101121*QR_001000010011+P_000101221*QR_001000010012+P_000101321*QR_001000010013);
ans_temp[ans_id*18+17]+=Pmtrx[13]*(P_000001021*QR_000001010000+P_000001121*QR_000001010001+P_000001221*QR_000001010002+P_000001321*QR_000001010003+P_000101021*QR_000001010010+P_000101121*QR_000001010011+P_000101221*QR_000001010012+P_000101321*QR_000001010013);
ans_temp[ans_id*18+17]+=Pmtrx[14]*(P_000001021*QR_000000011000+P_000001121*QR_000000011001+P_000001221*QR_000000011002+P_000001321*QR_000000011003+P_000101021*QR_000000011010+P_000101121*QR_000000011011+P_000101221*QR_000000011012+P_000101321*QR_000000011013);
ans_temp[ans_id*18+15]+=Pmtrx[15]*(P_000000022*QR_011000000000+P_000000122*QR_011000000001+P_000000222*QR_011000000002+P_000000322*QR_011000000003+P_000000422*QR_011000000004);
ans_temp[ans_id*18+15]+=Pmtrx[16]*(P_000000022*QR_010001000000+P_000000122*QR_010001000001+P_000000222*QR_010001000002+P_000000322*QR_010001000003+P_000000422*QR_010001000004);
ans_temp[ans_id*18+15]+=Pmtrx[17]*(P_000000022*QR_010000001000+P_000000122*QR_010000001001+P_000000222*QR_010000001002+P_000000322*QR_010000001003+P_000000422*QR_010000001004);
ans_temp[ans_id*18+16]+=Pmtrx[15]*(P_000000022*QR_001010000000+P_000000122*QR_001010000001+P_000000222*QR_001010000002+P_000000322*QR_001010000003+P_000000422*QR_001010000004);
ans_temp[ans_id*18+16]+=Pmtrx[16]*(P_000000022*QR_000011000000+P_000000122*QR_000011000001+P_000000222*QR_000011000002+P_000000322*QR_000011000003+P_000000422*QR_000011000004);
ans_temp[ans_id*18+16]+=Pmtrx[17]*(P_000000022*QR_000010001000+P_000000122*QR_000010001001+P_000000222*QR_000010001002+P_000000322*QR_000010001003+P_000000422*QR_000010001004);
ans_temp[ans_id*18+17]+=Pmtrx[15]*(P_000000022*QR_001000010000+P_000000122*QR_001000010001+P_000000222*QR_001000010002+P_000000322*QR_001000010003+P_000000422*QR_001000010004);
ans_temp[ans_id*18+17]+=Pmtrx[16]*(P_000000022*QR_000001010000+P_000000122*QR_000001010001+P_000000222*QR_000001010002+P_000000322*QR_000001010003+P_000000422*QR_000001010004);
ans_temp[ans_id*18+17]+=Pmtrx[17]*(P_000000022*QR_000000011000+P_000000122*QR_000000011001+P_000000222*QR_000000011002+P_000000322*QR_000000011003+P_000000422*QR_000000011004);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
| 42ea41a7b68710724ef05a2338bd034c550efddc.cu | #include<math.h>
#include"Boys_gpu.h"
#define PI 3.1415926535897932
#define P25 17.4934183276248620   // == PI^(5/2); prefactor used in the integral normalization (lmd)
#define NTHREAD 64                // threads per block; the kernels' tree reduction assumes blockDim.x == NTHREAD
// Legacy texture references for the per-primitive bra ("p") and ket ("q") data.
// double values are stored as int2 texels and reassembled in the kernels with
// __hiloint2double; float/uint arrays are fetched directly.
// NOTE(review): the texture-reference API (texture<>, cudaBindTexture,
// tex1Dfetch) is deprecated in modern CUDA -- texture objects would be the
// migration path.
texture<int2,1,cudaReadModeElementType> tex_P;       // pair centers P (xyz per primitive)
texture<int2,1,cudaReadModeElementType> tex_Zta;     // bra exponents zeta
texture<int2,1,cudaReadModeElementType> tex_pp;      // bra contraction/prefactor data
texture<float,1,cudaReadModeElementType> tex_K2_p;   // bra screening magnitudes
texture<int2,1,cudaReadModeElementType> tex_PA;      // P-A displacement (xyz per primitive)
texture<int2,1,cudaReadModeElementType> tex_PB;      // P-B displacement (xyz per primitive)
texture<unsigned int,1,cudaReadModeElementType> tex_id_bra;  // bra basis-function row ids
texture<int2,1,cudaReadModeElementType> tex_Q;       // pair centers Q (xyz per primitive)
texture<int2,1,cudaReadModeElementType> tex_Eta;     // ket exponents eta
texture<int2,1,cudaReadModeElementType> tex_pq;      // ket contraction/prefactor data
texture<float,1,cudaReadModeElementType> tex_K2_q;   // ket screening magnitudes
texture<int2,1,cudaReadModeElementType> tex_QC;      // Q-C displacement (xyz per primitive)
texture<int2,1,cudaReadModeElementType> tex_QD;      // Q-D displacement (xyz per primitive)
texture<unsigned int,1,cudaReadModeElementType> tex_id_ket;  // ket basis-function column ids
void MD_texture_binding_bra_pp(double * P_d,double * PA_d,double * PB_d,\
double * alphaP_d,double * pp_d,float * K2_p_d,unsigned int * id_bra_d,\
unsigned int primit_len){
    // Bind the bra-side per-primitive device arrays to their texture
    // references so the kernels can read them through the texture cache.
    // Uses the legacy texture-reference API (deprecated in modern CUDA).
    const size_t scalarBytes = sizeof(double) * primit_len;        // one double per primitive
    const size_t tripleBytes = sizeof(double) * primit_len * 3;    // xyz triple per primitive
    const size_t floatBytes  = sizeof(float) * primit_len;
    const size_t idBytes     = sizeof(unsigned int) * primit_len;
    cudaBindTexture(0, tex_P,      P_d,      tripleBytes);  // pair centers P
    cudaBindTexture(0, tex_Zta,    alphaP_d, scalarBytes);  // exponents zeta
    cudaBindTexture(0, tex_pp,     pp_d,     scalarBytes);
    cudaBindTexture(0, tex_K2_p,   K2_p_d,   floatBytes);   // screening magnitudes
    cudaBindTexture(0, tex_PA,     PA_d,     tripleBytes);
    cudaBindTexture(0, tex_PB,     PB_d,     tripleBytes);
    cudaBindTexture(0, tex_id_bra, id_bra_d, idBytes);      // basis-function ids
}
// Release the bra-side texture bindings created by MD_texture_binding_bra_pp.
// Safe to call once the kernels using tex_P/tex_Zta/... have completed.
void MD_texture_unbind_bra_pp(){
    cudaUnbindTexture(tex_P);
    cudaUnbindTexture(tex_Zta);
    cudaUnbindTexture(tex_pp);
    cudaUnbindTexture(tex_K2_p);
    cudaUnbindTexture(tex_PA);
    cudaUnbindTexture(tex_PB);
    cudaUnbindTexture(tex_id_bra);
}
void MD_texture_binding_ket_pp(double * Q_d,double * QC_d,double * QD_d,\
double * alphaQ_d,double * pq_d,float * K2_q_d,unsigned int * id_ket_d,\
unsigned int primit_len){
    // Bind the ket-side per-primitive device arrays to their texture
    // references (mirror of MD_texture_binding_bra_pp for the Q/eta data).
    // Uses the legacy texture-reference API (deprecated in modern CUDA).
    const size_t scalarBytes = sizeof(double) * primit_len;        // one double per primitive
    const size_t tripleBytes = sizeof(double) * primit_len * 3;    // xyz triple per primitive
    const size_t floatBytes  = sizeof(float) * primit_len;
    const size_t idBytes     = sizeof(unsigned int) * primit_len;
    cudaBindTexture(0, tex_Q,      Q_d,      tripleBytes);  // pair centers Q
    cudaBindTexture(0, tex_Eta,    alphaQ_d, scalarBytes);  // exponents eta
    cudaBindTexture(0, tex_pq,     pq_d,     scalarBytes);
    cudaBindTexture(0, tex_K2_q,   K2_q_d,   floatBytes);   // screening magnitudes
    cudaBindTexture(0, tex_QC,     QC_d,     tripleBytes);
    cudaBindTexture(0, tex_QD,     QD_d,     tripleBytes);
    cudaBindTexture(0, tex_id_ket, id_ket_d, idBytes);      // basis-function ids
}
// Release the ket-side texture bindings created by MD_texture_binding_ket_pp.
// Safe to call once the kernels using tex_Q/tex_Eta/... have completed.
void MD_texture_unbind_ket_pp(){
    cudaUnbindTexture(tex_Q);
    cudaUnbindTexture(tex_Eta);
    cudaUnbindTexture(tex_pq);
    cudaUnbindTexture(tex_K2_q);
    cudaUnbindTexture(tex_QC);
    cudaUnbindTexture(tex_QD);
    cudaUnbindTexture(tex_id_ket);
}
// Accumulates density-weighted (sd|pp)-class two-electron-integral
// contributions for every (contracted bra, contracted ket) shell pair and
// writes 3 values per pair into `ans`.  The naming suggests a
// McMurchie-Davidson Hermite scheme driven by the Boys function Ft_fs_4 --
// TODO confirm against the code generator.
//
// Launch layout (grounded in the index arithmetic below):
//   * 2-D grid: blockIdx.x strides over contracted bra shells,
//     blockIdx.y over contracted ket shells;
//   * 1-D block: threads cooperate over bra primitives ("Kp" variant);
//     the final tree reduction requires blockDim.x == NTHREAD.
// Bra primitive data are fetched through the tex_* texture references bound
// by MD_texture_binding_bra_pp; doubles are stored as int2 texels and
// reassembled with __hiloint2double.
__global__ void MD_Kp_sdpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
                unsigned int * contrc_bra_id,\
                unsigned int * contrc_ket_id,\
                unsigned int mtrx_len,\
                double * Pmtrx_in,\
                double * P,\
                double * PA,\
                double * PB,\
                double * Zta_in,\
                double * pp_in,\
                float * K2_p_in,\
                unsigned int * id_bra_in,\
                double * Q,\
                double * QC,\
                double * QD,\
                double * Eta_in,\
                double * pq_in,\
                float * K2_q_in,\
                unsigned int * id_ket_in,\
                double * ans){
    unsigned int tId_x = threadIdx.x;
    unsigned int bId_x = blockIdx.x;
    unsigned int bId_y = blockIdx.y;
    unsigned int tdis = blockDim.x;
    unsigned int bdis_x = gridDim.x;
    unsigned int bdis_y = gridDim.y;
    unsigned int ans_id=tId_x;
    // 6x3 density-matrix block for the current (bra,ket) primitive pair.
    double Pmtrx[18]={0.0};
    // Per-thread partial sums; the init loop below zeroes all 3*tdis entries
    // (indexed i*tdis+tId_x) even though accumulation uses ans_id*3+i.
    __shared__ double ans_temp[NTHREAD*3];
    for(int i=0;i<3;i++){
        ans_temp[i*tdis+tId_x]=0.0;
    }
    // Grid-stride over all contracted bra/ket shell pairs.
    for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
    for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
    unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra  ];
    unsigned int primit_bra_end   = contrc_bra_id[i_contrc_bra+1];
    unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket  ];
    unsigned int primit_ket_end   = contrc_ket_id[j_contrc_ket+1];
        // All threads of the block share one ket primitive; bra primitives
        // are distributed across threads in the inner loop.
        for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
            unsigned int id_ket=id_ket_in[ii];
            double QX=Q[ii*3+0];
            double QY=Q[ii*3+1];
            double QZ=Q[ii*3+2];
            double Qd_010[3];
            Qd_010[0]=QC[ii*3+0];
            Qd_010[1]=QC[ii*3+1];
            Qd_010[2]=QC[ii*3+2];
            double Qd_001[3];
            Qd_001[0]=QD[ii*3+0];
            Qd_001[1]=QD[ii*3+1];
            Qd_001[2]=QD[ii*3+2];
            double Eta=Eta_in[ii];
            double pq=pq_in[ii];
            float K2_q=K2_q_in[ii];
            double aQin1=1/(2*Eta);
            // Threads stride over the bra primitives of this contracted shell.
            for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
            unsigned int jj=primit_bra_start+j;
            unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
            double P_max=0.0;
            // Load the 6x3 density block and track its largest magnitude
            // for Cauchy-Schwarz-style screening below.
            for(int p_j=0;p_j<3;p_j++){
            for(int p_i=0;p_i<6;p_i++){
            Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
            // NOTE(review): fabsf narrows a double to float; harmless for a
            // screening magnitude, but fabs would avoid the conversion.
            double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
            if(temp_P>P_max) P_max=temp_P;
            }
            }
            float K2_p=tex1Dfetch(tex_K2_p,jj);
            // NOTE(review): `break` (not `continue`) assumes the bra
            // primitives are ordered by decreasing |K2_p| so that all later
            // ones are negligible too -- confirm in the host-side setup.
            if(fabsf(K2_p*K2_q)<1.0E-14){
                break;
            }
            if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
            // Reassemble the bra-side doubles from their int2 texture texels.
            int2 temp_int2;
            temp_int2=tex1Dfetch(tex_Zta,jj);
            double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
            temp_int2=tex1Dfetch(tex_pp,jj);
            double pp=__hiloint2double(temp_int2.y,temp_int2.x);
            temp_int2=tex1Dfetch(tex_P,jj*3+0);
            double PX=__hiloint2double(temp_int2.y,temp_int2.x);
            temp_int2=tex1Dfetch(tex_P,jj*3+1);
            double PY=__hiloint2double(temp_int2.y,temp_int2.x);
            temp_int2=tex1Dfetch(tex_P,jj*3+2);
            double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
            double Pd_001[3];
            temp_int2=tex1Dfetch(tex_PB,jj*3+0);
            Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
            temp_int2=tex1Dfetch(tex_PB,jj*3+1);
            Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
            temp_int2=tex1Dfetch(tex_PB,jj*3+2);
            Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
            // Combined exponent factors and the Boys-function argument
            // T = alphaT * |P-Q|^2; Ft_fs_4 fills R_000[0..4].
            double alphaT=rsqrt(Eta+Zta);
            double lmd=4*P25*pp*pq*alphaT;
            alphaT=Eta*Zta*alphaT*alphaT;
            double TX=PX-QX;
            double TY=PY-QY;
            double TZ=PZ-QZ;
            double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
            double R_000[5];
            Ft_fs_4(4,T,R_000);
            // Scale the Boys values into the (-2*alphaT)^m * lmd ladder.
            R_000[0]*=lmd;
            R_000[1]*=-2*alphaT*lmd;
            R_000[2]*=4*alphaT*alphaT*lmd;
            R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
            R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
            double aPin1=1/(2*Zta);
            // Hermite auxiliary integrals R_{t,u,v}[m]; array length encodes
            // how many m-orders are kept for each (t,u,v).
            double R_100[4];
            double R_200[3];
            double R_300[2];
            double R_400[1];
            double R_010[4];
            double R_110[3];
            double R_210[2];
            double R_310[1];
            double R_020[3];
            double R_120[2];
            double R_220[1];
            double R_030[2];
            double R_130[1];
            double R_040[1];
            double R_001[4];
            double R_101[3];
            double R_201[2];
            double R_301[1];
            double R_011[3];
            double R_111[2];
            double R_211[1];
            double R_021[2];
            double R_121[1];
            double R_031[1];
            double R_002[3];
            double R_102[2];
            double R_202[1];
            double R_012[2];
            double R_112[1];
            double R_022[1];
            double R_003[2];
            double R_103[1];
            double R_013[1];
            double R_004[1];
            // Upward recursion building R_{t,u,v} from lower orders
            // (generated code; statement order matters).
            for(int i=0;i<4;i++){
                R_100[i]=TX*R_000[i+1];
            }
            for(int i=0;i<4;i++){
                R_010[i]=TY*R_000[i+1];
            }
            for(int i=0;i<4;i++){
                R_001[i]=TZ*R_000[i+1];
            }
            for(int i=0;i<3;i++){
                R_200[i]=TX*R_100[i+1]+R_000[i+1];
            }
            for(int i=0;i<3;i++){
                R_110[i]=TX*R_010[i+1];
            }
            for(int i=0;i<3;i++){
                R_020[i]=TY*R_010[i+1]+R_000[i+1];
            }
            for(int i=0;i<3;i++){
                R_101[i]=TX*R_001[i+1];
            }
            for(int i=0;i<3;i++){
                R_011[i]=TY*R_001[i+1];
            }
            for(int i=0;i<3;i++){
                R_002[i]=TZ*R_001[i+1]+R_000[i+1];
            }
            for(int i=0;i<2;i++){
                R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
            }
            for(int i=0;i<2;i++){
                R_210[i]=TY*R_200[i+1];
            }
            for(int i=0;i<2;i++){
                R_120[i]=TX*R_020[i+1];
            }
            for(int i=0;i<2;i++){
                R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
            }
            for(int i=0;i<2;i++){
                R_201[i]=TZ*R_200[i+1];
            }
            for(int i=0;i<2;i++){
                R_111[i]=TX*R_011[i+1];
            }
            for(int i=0;i<2;i++){
                R_021[i]=TZ*R_020[i+1];
            }
            for(int i=0;i<2;i++){
                R_102[i]=TX*R_002[i+1];
            }
            for(int i=0;i<2;i++){
                R_012[i]=TY*R_002[i+1];
            }
            for(int i=0;i<2;i++){
                R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
            }
            for(int i=0;i<1;i++){
                R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
            }
            for(int i=0;i<1;i++){
                R_310[i]=TY*R_300[i+1];
            }
            for(int i=0;i<1;i++){
                R_220[i]=TX*R_120[i+1]+R_020[i+1];
            }
            for(int i=0;i<1;i++){
                R_130[i]=TX*R_030[i+1];
            }
            for(int i=0;i<1;i++){
                R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
            }
            for(int i=0;i<1;i++){
                R_301[i]=TZ*R_300[i+1];
            }
            for(int i=0;i<1;i++){
                R_211[i]=TY*R_201[i+1];
            }
            for(int i=0;i<1;i++){
                R_121[i]=TX*R_021[i+1];
            }
            for(int i=0;i<1;i++){
                R_031[i]=TZ*R_030[i+1];
            }
            for(int i=0;i<1;i++){
                R_202[i]=TX*R_102[i+1]+R_002[i+1];
            }
            for(int i=0;i<1;i++){
                R_112[i]=TX*R_012[i+1];
            }
            for(int i=0;i<1;i++){
                R_022[i]=TY*R_012[i+1]+R_002[i+1];
            }
            for(int i=0;i<1;i++){
                R_103[i]=TX*R_003[i+1];
            }
            for(int i=0;i<1;i++){
                R_013[i]=TY*R_003[i+1];
            }
            for(int i=0;i<1;i++){
                R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
            }
            // Bra-side Hermite expansion coefficients (d-shell, from Pd_001
            // and the 1/(2*zeta) factor aPin1).
                double Pd_101[3];
                double Pd_002[3];
                double Pd_102[3];
                double Pd_202[3];
                for(int i=0;i<3;i++){
                    Pd_101[i]=aPin1;
                }
                for(int i=0;i<3;i++){
                    Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
                }
                for(int i=0;i<3;i++){
                    Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
                }
                for(int i=0;i<3;i++){
                    Pd_202[i]=aPin1*Pd_101[i];
                }
            // Cartesian products of the bra coefficients for the six
            // d-type angular components.
            double P_002000000=Pd_002[0];
            double P_102000000=Pd_102[0];
            double P_202000000=Pd_202[0];
            double P_001001000=Pd_001[0]*Pd_001[1];
            double P_001101000=Pd_001[0]*Pd_101[1];
            double P_101001000=Pd_101[0]*Pd_001[1];
            double P_101101000=Pd_101[0]*Pd_101[1];
            double P_000002000=Pd_002[1];
            double P_000102000=Pd_102[1];
            double P_000202000=Pd_202[1];
            double P_001000001=Pd_001[0]*Pd_001[2];
            double P_001000101=Pd_001[0]*Pd_101[2];
            double P_101000001=Pd_101[0]*Pd_001[2];
            double P_101000101=Pd_101[0]*Pd_101[2];
            double P_000001001=Pd_001[1]*Pd_001[2];
            double P_000001101=Pd_001[1]*Pd_101[2];
            double P_000101001=Pd_101[1]*Pd_001[2];
            double P_000101101=Pd_101[1]*Pd_101[2];
            double P_000000002=Pd_002[2];
            double P_000000102=Pd_102[2];
            double P_000000202=Pd_202[2];
            // Contract the bra coefficients with the shifted R integrals
            // ("PR" intermediates; one per bra component x R shift).
            double PR_002000000000=P_002000000*R_000[0]+-1*P_102000000*R_100[0]+P_202000000*R_200[0];
            double PR_001001000000=P_001001000*R_000[0]+-1*P_001101000*R_010[0]+-1*P_101001000*R_100[0]+P_101101000*R_110[0];
            double PR_000002000000=P_000002000*R_000[0]+-1*P_000102000*R_010[0]+P_000202000*R_020[0];
            double PR_001000001000=P_001000001*R_000[0]+-1*P_001000101*R_001[0]+-1*P_101000001*R_100[0]+P_101000101*R_101[0];
            double PR_000001001000=P_000001001*R_000[0]+-1*P_000001101*R_001[0]+-1*P_000101001*R_010[0]+P_000101101*R_011[0];
            double PR_000000002000=P_000000002*R_000[0]+-1*P_000000102*R_001[0]+P_000000202*R_002[0];
            double PR_002000000001=P_002000000*R_001[0]+-1*P_102000000*R_101[0]+P_202000000*R_201[0];
            double PR_001001000001=P_001001000*R_001[0]+-1*P_001101000*R_011[0]+-1*P_101001000*R_101[0]+P_101101000*R_111[0];
            double PR_000002000001=P_000002000*R_001[0]+-1*P_000102000*R_011[0]+P_000202000*R_021[0];
            double PR_001000001001=P_001000001*R_001[0]+-1*P_001000101*R_002[0]+-1*P_101000001*R_101[0]+P_101000101*R_102[0];
            double PR_000001001001=P_000001001*R_001[0]+-1*P_000001101*R_002[0]+-1*P_000101001*R_011[0]+P_000101101*R_012[0];
            double PR_000000002001=P_000000002*R_001[0]+-1*P_000000102*R_002[0]+P_000000202*R_003[0];
            double PR_002000000010=P_002000000*R_010[0]+-1*P_102000000*R_110[0]+P_202000000*R_210[0];
            double PR_001001000010=P_001001000*R_010[0]+-1*P_001101000*R_020[0]+-1*P_101001000*R_110[0]+P_101101000*R_120[0];
            double PR_000002000010=P_000002000*R_010[0]+-1*P_000102000*R_020[0]+P_000202000*R_030[0];
            double PR_001000001010=P_001000001*R_010[0]+-1*P_001000101*R_011[0]+-1*P_101000001*R_110[0]+P_101000101*R_111[0];
            double PR_000001001010=P_000001001*R_010[0]+-1*P_000001101*R_011[0]+-1*P_000101001*R_020[0]+P_000101101*R_021[0];
            double PR_000000002010=P_000000002*R_010[0]+-1*P_000000102*R_011[0]+P_000000202*R_012[0];
            double PR_002000000100=P_002000000*R_100[0]+-1*P_102000000*R_200[0]+P_202000000*R_300[0];
            double PR_001001000100=P_001001000*R_100[0]+-1*P_001101000*R_110[0]+-1*P_101001000*R_200[0]+P_101101000*R_210[0];
            double PR_000002000100=P_000002000*R_100[0]+-1*P_000102000*R_110[0]+P_000202000*R_120[0];
            double PR_001000001100=P_001000001*R_100[0]+-1*P_001000101*R_101[0]+-1*P_101000001*R_200[0]+P_101000101*R_201[0];
            double PR_000001001100=P_000001001*R_100[0]+-1*P_000001101*R_101[0]+-1*P_000101001*R_110[0]+P_000101101*R_111[0];
            double PR_000000002100=P_000000002*R_100[0]+-1*P_000000102*R_101[0]+P_000000202*R_102[0];
            double PR_002000000002=P_002000000*R_002[0]+-1*P_102000000*R_102[0]+P_202000000*R_202[0];
            double PR_001001000002=P_001001000*R_002[0]+-1*P_001101000*R_012[0]+-1*P_101001000*R_102[0]+P_101101000*R_112[0];
            double PR_000002000002=P_000002000*R_002[0]+-1*P_000102000*R_012[0]+P_000202000*R_022[0];
            double PR_001000001002=P_001000001*R_002[0]+-1*P_001000101*R_003[0]+-1*P_101000001*R_102[0]+P_101000101*R_103[0];
            double PR_000001001002=P_000001001*R_002[0]+-1*P_000001101*R_003[0]+-1*P_000101001*R_012[0]+P_000101101*R_013[0];
            double PR_000000002002=P_000000002*R_002[0]+-1*P_000000102*R_003[0]+P_000000202*R_004[0];
            double PR_002000000011=P_002000000*R_011[0]+-1*P_102000000*R_111[0]+P_202000000*R_211[0];
            double PR_001001000011=P_001001000*R_011[0]+-1*P_001101000*R_021[0]+-1*P_101001000*R_111[0]+P_101101000*R_121[0];
            double PR_000002000011=P_000002000*R_011[0]+-1*P_000102000*R_021[0]+P_000202000*R_031[0];
            double PR_001000001011=P_001000001*R_011[0]+-1*P_001000101*R_012[0]+-1*P_101000001*R_111[0]+P_101000101*R_112[0];
            double PR_000001001011=P_000001001*R_011[0]+-1*P_000001101*R_012[0]+-1*P_000101001*R_021[0]+P_000101101*R_022[0];
            double PR_000000002011=P_000000002*R_011[0]+-1*P_000000102*R_012[0]+P_000000202*R_013[0];
            double PR_002000000020=P_002000000*R_020[0]+-1*P_102000000*R_120[0]+P_202000000*R_220[0];
            double PR_001001000020=P_001001000*R_020[0]+-1*P_001101000*R_030[0]+-1*P_101001000*R_120[0]+P_101101000*R_130[0];
            double PR_000002000020=P_000002000*R_020[0]+-1*P_000102000*R_030[0]+P_000202000*R_040[0];
            double PR_001000001020=P_001000001*R_020[0]+-1*P_001000101*R_021[0]+-1*P_101000001*R_120[0]+P_101000101*R_121[0];
            double PR_000001001020=P_000001001*R_020[0]+-1*P_000001101*R_021[0]+-1*P_000101001*R_030[0]+P_000101101*R_031[0];
            double PR_000000002020=P_000000002*R_020[0]+-1*P_000000102*R_021[0]+P_000000202*R_022[0];
            double PR_002000000101=P_002000000*R_101[0]+-1*P_102000000*R_201[0]+P_202000000*R_301[0];
            double PR_001001000101=P_001001000*R_101[0]+-1*P_001101000*R_111[0]+-1*P_101001000*R_201[0]+P_101101000*R_211[0];
            double PR_000002000101=P_000002000*R_101[0]+-1*P_000102000*R_111[0]+P_000202000*R_121[0];
            double PR_001000001101=P_001000001*R_101[0]+-1*P_001000101*R_102[0]+-1*P_101000001*R_201[0]+P_101000101*R_202[0];
            double PR_000001001101=P_000001001*R_101[0]+-1*P_000001101*R_102[0]+-1*P_000101001*R_111[0]+P_000101101*R_112[0];
            double PR_000000002101=P_000000002*R_101[0]+-1*P_000000102*R_102[0]+P_000000202*R_103[0];
            double PR_002000000110=P_002000000*R_110[0]+-1*P_102000000*R_210[0]+P_202000000*R_310[0];
            double PR_001001000110=P_001001000*R_110[0]+-1*P_001101000*R_120[0]+-1*P_101001000*R_210[0]+P_101101000*R_220[0];
            double PR_000002000110=P_000002000*R_110[0]+-1*P_000102000*R_120[0]+P_000202000*R_130[0];
            double PR_001000001110=P_001000001*R_110[0]+-1*P_001000101*R_111[0]+-1*P_101000001*R_210[0]+P_101000101*R_211[0];
            double PR_000001001110=P_000001001*R_110[0]+-1*P_000001101*R_111[0]+-1*P_000101001*R_120[0]+P_000101101*R_121[0];
            double PR_000000002110=P_000000002*R_110[0]+-1*P_000000102*R_111[0]+P_000000202*R_112[0];
            double PR_002000000200=P_002000000*R_200[0]+-1*P_102000000*R_300[0]+P_202000000*R_400[0];
            double PR_001001000200=P_001001000*R_200[0]+-1*P_001101000*R_210[0]+-1*P_101001000*R_300[0]+P_101101000*R_310[0];
            double PR_000002000200=P_000002000*R_200[0]+-1*P_000102000*R_210[0]+P_000202000*R_220[0];
            double PR_001000001200=P_001000001*R_200[0]+-1*P_001000101*R_201[0]+-1*P_101000001*R_300[0]+P_101000101*R_301[0];
            double PR_000001001200=P_000001001*R_200[0]+-1*P_000001101*R_201[0]+-1*P_000101001*R_210[0]+P_000101101*R_211[0];
            double PR_000000002200=P_000000002*R_200[0]+-1*P_000000102*R_201[0]+P_000000202*R_202[0];
            // Ket-side Hermite expansion coefficients (p,p angular momenta,
            // from Qd_010/Qd_001 and the 1/(2*eta) factor aQin1).
                double Qd_101[3];
                double Qd_110[3];
                double Qd_011[3];
                double Qd_111[3];
                double Qd_211[3];
                for(int i=0;i<3;i++){
                    Qd_101[i]=aQin1;
                }
                for(int i=0;i<3;i++){
                    Qd_110[i]=aQin1;
                }
                for(int i=0;i<3;i++){
                    Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
                }
                for(int i=0;i<3;i++){
                    Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
                }
                for(int i=0;i<3;i++){
                    Qd_211[i]=aQin1*Qd_101[i];
                }
            // Cartesian products of the ket coefficients.
            double Q_011000000=Qd_011[0];
            double Q_111000000=Qd_111[0];
            double Q_211000000=Qd_211[0];
            double Q_010001000=Qd_010[0]*Qd_001[1];
            double Q_010101000=Qd_010[0]*Qd_101[1];
            double Q_110001000=Qd_110[0]*Qd_001[1];
            double Q_110101000=Qd_110[0]*Qd_101[1];
            double Q_010000001=Qd_010[0]*Qd_001[2];
            double Q_010000101=Qd_010[0]*Qd_101[2];
            double Q_110000001=Qd_110[0]*Qd_001[2];
            double Q_110000101=Qd_110[0]*Qd_101[2];
            double Q_001010000=Qd_001[0]*Qd_010[1];
            double Q_001110000=Qd_001[0]*Qd_110[1];
            double Q_101010000=Qd_101[0]*Qd_010[1];
            double Q_101110000=Qd_101[0]*Qd_110[1];
            double Q_000011000=Qd_011[1];
            double Q_000111000=Qd_111[1];
            double Q_000211000=Qd_211[1];
            double Q_000010001=Qd_010[1]*Qd_001[2];
            double Q_000010101=Qd_010[1]*Qd_101[2];
            double Q_000110001=Qd_110[1]*Qd_001[2];
            double Q_000110101=Qd_110[1]*Qd_101[2];
            double Q_001000010=Qd_001[0]*Qd_010[2];
            double Q_001000110=Qd_001[0]*Qd_110[2];
            double Q_101000010=Qd_101[0]*Qd_010[2];
            double Q_101000110=Qd_101[0]*Qd_110[2];
            double Q_000001010=Qd_001[1]*Qd_010[2];
            double Q_000001110=Qd_001[1]*Qd_110[2];
            double Q_000101010=Qd_101[1]*Qd_010[2];
            double Q_000101110=Qd_101[1]*Qd_110[2];
            double Q_000000011=Qd_011[2];
            double Q_000000111=Qd_111[2];
            double Q_000000211=Qd_211[2];
            // Density-weighted accumulation into this thread's 3 partials
            // (Pmtrx index = bra component * 3 + ket column).
            ans_temp[ans_id*3+0]+=Pmtrx[0]*(Q_011000000*PR_002000000000+Q_111000000*PR_002000000100+Q_211000000*PR_002000000200);
            ans_temp[ans_id*3+0]+=Pmtrx[1]*(Q_010001000*PR_002000000000+Q_010101000*PR_002000000010+Q_110001000*PR_002000000100+Q_110101000*PR_002000000110);
            ans_temp[ans_id*3+0]+=Pmtrx[2]*(Q_010000001*PR_002000000000+Q_010000101*PR_002000000001+Q_110000001*PR_002000000100+Q_110000101*PR_002000000101);
            ans_temp[ans_id*3+1]+=Pmtrx[0]*(Q_001010000*PR_002000000000+Q_001110000*PR_002000000010+Q_101010000*PR_002000000100+Q_101110000*PR_002000000110);
            ans_temp[ans_id*3+1]+=Pmtrx[1]*(Q_000011000*PR_002000000000+Q_000111000*PR_002000000010+Q_000211000*PR_002000000020);
            ans_temp[ans_id*3+1]+=Pmtrx[2]*(Q_000010001*PR_002000000000+Q_000010101*PR_002000000001+Q_000110001*PR_002000000010+Q_000110101*PR_002000000011);
            ans_temp[ans_id*3+2]+=Pmtrx[0]*(Q_001000010*PR_002000000000+Q_001000110*PR_002000000001+Q_101000010*PR_002000000100+Q_101000110*PR_002000000101);
            ans_temp[ans_id*3+2]+=Pmtrx[1]*(Q_000001010*PR_002000000000+Q_000001110*PR_002000000001+Q_000101010*PR_002000000010+Q_000101110*PR_002000000011);
            ans_temp[ans_id*3+2]+=Pmtrx[2]*(Q_000000011*PR_002000000000+Q_000000111*PR_002000000001+Q_000000211*PR_002000000002);
            ans_temp[ans_id*3+0]+=Pmtrx[3]*(Q_011000000*PR_001001000000+Q_111000000*PR_001001000100+Q_211000000*PR_001001000200);
            ans_temp[ans_id*3+0]+=Pmtrx[4]*(Q_010001000*PR_001001000000+Q_010101000*PR_001001000010+Q_110001000*PR_001001000100+Q_110101000*PR_001001000110);
            ans_temp[ans_id*3+0]+=Pmtrx[5]*(Q_010000001*PR_001001000000+Q_010000101*PR_001001000001+Q_110000001*PR_001001000100+Q_110000101*PR_001001000101);
            ans_temp[ans_id*3+1]+=Pmtrx[3]*(Q_001010000*PR_001001000000+Q_001110000*PR_001001000010+Q_101010000*PR_001001000100+Q_101110000*PR_001001000110);
            ans_temp[ans_id*3+1]+=Pmtrx[4]*(Q_000011000*PR_001001000000+Q_000111000*PR_001001000010+Q_000211000*PR_001001000020);
            ans_temp[ans_id*3+1]+=Pmtrx[5]*(Q_000010001*PR_001001000000+Q_000010101*PR_001001000001+Q_000110001*PR_001001000010+Q_000110101*PR_001001000011);
            ans_temp[ans_id*3+2]+=Pmtrx[3]*(Q_001000010*PR_001001000000+Q_001000110*PR_001001000001+Q_101000010*PR_001001000100+Q_101000110*PR_001001000101);
            ans_temp[ans_id*3+2]+=Pmtrx[4]*(Q_000001010*PR_001001000000+Q_000001110*PR_001001000001+Q_000101010*PR_001001000010+Q_000101110*PR_001001000011);
            ans_temp[ans_id*3+2]+=Pmtrx[5]*(Q_000000011*PR_001001000000+Q_000000111*PR_001001000001+Q_000000211*PR_001001000002);
            ans_temp[ans_id*3+0]+=Pmtrx[6]*(Q_011000000*PR_000002000000+Q_111000000*PR_000002000100+Q_211000000*PR_000002000200);
            ans_temp[ans_id*3+0]+=Pmtrx[7]*(Q_010001000*PR_000002000000+Q_010101000*PR_000002000010+Q_110001000*PR_000002000100+Q_110101000*PR_000002000110);
            ans_temp[ans_id*3+0]+=Pmtrx[8]*(Q_010000001*PR_000002000000+Q_010000101*PR_000002000001+Q_110000001*PR_000002000100+Q_110000101*PR_000002000101);
            ans_temp[ans_id*3+1]+=Pmtrx[6]*(Q_001010000*PR_000002000000+Q_001110000*PR_000002000010+Q_101010000*PR_000002000100+Q_101110000*PR_000002000110);
            ans_temp[ans_id*3+1]+=Pmtrx[7]*(Q_000011000*PR_000002000000+Q_000111000*PR_000002000010+Q_000211000*PR_000002000020);
            ans_temp[ans_id*3+1]+=Pmtrx[8]*(Q_000010001*PR_000002000000+Q_000010101*PR_000002000001+Q_000110001*PR_000002000010+Q_000110101*PR_000002000011);
            ans_temp[ans_id*3+2]+=Pmtrx[6]*(Q_001000010*PR_000002000000+Q_001000110*PR_000002000001+Q_101000010*PR_000002000100+Q_101000110*PR_000002000101);
            ans_temp[ans_id*3+2]+=Pmtrx[7]*(Q_000001010*PR_000002000000+Q_000001110*PR_000002000001+Q_000101010*PR_000002000010+Q_000101110*PR_000002000011);
            ans_temp[ans_id*3+2]+=Pmtrx[8]*(Q_000000011*PR_000002000000+Q_000000111*PR_000002000001+Q_000000211*PR_000002000002);
            ans_temp[ans_id*3+0]+=Pmtrx[9]*(Q_011000000*PR_001000001000+Q_111000000*PR_001000001100+Q_211000000*PR_001000001200);
            ans_temp[ans_id*3+0]+=Pmtrx[10]*(Q_010001000*PR_001000001000+Q_010101000*PR_001000001010+Q_110001000*PR_001000001100+Q_110101000*PR_001000001110);
            ans_temp[ans_id*3+0]+=Pmtrx[11]*(Q_010000001*PR_001000001000+Q_010000101*PR_001000001001+Q_110000001*PR_001000001100+Q_110000101*PR_001000001101);
            ans_temp[ans_id*3+1]+=Pmtrx[9]*(Q_001010000*PR_001000001000+Q_001110000*PR_001000001010+Q_101010000*PR_001000001100+Q_101110000*PR_001000001110);
            ans_temp[ans_id*3+1]+=Pmtrx[10]*(Q_000011000*PR_001000001000+Q_000111000*PR_001000001010+Q_000211000*PR_001000001020);
            ans_temp[ans_id*3+1]+=Pmtrx[11]*(Q_000010001*PR_001000001000+Q_000010101*PR_001000001001+Q_000110001*PR_001000001010+Q_000110101*PR_001000001011);
            ans_temp[ans_id*3+2]+=Pmtrx[9]*(Q_001000010*PR_001000001000+Q_001000110*PR_001000001001+Q_101000010*PR_001000001100+Q_101000110*PR_001000001101);
            ans_temp[ans_id*3+2]+=Pmtrx[10]*(Q_000001010*PR_001000001000+Q_000001110*PR_001000001001+Q_000101010*PR_001000001010+Q_000101110*PR_001000001011);
            ans_temp[ans_id*3+2]+=Pmtrx[11]*(Q_000000011*PR_001000001000+Q_000000111*PR_001000001001+Q_000000211*PR_001000001002);
            ans_temp[ans_id*3+0]+=Pmtrx[12]*(Q_011000000*PR_000001001000+Q_111000000*PR_000001001100+Q_211000000*PR_000001001200);
            ans_temp[ans_id*3+0]+=Pmtrx[13]*(Q_010001000*PR_000001001000+Q_010101000*PR_000001001010+Q_110001000*PR_000001001100+Q_110101000*PR_000001001110);
            ans_temp[ans_id*3+0]+=Pmtrx[14]*(Q_010000001*PR_000001001000+Q_010000101*PR_000001001001+Q_110000001*PR_000001001100+Q_110000101*PR_000001001101);
            ans_temp[ans_id*3+1]+=Pmtrx[12]*(Q_001010000*PR_000001001000+Q_001110000*PR_000001001010+Q_101010000*PR_000001001100+Q_101110000*PR_000001001110);
            ans_temp[ans_id*3+1]+=Pmtrx[13]*(Q_000011000*PR_000001001000+Q_000111000*PR_000001001010+Q_000211000*PR_000001001020);
            ans_temp[ans_id*3+1]+=Pmtrx[14]*(Q_000010001*PR_000001001000+Q_000010101*PR_000001001001+Q_000110001*PR_000001001010+Q_000110101*PR_000001001011);
            ans_temp[ans_id*3+2]+=Pmtrx[12]*(Q_001000010*PR_000001001000+Q_001000110*PR_000001001001+Q_101000010*PR_000001001100+Q_101000110*PR_000001001101);
            ans_temp[ans_id*3+2]+=Pmtrx[13]*(Q_000001010*PR_000001001000+Q_000001110*PR_000001001001+Q_000101010*PR_000001001010+Q_000101110*PR_000001001011);
            ans_temp[ans_id*3+2]+=Pmtrx[14]*(Q_000000011*PR_000001001000+Q_000000111*PR_000001001001+Q_000000211*PR_000001001002);
            ans_temp[ans_id*3+0]+=Pmtrx[15]*(Q_011000000*PR_000000002000+Q_111000000*PR_000000002100+Q_211000000*PR_000000002200);
            ans_temp[ans_id*3+0]+=Pmtrx[16]*(Q_010001000*PR_000000002000+Q_010101000*PR_000000002010+Q_110001000*PR_000000002100+Q_110101000*PR_000000002110);
            ans_temp[ans_id*3+0]+=Pmtrx[17]*(Q_010000001*PR_000000002000+Q_010000101*PR_000000002001+Q_110000001*PR_000000002100+Q_110000101*PR_000000002101);
            ans_temp[ans_id*3+1]+=Pmtrx[15]*(Q_001010000*PR_000000002000+Q_001110000*PR_000000002010+Q_101010000*PR_000000002100+Q_101110000*PR_000000002110);
            ans_temp[ans_id*3+1]+=Pmtrx[16]*(Q_000011000*PR_000000002000+Q_000111000*PR_000000002010+Q_000211000*PR_000000002020);
            ans_temp[ans_id*3+1]+=Pmtrx[17]*(Q_000010001*PR_000000002000+Q_000010101*PR_000000002001+Q_000110001*PR_000000002010+Q_000110101*PR_000000002011);
            ans_temp[ans_id*3+2]+=Pmtrx[15]*(Q_001000010*PR_000000002000+Q_001000110*PR_000000002001+Q_101000010*PR_000000002100+Q_101000110*PR_000000002101);
            ans_temp[ans_id*3+2]+=Pmtrx[16]*(Q_000001010*PR_000000002000+Q_000001110*PR_000000002001+Q_000101010*PR_000000002010+Q_000101110*PR_000000002011);
            ans_temp[ans_id*3+2]+=Pmtrx[17]*(Q_000000011*PR_000000002000+Q_000000111*PR_000000002001+Q_000000211*PR_000000002002);
            }
        }
        // Tree reduction of the per-thread partials in shared memory.  The
        // barrier at the top of each iteration also separates the read/write
        // halves; requires blockDim.x == NTHREAD (power of two).  The final
        // read below is safe without an extra barrier because thread 0 itself
        // wrote ans_temp[0..2] in the last iteration.
        __syncthreads();
        int num_thread=NTHREAD/2;
        while (num_thread!=0){
            __syncthreads();
            if(tId_x<num_thread){
                for(int ians=0;ians<3;ians++){
                    ans_temp[tId_x*3+ians]+=ans_temp[(tId_x+num_thread)*3+ians];
                }
            }
            num_thread/=2;
        }
        // Thread 0 publishes the 3 reduced values for this contracted pair.
        if(tId_x==0){
            for(int ians=0;ians<3;ians++){
                ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*3+ians]=ans_temp[(tId_x)*3+ians];
            }
        }
    }
    }
}
__global__ void MD_Kq_sdpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*3];
for(int i=0;i<3;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
double P_002000000=Pd_002[0];
double P_102000000=Pd_102[0];
double P_202000000=Pd_202[0];
double P_001001000=Pd_001[0]*Pd_001[1];
double P_001101000=Pd_001[0]*Pd_101[1];
double P_101001000=Pd_101[0]*Pd_001[1];
double P_101101000=Pd_101[0]*Pd_101[1];
double P_000002000=Pd_002[1];
double P_000102000=Pd_102[1];
double P_000202000=Pd_202[1];
double P_001000001=Pd_001[0]*Pd_001[2];
double P_001000101=Pd_001[0]*Pd_101[2];
double P_101000001=Pd_101[0]*Pd_001[2];
double P_101000101=Pd_101[0]*Pd_101[2];
double P_000001001=Pd_001[1]*Pd_001[2];
double P_000001101=Pd_001[1]*Pd_101[2];
double P_000101001=Pd_101[1]*Pd_001[2];
double P_000101101=Pd_101[1]*Pd_101[2];
double P_000000002=Pd_002[2];
double P_000000102=Pd_102[2];
double P_000000202=Pd_202[2];
ans_temp[ans_id*3+0]+=Pmtrx[0]*(P_002000000*QR_011000000000+P_102000000*QR_011000000100+P_202000000*QR_011000000200);
ans_temp[ans_id*3+0]+=Pmtrx[1]*(P_002000000*QR_010001000000+P_102000000*QR_010001000100+P_202000000*QR_010001000200);
ans_temp[ans_id*3+0]+=Pmtrx[2]*(P_002000000*QR_010000001000+P_102000000*QR_010000001100+P_202000000*QR_010000001200);
ans_temp[ans_id*3+1]+=Pmtrx[0]*(P_002000000*QR_001010000000+P_102000000*QR_001010000100+P_202000000*QR_001010000200);
ans_temp[ans_id*3+1]+=Pmtrx[1]*(P_002000000*QR_000011000000+P_102000000*QR_000011000100+P_202000000*QR_000011000200);
ans_temp[ans_id*3+1]+=Pmtrx[2]*(P_002000000*QR_000010001000+P_102000000*QR_000010001100+P_202000000*QR_000010001200);
ans_temp[ans_id*3+2]+=Pmtrx[0]*(P_002000000*QR_001000010000+P_102000000*QR_001000010100+P_202000000*QR_001000010200);
ans_temp[ans_id*3+2]+=Pmtrx[1]*(P_002000000*QR_000001010000+P_102000000*QR_000001010100+P_202000000*QR_000001010200);
ans_temp[ans_id*3+2]+=Pmtrx[2]*(P_002000000*QR_000000011000+P_102000000*QR_000000011100+P_202000000*QR_000000011200);
ans_temp[ans_id*3+0]+=Pmtrx[3]*(P_001001000*QR_011000000000+P_001101000*QR_011000000010+P_101001000*QR_011000000100+P_101101000*QR_011000000110);
ans_temp[ans_id*3+0]+=Pmtrx[4]*(P_001001000*QR_010001000000+P_001101000*QR_010001000010+P_101001000*QR_010001000100+P_101101000*QR_010001000110);
ans_temp[ans_id*3+0]+=Pmtrx[5]*(P_001001000*QR_010000001000+P_001101000*QR_010000001010+P_101001000*QR_010000001100+P_101101000*QR_010000001110);
ans_temp[ans_id*3+1]+=Pmtrx[3]*(P_001001000*QR_001010000000+P_001101000*QR_001010000010+P_101001000*QR_001010000100+P_101101000*QR_001010000110);
ans_temp[ans_id*3+1]+=Pmtrx[4]*(P_001001000*QR_000011000000+P_001101000*QR_000011000010+P_101001000*QR_000011000100+P_101101000*QR_000011000110);
ans_temp[ans_id*3+1]+=Pmtrx[5]*(P_001001000*QR_000010001000+P_001101000*QR_000010001010+P_101001000*QR_000010001100+P_101101000*QR_000010001110);
ans_temp[ans_id*3+2]+=Pmtrx[3]*(P_001001000*QR_001000010000+P_001101000*QR_001000010010+P_101001000*QR_001000010100+P_101101000*QR_001000010110);
ans_temp[ans_id*3+2]+=Pmtrx[4]*(P_001001000*QR_000001010000+P_001101000*QR_000001010010+P_101001000*QR_000001010100+P_101101000*QR_000001010110);
ans_temp[ans_id*3+2]+=Pmtrx[5]*(P_001001000*QR_000000011000+P_001101000*QR_000000011010+P_101001000*QR_000000011100+P_101101000*QR_000000011110);
ans_temp[ans_id*3+0]+=Pmtrx[6]*(P_000002000*QR_011000000000+P_000102000*QR_011000000010+P_000202000*QR_011000000020);
ans_temp[ans_id*3+0]+=Pmtrx[7]*(P_000002000*QR_010001000000+P_000102000*QR_010001000010+P_000202000*QR_010001000020);
ans_temp[ans_id*3+0]+=Pmtrx[8]*(P_000002000*QR_010000001000+P_000102000*QR_010000001010+P_000202000*QR_010000001020);
ans_temp[ans_id*3+1]+=Pmtrx[6]*(P_000002000*QR_001010000000+P_000102000*QR_001010000010+P_000202000*QR_001010000020);
ans_temp[ans_id*3+1]+=Pmtrx[7]*(P_000002000*QR_000011000000+P_000102000*QR_000011000010+P_000202000*QR_000011000020);
ans_temp[ans_id*3+1]+=Pmtrx[8]*(P_000002000*QR_000010001000+P_000102000*QR_000010001010+P_000202000*QR_000010001020);
ans_temp[ans_id*3+2]+=Pmtrx[6]*(P_000002000*QR_001000010000+P_000102000*QR_001000010010+P_000202000*QR_001000010020);
ans_temp[ans_id*3+2]+=Pmtrx[7]*(P_000002000*QR_000001010000+P_000102000*QR_000001010010+P_000202000*QR_000001010020);
ans_temp[ans_id*3+2]+=Pmtrx[8]*(P_000002000*QR_000000011000+P_000102000*QR_000000011010+P_000202000*QR_000000011020);
ans_temp[ans_id*3+0]+=Pmtrx[9]*(P_001000001*QR_011000000000+P_001000101*QR_011000000001+P_101000001*QR_011000000100+P_101000101*QR_011000000101);
ans_temp[ans_id*3+0]+=Pmtrx[10]*(P_001000001*QR_010001000000+P_001000101*QR_010001000001+P_101000001*QR_010001000100+P_101000101*QR_010001000101);
ans_temp[ans_id*3+0]+=Pmtrx[11]*(P_001000001*QR_010000001000+P_001000101*QR_010000001001+P_101000001*QR_010000001100+P_101000101*QR_010000001101);
ans_temp[ans_id*3+1]+=Pmtrx[9]*(P_001000001*QR_001010000000+P_001000101*QR_001010000001+P_101000001*QR_001010000100+P_101000101*QR_001010000101);
ans_temp[ans_id*3+1]+=Pmtrx[10]*(P_001000001*QR_000011000000+P_001000101*QR_000011000001+P_101000001*QR_000011000100+P_101000101*QR_000011000101);
ans_temp[ans_id*3+1]+=Pmtrx[11]*(P_001000001*QR_000010001000+P_001000101*QR_000010001001+P_101000001*QR_000010001100+P_101000101*QR_000010001101);
ans_temp[ans_id*3+2]+=Pmtrx[9]*(P_001000001*QR_001000010000+P_001000101*QR_001000010001+P_101000001*QR_001000010100+P_101000101*QR_001000010101);
ans_temp[ans_id*3+2]+=Pmtrx[10]*(P_001000001*QR_000001010000+P_001000101*QR_000001010001+P_101000001*QR_000001010100+P_101000101*QR_000001010101);
ans_temp[ans_id*3+2]+=Pmtrx[11]*(P_001000001*QR_000000011000+P_001000101*QR_000000011001+P_101000001*QR_000000011100+P_101000101*QR_000000011101);
ans_temp[ans_id*3+0]+=Pmtrx[12]*(P_000001001*QR_011000000000+P_000001101*QR_011000000001+P_000101001*QR_011000000010+P_000101101*QR_011000000011);
ans_temp[ans_id*3+0]+=Pmtrx[13]*(P_000001001*QR_010001000000+P_000001101*QR_010001000001+P_000101001*QR_010001000010+P_000101101*QR_010001000011);
ans_temp[ans_id*3+0]+=Pmtrx[14]*(P_000001001*QR_010000001000+P_000001101*QR_010000001001+P_000101001*QR_010000001010+P_000101101*QR_010000001011);
ans_temp[ans_id*3+1]+=Pmtrx[12]*(P_000001001*QR_001010000000+P_000001101*QR_001010000001+P_000101001*QR_001010000010+P_000101101*QR_001010000011);
ans_temp[ans_id*3+1]+=Pmtrx[13]*(P_000001001*QR_000011000000+P_000001101*QR_000011000001+P_000101001*QR_000011000010+P_000101101*QR_000011000011);
ans_temp[ans_id*3+1]+=Pmtrx[14]*(P_000001001*QR_000010001000+P_000001101*QR_000010001001+P_000101001*QR_000010001010+P_000101101*QR_000010001011);
ans_temp[ans_id*3+2]+=Pmtrx[12]*(P_000001001*QR_001000010000+P_000001101*QR_001000010001+P_000101001*QR_001000010010+P_000101101*QR_001000010011);
ans_temp[ans_id*3+2]+=Pmtrx[13]*(P_000001001*QR_000001010000+P_000001101*QR_000001010001+P_000101001*QR_000001010010+P_000101101*QR_000001010011);
ans_temp[ans_id*3+2]+=Pmtrx[14]*(P_000001001*QR_000000011000+P_000001101*QR_000000011001+P_000101001*QR_000000011010+P_000101101*QR_000000011011);
ans_temp[ans_id*3+0]+=Pmtrx[15]*(P_000000002*QR_011000000000+P_000000102*QR_011000000001+P_000000202*QR_011000000002);
ans_temp[ans_id*3+0]+=Pmtrx[16]*(P_000000002*QR_010001000000+P_000000102*QR_010001000001+P_000000202*QR_010001000002);
ans_temp[ans_id*3+0]+=Pmtrx[17]*(P_000000002*QR_010000001000+P_000000102*QR_010000001001+P_000000202*QR_010000001002);
ans_temp[ans_id*3+1]+=Pmtrx[15]*(P_000000002*QR_001010000000+P_000000102*QR_001010000001+P_000000202*QR_001010000002);
ans_temp[ans_id*3+1]+=Pmtrx[16]*(P_000000002*QR_000011000000+P_000000102*QR_000011000001+P_000000202*QR_000011000002);
ans_temp[ans_id*3+1]+=Pmtrx[17]*(P_000000002*QR_000010001000+P_000000102*QR_000010001001+P_000000202*QR_000010001002);
ans_temp[ans_id*3+2]+=Pmtrx[15]*(P_000000002*QR_001000010000+P_000000102*QR_001000010001+P_000000202*QR_001000010002);
ans_temp[ans_id*3+2]+=Pmtrx[16]*(P_000000002*QR_000001010000+P_000000102*QR_000001010001+P_000000202*QR_000001010002);
ans_temp[ans_id*3+2]+=Pmtrx[17]*(P_000000002*QR_000000011000+P_000000102*QR_000000011001+P_000000202*QR_000000011002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<3;ians++){
ans_temp[tId_x*3+ians]+=ans_temp[(tId_x+num_thread)*3+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<3;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*3+ians]=ans_temp[(tId_x)*3+ians];
}
}
}
}
}
__global__ void MD_Kp_pppp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[9]={0.0};
__shared__ double ans_temp[NTHREAD*9];
for(int i=0;i<9;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
if(i_contrc_bra>j_contrc_ket){
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=0.0;
}
}
continue;
}
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<3;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_001[3];
temp_int2=tex1Dfetch(tex_PB,jj*3+0);
Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+1);
Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+2);
Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=2*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
double Pd_101[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
double P_011000000=Pd_011[0];
double P_111000000=Pd_111[0];
double P_211000000=Pd_211[0];
double P_010001000=Pd_010[0]*Pd_001[1];
double P_010101000=Pd_010[0]*Pd_101[1];
double P_110001000=Pd_110[0]*Pd_001[1];
double P_110101000=Pd_110[0]*Pd_101[1];
double P_010000001=Pd_010[0]*Pd_001[2];
double P_010000101=Pd_010[0]*Pd_101[2];
double P_110000001=Pd_110[0]*Pd_001[2];
double P_110000101=Pd_110[0]*Pd_101[2];
double P_001010000=Pd_001[0]*Pd_010[1];
double P_001110000=Pd_001[0]*Pd_110[1];
double P_101010000=Pd_101[0]*Pd_010[1];
double P_101110000=Pd_101[0]*Pd_110[1];
double P_000011000=Pd_011[1];
double P_000111000=Pd_111[1];
double P_000211000=Pd_211[1];
double P_000010001=Pd_010[1]*Pd_001[2];
double P_000010101=Pd_010[1]*Pd_101[2];
double P_000110001=Pd_110[1]*Pd_001[2];
double P_000110101=Pd_110[1]*Pd_101[2];
double P_001000010=Pd_001[0]*Pd_010[2];
double P_001000110=Pd_001[0]*Pd_110[2];
double P_101000010=Pd_101[0]*Pd_010[2];
double P_101000110=Pd_101[0]*Pd_110[2];
double P_000001010=Pd_001[1]*Pd_010[2];
double P_000001110=Pd_001[1]*Pd_110[2];
double P_000101010=Pd_101[1]*Pd_010[2];
double P_000101110=Pd_101[1]*Pd_110[2];
double P_000000011=Pd_011[2];
double P_000000111=Pd_111[2];
double P_000000211=Pd_211[2];
double PR_011000000000=P_011000000*R_000[0]+-1*P_111000000*R_100[0]+P_211000000*R_200[0];
double PR_010001000000=P_010001000*R_000[0]+-1*P_010101000*R_010[0]+-1*P_110001000*R_100[0]+P_110101000*R_110[0];
double PR_010000001000=P_010000001*R_000[0]+-1*P_010000101*R_001[0]+-1*P_110000001*R_100[0]+P_110000101*R_101[0];
double PR_001010000000=P_001010000*R_000[0]+-1*P_001110000*R_010[0]+-1*P_101010000*R_100[0]+P_101110000*R_110[0];
double PR_000011000000=P_000011000*R_000[0]+-1*P_000111000*R_010[0]+P_000211000*R_020[0];
double PR_000010001000=P_000010001*R_000[0]+-1*P_000010101*R_001[0]+-1*P_000110001*R_010[0]+P_000110101*R_011[0];
double PR_001000010000=P_001000010*R_000[0]+-1*P_001000110*R_001[0]+-1*P_101000010*R_100[0]+P_101000110*R_101[0];
double PR_000001010000=P_000001010*R_000[0]+-1*P_000001110*R_001[0]+-1*P_000101010*R_010[0]+P_000101110*R_011[0];
double PR_000000011000=P_000000011*R_000[0]+-1*P_000000111*R_001[0]+P_000000211*R_002[0];
double PR_011000000001=P_011000000*R_001[0]+-1*P_111000000*R_101[0]+P_211000000*R_201[0];
double PR_010001000001=P_010001000*R_001[0]+-1*P_010101000*R_011[0]+-1*P_110001000*R_101[0]+P_110101000*R_111[0];
double PR_010000001001=P_010000001*R_001[0]+-1*P_010000101*R_002[0]+-1*P_110000001*R_101[0]+P_110000101*R_102[0];
double PR_001010000001=P_001010000*R_001[0]+-1*P_001110000*R_011[0]+-1*P_101010000*R_101[0]+P_101110000*R_111[0];
double PR_000011000001=P_000011000*R_001[0]+-1*P_000111000*R_011[0]+P_000211000*R_021[0];
double PR_000010001001=P_000010001*R_001[0]+-1*P_000010101*R_002[0]+-1*P_000110001*R_011[0]+P_000110101*R_012[0];
double PR_001000010001=P_001000010*R_001[0]+-1*P_001000110*R_002[0]+-1*P_101000010*R_101[0]+P_101000110*R_102[0];
double PR_000001010001=P_000001010*R_001[0]+-1*P_000001110*R_002[0]+-1*P_000101010*R_011[0]+P_000101110*R_012[0];
double PR_000000011001=P_000000011*R_001[0]+-1*P_000000111*R_002[0]+P_000000211*R_003[0];
double PR_011000000010=P_011000000*R_010[0]+-1*P_111000000*R_110[0]+P_211000000*R_210[0];
double PR_010001000010=P_010001000*R_010[0]+-1*P_010101000*R_020[0]+-1*P_110001000*R_110[0]+P_110101000*R_120[0];
double PR_010000001010=P_010000001*R_010[0]+-1*P_010000101*R_011[0]+-1*P_110000001*R_110[0]+P_110000101*R_111[0];
double PR_001010000010=P_001010000*R_010[0]+-1*P_001110000*R_020[0]+-1*P_101010000*R_110[0]+P_101110000*R_120[0];
double PR_000011000010=P_000011000*R_010[0]+-1*P_000111000*R_020[0]+P_000211000*R_030[0];
double PR_000010001010=P_000010001*R_010[0]+-1*P_000010101*R_011[0]+-1*P_000110001*R_020[0]+P_000110101*R_021[0];
double PR_001000010010=P_001000010*R_010[0]+-1*P_001000110*R_011[0]+-1*P_101000010*R_110[0]+P_101000110*R_111[0];
double PR_000001010010=P_000001010*R_010[0]+-1*P_000001110*R_011[0]+-1*P_000101010*R_020[0]+P_000101110*R_021[0];
double PR_000000011010=P_000000011*R_010[0]+-1*P_000000111*R_011[0]+P_000000211*R_012[0];
double PR_011000000100=P_011000000*R_100[0]+-1*P_111000000*R_200[0]+P_211000000*R_300[0];
double PR_010001000100=P_010001000*R_100[0]+-1*P_010101000*R_110[0]+-1*P_110001000*R_200[0]+P_110101000*R_210[0];
double PR_010000001100=P_010000001*R_100[0]+-1*P_010000101*R_101[0]+-1*P_110000001*R_200[0]+P_110000101*R_201[0];
double PR_001010000100=P_001010000*R_100[0]+-1*P_001110000*R_110[0]+-1*P_101010000*R_200[0]+P_101110000*R_210[0];
double PR_000011000100=P_000011000*R_100[0]+-1*P_000111000*R_110[0]+P_000211000*R_120[0];
double PR_000010001100=P_000010001*R_100[0]+-1*P_000010101*R_101[0]+-1*P_000110001*R_110[0]+P_000110101*R_111[0];
double PR_001000010100=P_001000010*R_100[0]+-1*P_001000110*R_101[0]+-1*P_101000010*R_200[0]+P_101000110*R_201[0];
double PR_000001010100=P_000001010*R_100[0]+-1*P_000001110*R_101[0]+-1*P_000101010*R_110[0]+P_000101110*R_111[0];
double PR_000000011100=P_000000011*R_100[0]+-1*P_000000111*R_101[0]+P_000000211*R_102[0];
double PR_011000000002=P_011000000*R_002[0]+-1*P_111000000*R_102[0]+P_211000000*R_202[0];
double PR_010001000002=P_010001000*R_002[0]+-1*P_010101000*R_012[0]+-1*P_110001000*R_102[0]+P_110101000*R_112[0];
double PR_010000001002=P_010000001*R_002[0]+-1*P_010000101*R_003[0]+-1*P_110000001*R_102[0]+P_110000101*R_103[0];
double PR_001010000002=P_001010000*R_002[0]+-1*P_001110000*R_012[0]+-1*P_101010000*R_102[0]+P_101110000*R_112[0];
double PR_000011000002=P_000011000*R_002[0]+-1*P_000111000*R_012[0]+P_000211000*R_022[0];
double PR_000010001002=P_000010001*R_002[0]+-1*P_000010101*R_003[0]+-1*P_000110001*R_012[0]+P_000110101*R_013[0];
double PR_001000010002=P_001000010*R_002[0]+-1*P_001000110*R_003[0]+-1*P_101000010*R_102[0]+P_101000110*R_103[0];
double PR_000001010002=P_000001010*R_002[0]+-1*P_000001110*R_003[0]+-1*P_000101010*R_012[0]+P_000101110*R_013[0];
double PR_000000011002=P_000000011*R_002[0]+-1*P_000000111*R_003[0]+P_000000211*R_004[0];
double PR_011000000011=P_011000000*R_011[0]+-1*P_111000000*R_111[0]+P_211000000*R_211[0];
double PR_010001000011=P_010001000*R_011[0]+-1*P_010101000*R_021[0]+-1*P_110001000*R_111[0]+P_110101000*R_121[0];
double PR_010000001011=P_010000001*R_011[0]+-1*P_010000101*R_012[0]+-1*P_110000001*R_111[0]+P_110000101*R_112[0];
double PR_001010000011=P_001010000*R_011[0]+-1*P_001110000*R_021[0]+-1*P_101010000*R_111[0]+P_101110000*R_121[0];
double PR_000011000011=P_000011000*R_011[0]+-1*P_000111000*R_021[0]+P_000211000*R_031[0];
double PR_000010001011=P_000010001*R_011[0]+-1*P_000010101*R_012[0]+-1*P_000110001*R_021[0]+P_000110101*R_022[0];
double PR_001000010011=P_001000010*R_011[0]+-1*P_001000110*R_012[0]+-1*P_101000010*R_111[0]+P_101000110*R_112[0];
double PR_000001010011=P_000001010*R_011[0]+-1*P_000001110*R_012[0]+-1*P_000101010*R_021[0]+P_000101110*R_022[0];
double PR_000000011011=P_000000011*R_011[0]+-1*P_000000111*R_012[0]+P_000000211*R_013[0];
double PR_011000000020=P_011000000*R_020[0]+-1*P_111000000*R_120[0]+P_211000000*R_220[0];
double PR_010001000020=P_010001000*R_020[0]+-1*P_010101000*R_030[0]+-1*P_110001000*R_120[0]+P_110101000*R_130[0];
double PR_010000001020=P_010000001*R_020[0]+-1*P_010000101*R_021[0]+-1*P_110000001*R_120[0]+P_110000101*R_121[0];
double PR_001010000020=P_001010000*R_020[0]+-1*P_001110000*R_030[0]+-1*P_101010000*R_120[0]+P_101110000*R_130[0];
double PR_000011000020=P_000011000*R_020[0]+-1*P_000111000*R_030[0]+P_000211000*R_040[0];
double PR_000010001020=P_000010001*R_020[0]+-1*P_000010101*R_021[0]+-1*P_000110001*R_030[0]+P_000110101*R_031[0];
double PR_001000010020=P_001000010*R_020[0]+-1*P_001000110*R_021[0]+-1*P_101000010*R_120[0]+P_101000110*R_121[0];
double PR_000001010020=P_000001010*R_020[0]+-1*P_000001110*R_021[0]+-1*P_000101010*R_030[0]+P_000101110*R_031[0];
double PR_000000011020=P_000000011*R_020[0]+-1*P_000000111*R_021[0]+P_000000211*R_022[0];
double PR_011000000101=P_011000000*R_101[0]+-1*P_111000000*R_201[0]+P_211000000*R_301[0];
double PR_010001000101=P_010001000*R_101[0]+-1*P_010101000*R_111[0]+-1*P_110001000*R_201[0]+P_110101000*R_211[0];
double PR_010000001101=P_010000001*R_101[0]+-1*P_010000101*R_102[0]+-1*P_110000001*R_201[0]+P_110000101*R_202[0];
double PR_001010000101=P_001010000*R_101[0]+-1*P_001110000*R_111[0]+-1*P_101010000*R_201[0]+P_101110000*R_211[0];
double PR_000011000101=P_000011000*R_101[0]+-1*P_000111000*R_111[0]+P_000211000*R_121[0];
double PR_000010001101=P_000010001*R_101[0]+-1*P_000010101*R_102[0]+-1*P_000110001*R_111[0]+P_000110101*R_112[0];
double PR_001000010101=P_001000010*R_101[0]+-1*P_001000110*R_102[0]+-1*P_101000010*R_201[0]+P_101000110*R_202[0];
double PR_000001010101=P_000001010*R_101[0]+-1*P_000001110*R_102[0]+-1*P_000101010*R_111[0]+P_000101110*R_112[0];
double PR_000000011101=P_000000011*R_101[0]+-1*P_000000111*R_102[0]+P_000000211*R_103[0];
double PR_011000000110=P_011000000*R_110[0]+-1*P_111000000*R_210[0]+P_211000000*R_310[0];
double PR_010001000110=P_010001000*R_110[0]+-1*P_010101000*R_120[0]+-1*P_110001000*R_210[0]+P_110101000*R_220[0];
double PR_010000001110=P_010000001*R_110[0]+-1*P_010000101*R_111[0]+-1*P_110000001*R_210[0]+P_110000101*R_211[0];
double PR_001010000110=P_001010000*R_110[0]+-1*P_001110000*R_120[0]+-1*P_101010000*R_210[0]+P_101110000*R_220[0];
double PR_000011000110=P_000011000*R_110[0]+-1*P_000111000*R_120[0]+P_000211000*R_130[0];
double PR_000010001110=P_000010001*R_110[0]+-1*P_000010101*R_111[0]+-1*P_000110001*R_120[0]+P_000110101*R_121[0];
double PR_001000010110=P_001000010*R_110[0]+-1*P_001000110*R_111[0]+-1*P_101000010*R_210[0]+P_101000110*R_211[0];
double PR_000001010110=P_000001010*R_110[0]+-1*P_000001110*R_111[0]+-1*P_000101010*R_120[0]+P_000101110*R_121[0];
double PR_000000011110=P_000000011*R_110[0]+-1*P_000000111*R_111[0]+P_000000211*R_112[0];
double PR_011000000200=P_011000000*R_200[0]+-1*P_111000000*R_300[0]+P_211000000*R_400[0];
double PR_010001000200=P_010001000*R_200[0]+-1*P_010101000*R_210[0]+-1*P_110001000*R_300[0]+P_110101000*R_310[0];
double PR_010000001200=P_010000001*R_200[0]+-1*P_010000101*R_201[0]+-1*P_110000001*R_300[0]+P_110000101*R_301[0];
double PR_001010000200=P_001010000*R_200[0]+-1*P_001110000*R_210[0]+-1*P_101010000*R_300[0]+P_101110000*R_310[0];
double PR_000011000200=P_000011000*R_200[0]+-1*P_000111000*R_210[0]+P_000211000*R_220[0];
double PR_000010001200=P_000010001*R_200[0]+-1*P_000010101*R_201[0]+-1*P_000110001*R_210[0]+P_000110101*R_211[0];
double PR_001000010200=P_001000010*R_200[0]+-1*P_001000110*R_201[0]+-1*P_101000010*R_300[0]+P_101000110*R_301[0];
double PR_000001010200=P_000001010*R_200[0]+-1*P_000001110*R_201[0]+-1*P_000101010*R_210[0]+P_000101110*R_211[0];
double PR_000000011200=P_000000011*R_200[0]+-1*P_000000111*R_201[0]+P_000000211*R_202[0];
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
ans_temp[ans_id*9+0]+=Pmtrx[0]*(Q_011000000*PR_011000000000+Q_111000000*PR_011000000100+Q_211000000*PR_011000000200);
ans_temp[ans_id*9+0]+=Pmtrx[1]*(Q_010001000*PR_011000000000+Q_010101000*PR_011000000010+Q_110001000*PR_011000000100+Q_110101000*PR_011000000110);
ans_temp[ans_id*9+0]+=Pmtrx[2]*(Q_010000001*PR_011000000000+Q_010000101*PR_011000000001+Q_110000001*PR_011000000100+Q_110000101*PR_011000000101);
ans_temp[ans_id*9+1]+=Pmtrx[0]*(Q_001010000*PR_011000000000+Q_001110000*PR_011000000010+Q_101010000*PR_011000000100+Q_101110000*PR_011000000110);
ans_temp[ans_id*9+1]+=Pmtrx[1]*(Q_000011000*PR_011000000000+Q_000111000*PR_011000000010+Q_000211000*PR_011000000020);
ans_temp[ans_id*9+1]+=Pmtrx[2]*(Q_000010001*PR_011000000000+Q_000010101*PR_011000000001+Q_000110001*PR_011000000010+Q_000110101*PR_011000000011);
ans_temp[ans_id*9+2]+=Pmtrx[0]*(Q_001000010*PR_011000000000+Q_001000110*PR_011000000001+Q_101000010*PR_011000000100+Q_101000110*PR_011000000101);
ans_temp[ans_id*9+2]+=Pmtrx[1]*(Q_000001010*PR_011000000000+Q_000001110*PR_011000000001+Q_000101010*PR_011000000010+Q_000101110*PR_011000000011);
ans_temp[ans_id*9+2]+=Pmtrx[2]*(Q_000000011*PR_011000000000+Q_000000111*PR_011000000001+Q_000000211*PR_011000000002);
ans_temp[ans_id*9+0]+=Pmtrx[3]*(Q_011000000*PR_010001000000+Q_111000000*PR_010001000100+Q_211000000*PR_010001000200);
ans_temp[ans_id*9+0]+=Pmtrx[4]*(Q_010001000*PR_010001000000+Q_010101000*PR_010001000010+Q_110001000*PR_010001000100+Q_110101000*PR_010001000110);
ans_temp[ans_id*9+0]+=Pmtrx[5]*(Q_010000001*PR_010001000000+Q_010000101*PR_010001000001+Q_110000001*PR_010001000100+Q_110000101*PR_010001000101);
ans_temp[ans_id*9+1]+=Pmtrx[3]*(Q_001010000*PR_010001000000+Q_001110000*PR_010001000010+Q_101010000*PR_010001000100+Q_101110000*PR_010001000110);
ans_temp[ans_id*9+1]+=Pmtrx[4]*(Q_000011000*PR_010001000000+Q_000111000*PR_010001000010+Q_000211000*PR_010001000020);
ans_temp[ans_id*9+1]+=Pmtrx[5]*(Q_000010001*PR_010001000000+Q_000010101*PR_010001000001+Q_000110001*PR_010001000010+Q_000110101*PR_010001000011);
ans_temp[ans_id*9+2]+=Pmtrx[3]*(Q_001000010*PR_010001000000+Q_001000110*PR_010001000001+Q_101000010*PR_010001000100+Q_101000110*PR_010001000101);
ans_temp[ans_id*9+2]+=Pmtrx[4]*(Q_000001010*PR_010001000000+Q_000001110*PR_010001000001+Q_000101010*PR_010001000010+Q_000101110*PR_010001000011);
ans_temp[ans_id*9+2]+=Pmtrx[5]*(Q_000000011*PR_010001000000+Q_000000111*PR_010001000001+Q_000000211*PR_010001000002);
ans_temp[ans_id*9+0]+=Pmtrx[6]*(Q_011000000*PR_010000001000+Q_111000000*PR_010000001100+Q_211000000*PR_010000001200);
ans_temp[ans_id*9+0]+=Pmtrx[7]*(Q_010001000*PR_010000001000+Q_010101000*PR_010000001010+Q_110001000*PR_010000001100+Q_110101000*PR_010000001110);
ans_temp[ans_id*9+0]+=Pmtrx[8]*(Q_010000001*PR_010000001000+Q_010000101*PR_010000001001+Q_110000001*PR_010000001100+Q_110000101*PR_010000001101);
ans_temp[ans_id*9+1]+=Pmtrx[6]*(Q_001010000*PR_010000001000+Q_001110000*PR_010000001010+Q_101010000*PR_010000001100+Q_101110000*PR_010000001110);
ans_temp[ans_id*9+1]+=Pmtrx[7]*(Q_000011000*PR_010000001000+Q_000111000*PR_010000001010+Q_000211000*PR_010000001020);
ans_temp[ans_id*9+1]+=Pmtrx[8]*(Q_000010001*PR_010000001000+Q_000010101*PR_010000001001+Q_000110001*PR_010000001010+Q_000110101*PR_010000001011);
ans_temp[ans_id*9+2]+=Pmtrx[6]*(Q_001000010*PR_010000001000+Q_001000110*PR_010000001001+Q_101000010*PR_010000001100+Q_101000110*PR_010000001101);
ans_temp[ans_id*9+2]+=Pmtrx[7]*(Q_000001010*PR_010000001000+Q_000001110*PR_010000001001+Q_000101010*PR_010000001010+Q_000101110*PR_010000001011);
ans_temp[ans_id*9+2]+=Pmtrx[8]*(Q_000000011*PR_010000001000+Q_000000111*PR_010000001001+Q_000000211*PR_010000001002);
ans_temp[ans_id*9+3]+=Pmtrx[0]*(Q_011000000*PR_001010000000+Q_111000000*PR_001010000100+Q_211000000*PR_001010000200);
ans_temp[ans_id*9+3]+=Pmtrx[1]*(Q_010001000*PR_001010000000+Q_010101000*PR_001010000010+Q_110001000*PR_001010000100+Q_110101000*PR_001010000110);
ans_temp[ans_id*9+3]+=Pmtrx[2]*(Q_010000001*PR_001010000000+Q_010000101*PR_001010000001+Q_110000001*PR_001010000100+Q_110000101*PR_001010000101);
ans_temp[ans_id*9+4]+=Pmtrx[0]*(Q_001010000*PR_001010000000+Q_001110000*PR_001010000010+Q_101010000*PR_001010000100+Q_101110000*PR_001010000110);
ans_temp[ans_id*9+4]+=Pmtrx[1]*(Q_000011000*PR_001010000000+Q_000111000*PR_001010000010+Q_000211000*PR_001010000020);
ans_temp[ans_id*9+4]+=Pmtrx[2]*(Q_000010001*PR_001010000000+Q_000010101*PR_001010000001+Q_000110001*PR_001010000010+Q_000110101*PR_001010000011);
ans_temp[ans_id*9+5]+=Pmtrx[0]*(Q_001000010*PR_001010000000+Q_001000110*PR_001010000001+Q_101000010*PR_001010000100+Q_101000110*PR_001010000101);
ans_temp[ans_id*9+5]+=Pmtrx[1]*(Q_000001010*PR_001010000000+Q_000001110*PR_001010000001+Q_000101010*PR_001010000010+Q_000101110*PR_001010000011);
ans_temp[ans_id*9+5]+=Pmtrx[2]*(Q_000000011*PR_001010000000+Q_000000111*PR_001010000001+Q_000000211*PR_001010000002);
ans_temp[ans_id*9+3]+=Pmtrx[3]*(Q_011000000*PR_000011000000+Q_111000000*PR_000011000100+Q_211000000*PR_000011000200);
ans_temp[ans_id*9+3]+=Pmtrx[4]*(Q_010001000*PR_000011000000+Q_010101000*PR_000011000010+Q_110001000*PR_000011000100+Q_110101000*PR_000011000110);
ans_temp[ans_id*9+3]+=Pmtrx[5]*(Q_010000001*PR_000011000000+Q_010000101*PR_000011000001+Q_110000001*PR_000011000100+Q_110000101*PR_000011000101);
ans_temp[ans_id*9+4]+=Pmtrx[3]*(Q_001010000*PR_000011000000+Q_001110000*PR_000011000010+Q_101010000*PR_000011000100+Q_101110000*PR_000011000110);
ans_temp[ans_id*9+4]+=Pmtrx[4]*(Q_000011000*PR_000011000000+Q_000111000*PR_000011000010+Q_000211000*PR_000011000020);
ans_temp[ans_id*9+4]+=Pmtrx[5]*(Q_000010001*PR_000011000000+Q_000010101*PR_000011000001+Q_000110001*PR_000011000010+Q_000110101*PR_000011000011);
ans_temp[ans_id*9+5]+=Pmtrx[3]*(Q_001000010*PR_000011000000+Q_001000110*PR_000011000001+Q_101000010*PR_000011000100+Q_101000110*PR_000011000101);
ans_temp[ans_id*9+5]+=Pmtrx[4]*(Q_000001010*PR_000011000000+Q_000001110*PR_000011000001+Q_000101010*PR_000011000010+Q_000101110*PR_000011000011);
ans_temp[ans_id*9+5]+=Pmtrx[5]*(Q_000000011*PR_000011000000+Q_000000111*PR_000011000001+Q_000000211*PR_000011000002);
ans_temp[ans_id*9+3]+=Pmtrx[6]*(Q_011000000*PR_000010001000+Q_111000000*PR_000010001100+Q_211000000*PR_000010001200);
ans_temp[ans_id*9+3]+=Pmtrx[7]*(Q_010001000*PR_000010001000+Q_010101000*PR_000010001010+Q_110001000*PR_000010001100+Q_110101000*PR_000010001110);
ans_temp[ans_id*9+3]+=Pmtrx[8]*(Q_010000001*PR_000010001000+Q_010000101*PR_000010001001+Q_110000001*PR_000010001100+Q_110000101*PR_000010001101);
ans_temp[ans_id*9+4]+=Pmtrx[6]*(Q_001010000*PR_000010001000+Q_001110000*PR_000010001010+Q_101010000*PR_000010001100+Q_101110000*PR_000010001110);
ans_temp[ans_id*9+4]+=Pmtrx[7]*(Q_000011000*PR_000010001000+Q_000111000*PR_000010001010+Q_000211000*PR_000010001020);
ans_temp[ans_id*9+4]+=Pmtrx[8]*(Q_000010001*PR_000010001000+Q_000010101*PR_000010001001+Q_000110001*PR_000010001010+Q_000110101*PR_000010001011);
ans_temp[ans_id*9+5]+=Pmtrx[6]*(Q_001000010*PR_000010001000+Q_001000110*PR_000010001001+Q_101000010*PR_000010001100+Q_101000110*PR_000010001101);
ans_temp[ans_id*9+5]+=Pmtrx[7]*(Q_000001010*PR_000010001000+Q_000001110*PR_000010001001+Q_000101010*PR_000010001010+Q_000101110*PR_000010001011);
ans_temp[ans_id*9+5]+=Pmtrx[8]*(Q_000000011*PR_000010001000+Q_000000111*PR_000010001001+Q_000000211*PR_000010001002);
ans_temp[ans_id*9+6]+=Pmtrx[0]*(Q_011000000*PR_001000010000+Q_111000000*PR_001000010100+Q_211000000*PR_001000010200);
ans_temp[ans_id*9+6]+=Pmtrx[1]*(Q_010001000*PR_001000010000+Q_010101000*PR_001000010010+Q_110001000*PR_001000010100+Q_110101000*PR_001000010110);
ans_temp[ans_id*9+6]+=Pmtrx[2]*(Q_010000001*PR_001000010000+Q_010000101*PR_001000010001+Q_110000001*PR_001000010100+Q_110000101*PR_001000010101);
ans_temp[ans_id*9+7]+=Pmtrx[0]*(Q_001010000*PR_001000010000+Q_001110000*PR_001000010010+Q_101010000*PR_001000010100+Q_101110000*PR_001000010110);
ans_temp[ans_id*9+7]+=Pmtrx[1]*(Q_000011000*PR_001000010000+Q_000111000*PR_001000010010+Q_000211000*PR_001000010020);
ans_temp[ans_id*9+7]+=Pmtrx[2]*(Q_000010001*PR_001000010000+Q_000010101*PR_001000010001+Q_000110001*PR_001000010010+Q_000110101*PR_001000010011);
ans_temp[ans_id*9+8]+=Pmtrx[0]*(Q_001000010*PR_001000010000+Q_001000110*PR_001000010001+Q_101000010*PR_001000010100+Q_101000110*PR_001000010101);
ans_temp[ans_id*9+8]+=Pmtrx[1]*(Q_000001010*PR_001000010000+Q_000001110*PR_001000010001+Q_000101010*PR_001000010010+Q_000101110*PR_001000010011);
ans_temp[ans_id*9+8]+=Pmtrx[2]*(Q_000000011*PR_001000010000+Q_000000111*PR_001000010001+Q_000000211*PR_001000010002);
ans_temp[ans_id*9+6]+=Pmtrx[3]*(Q_011000000*PR_000001010000+Q_111000000*PR_000001010100+Q_211000000*PR_000001010200);
ans_temp[ans_id*9+6]+=Pmtrx[4]*(Q_010001000*PR_000001010000+Q_010101000*PR_000001010010+Q_110001000*PR_000001010100+Q_110101000*PR_000001010110);
ans_temp[ans_id*9+6]+=Pmtrx[5]*(Q_010000001*PR_000001010000+Q_010000101*PR_000001010001+Q_110000001*PR_000001010100+Q_110000101*PR_000001010101);
ans_temp[ans_id*9+7]+=Pmtrx[3]*(Q_001010000*PR_000001010000+Q_001110000*PR_000001010010+Q_101010000*PR_000001010100+Q_101110000*PR_000001010110);
ans_temp[ans_id*9+7]+=Pmtrx[4]*(Q_000011000*PR_000001010000+Q_000111000*PR_000001010010+Q_000211000*PR_000001010020);
ans_temp[ans_id*9+7]+=Pmtrx[5]*(Q_000010001*PR_000001010000+Q_000010101*PR_000001010001+Q_000110001*PR_000001010010+Q_000110101*PR_000001010011);
ans_temp[ans_id*9+8]+=Pmtrx[3]*(Q_001000010*PR_000001010000+Q_001000110*PR_000001010001+Q_101000010*PR_000001010100+Q_101000110*PR_000001010101);
ans_temp[ans_id*9+8]+=Pmtrx[4]*(Q_000001010*PR_000001010000+Q_000001110*PR_000001010001+Q_000101010*PR_000001010010+Q_000101110*PR_000001010011);
ans_temp[ans_id*9+8]+=Pmtrx[5]*(Q_000000011*PR_000001010000+Q_000000111*PR_000001010001+Q_000000211*PR_000001010002);
ans_temp[ans_id*9+6]+=Pmtrx[6]*(Q_011000000*PR_000000011000+Q_111000000*PR_000000011100+Q_211000000*PR_000000011200);
ans_temp[ans_id*9+6]+=Pmtrx[7]*(Q_010001000*PR_000000011000+Q_010101000*PR_000000011010+Q_110001000*PR_000000011100+Q_110101000*PR_000000011110);
ans_temp[ans_id*9+6]+=Pmtrx[8]*(Q_010000001*PR_000000011000+Q_010000101*PR_000000011001+Q_110000001*PR_000000011100+Q_110000101*PR_000000011101);
ans_temp[ans_id*9+7]+=Pmtrx[6]*(Q_001010000*PR_000000011000+Q_001110000*PR_000000011010+Q_101010000*PR_000000011100+Q_101110000*PR_000000011110);
ans_temp[ans_id*9+7]+=Pmtrx[7]*(Q_000011000*PR_000000011000+Q_000111000*PR_000000011010+Q_000211000*PR_000000011020);
ans_temp[ans_id*9+7]+=Pmtrx[8]*(Q_000010001*PR_000000011000+Q_000010101*PR_000000011001+Q_000110001*PR_000000011010+Q_000110101*PR_000000011011);
ans_temp[ans_id*9+8]+=Pmtrx[6]*(Q_001000010*PR_000000011000+Q_001000110*PR_000000011001+Q_101000010*PR_000000011100+Q_101000110*PR_000000011101);
ans_temp[ans_id*9+8]+=Pmtrx[7]*(Q_000001010*PR_000000011000+Q_000001110*PR_000000011001+Q_000101010*PR_000000011010+Q_000101110*PR_000000011011);
ans_temp[ans_id*9+8]+=Pmtrx[8]*(Q_000000011*PR_000000011000+Q_000000111*PR_000000011001+Q_000000211*PR_000000011002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<9;ians++){
ans_temp[tId_x*9+ians]+=ans_temp[(tId_x+num_thread)*9+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=ans_temp[(tId_x)*9+ians];
}
}
}
}
}
__global__ void MD_Kq_pppp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[9]={0.0};
__shared__ double ans_temp[NTHREAD*9];
for(int i=0;i<9;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
if(i_contrc_bra>j_contrc_ket){
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=0.0;
}
}
continue;
}
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<3;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=2*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double Pd_101[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
double P_011000000=Pd_011[0];
double P_111000000=Pd_111[0];
double P_211000000=Pd_211[0];
double P_010001000=Pd_010[0]*Pd_001[1];
double P_010101000=Pd_010[0]*Pd_101[1];
double P_110001000=Pd_110[0]*Pd_001[1];
double P_110101000=Pd_110[0]*Pd_101[1];
double P_010000001=Pd_010[0]*Pd_001[2];
double P_010000101=Pd_010[0]*Pd_101[2];
double P_110000001=Pd_110[0]*Pd_001[2];
double P_110000101=Pd_110[0]*Pd_101[2];
double P_001010000=Pd_001[0]*Pd_010[1];
double P_001110000=Pd_001[0]*Pd_110[1];
double P_101010000=Pd_101[0]*Pd_010[1];
double P_101110000=Pd_101[0]*Pd_110[1];
double P_000011000=Pd_011[1];
double P_000111000=Pd_111[1];
double P_000211000=Pd_211[1];
double P_000010001=Pd_010[1]*Pd_001[2];
double P_000010101=Pd_010[1]*Pd_101[2];
double P_000110001=Pd_110[1]*Pd_001[2];
double P_000110101=Pd_110[1]*Pd_101[2];
double P_001000010=Pd_001[0]*Pd_010[2];
double P_001000110=Pd_001[0]*Pd_110[2];
double P_101000010=Pd_101[0]*Pd_010[2];
double P_101000110=Pd_101[0]*Pd_110[2];
double P_000001010=Pd_001[1]*Pd_010[2];
double P_000001110=Pd_001[1]*Pd_110[2];
double P_000101010=Pd_101[1]*Pd_010[2];
double P_000101110=Pd_101[1]*Pd_110[2];
double P_000000011=Pd_011[2];
double P_000000111=Pd_111[2];
double P_000000211=Pd_211[2];
ans_temp[ans_id*9+0]+=Pmtrx[0]*(P_011000000*QR_011000000000+P_111000000*QR_011000000100+P_211000000*QR_011000000200);
ans_temp[ans_id*9+0]+=Pmtrx[1]*(P_011000000*QR_010001000000+P_111000000*QR_010001000100+P_211000000*QR_010001000200);
ans_temp[ans_id*9+0]+=Pmtrx[2]*(P_011000000*QR_010000001000+P_111000000*QR_010000001100+P_211000000*QR_010000001200);
ans_temp[ans_id*9+1]+=Pmtrx[0]*(P_011000000*QR_001010000000+P_111000000*QR_001010000100+P_211000000*QR_001010000200);
ans_temp[ans_id*9+1]+=Pmtrx[1]*(P_011000000*QR_000011000000+P_111000000*QR_000011000100+P_211000000*QR_000011000200);
ans_temp[ans_id*9+1]+=Pmtrx[2]*(P_011000000*QR_000010001000+P_111000000*QR_000010001100+P_211000000*QR_000010001200);
ans_temp[ans_id*9+2]+=Pmtrx[0]*(P_011000000*QR_001000010000+P_111000000*QR_001000010100+P_211000000*QR_001000010200);
ans_temp[ans_id*9+2]+=Pmtrx[1]*(P_011000000*QR_000001010000+P_111000000*QR_000001010100+P_211000000*QR_000001010200);
ans_temp[ans_id*9+2]+=Pmtrx[2]*(P_011000000*QR_000000011000+P_111000000*QR_000000011100+P_211000000*QR_000000011200);
ans_temp[ans_id*9+0]+=Pmtrx[3]*(P_010001000*QR_011000000000+P_010101000*QR_011000000010+P_110001000*QR_011000000100+P_110101000*QR_011000000110);
ans_temp[ans_id*9+0]+=Pmtrx[4]*(P_010001000*QR_010001000000+P_010101000*QR_010001000010+P_110001000*QR_010001000100+P_110101000*QR_010001000110);
ans_temp[ans_id*9+0]+=Pmtrx[5]*(P_010001000*QR_010000001000+P_010101000*QR_010000001010+P_110001000*QR_010000001100+P_110101000*QR_010000001110);
ans_temp[ans_id*9+1]+=Pmtrx[3]*(P_010001000*QR_001010000000+P_010101000*QR_001010000010+P_110001000*QR_001010000100+P_110101000*QR_001010000110);
ans_temp[ans_id*9+1]+=Pmtrx[4]*(P_010001000*QR_000011000000+P_010101000*QR_000011000010+P_110001000*QR_000011000100+P_110101000*QR_000011000110);
ans_temp[ans_id*9+1]+=Pmtrx[5]*(P_010001000*QR_000010001000+P_010101000*QR_000010001010+P_110001000*QR_000010001100+P_110101000*QR_000010001110);
ans_temp[ans_id*9+2]+=Pmtrx[3]*(P_010001000*QR_001000010000+P_010101000*QR_001000010010+P_110001000*QR_001000010100+P_110101000*QR_001000010110);
ans_temp[ans_id*9+2]+=Pmtrx[4]*(P_010001000*QR_000001010000+P_010101000*QR_000001010010+P_110001000*QR_000001010100+P_110101000*QR_000001010110);
ans_temp[ans_id*9+2]+=Pmtrx[5]*(P_010001000*QR_000000011000+P_010101000*QR_000000011010+P_110001000*QR_000000011100+P_110101000*QR_000000011110);
ans_temp[ans_id*9+0]+=Pmtrx[6]*(P_010000001*QR_011000000000+P_010000101*QR_011000000001+P_110000001*QR_011000000100+P_110000101*QR_011000000101);
ans_temp[ans_id*9+0]+=Pmtrx[7]*(P_010000001*QR_010001000000+P_010000101*QR_010001000001+P_110000001*QR_010001000100+P_110000101*QR_010001000101);
ans_temp[ans_id*9+0]+=Pmtrx[8]*(P_010000001*QR_010000001000+P_010000101*QR_010000001001+P_110000001*QR_010000001100+P_110000101*QR_010000001101);
ans_temp[ans_id*9+1]+=Pmtrx[6]*(P_010000001*QR_001010000000+P_010000101*QR_001010000001+P_110000001*QR_001010000100+P_110000101*QR_001010000101);
ans_temp[ans_id*9+1]+=Pmtrx[7]*(P_010000001*QR_000011000000+P_010000101*QR_000011000001+P_110000001*QR_000011000100+P_110000101*QR_000011000101);
ans_temp[ans_id*9+1]+=Pmtrx[8]*(P_010000001*QR_000010001000+P_010000101*QR_000010001001+P_110000001*QR_000010001100+P_110000101*QR_000010001101);
ans_temp[ans_id*9+2]+=Pmtrx[6]*(P_010000001*QR_001000010000+P_010000101*QR_001000010001+P_110000001*QR_001000010100+P_110000101*QR_001000010101);
ans_temp[ans_id*9+2]+=Pmtrx[7]*(P_010000001*QR_000001010000+P_010000101*QR_000001010001+P_110000001*QR_000001010100+P_110000101*QR_000001010101);
ans_temp[ans_id*9+2]+=Pmtrx[8]*(P_010000001*QR_000000011000+P_010000101*QR_000000011001+P_110000001*QR_000000011100+P_110000101*QR_000000011101);
ans_temp[ans_id*9+3]+=Pmtrx[0]*(P_001010000*QR_011000000000+P_001110000*QR_011000000010+P_101010000*QR_011000000100+P_101110000*QR_011000000110);
ans_temp[ans_id*9+3]+=Pmtrx[1]*(P_001010000*QR_010001000000+P_001110000*QR_010001000010+P_101010000*QR_010001000100+P_101110000*QR_010001000110);
ans_temp[ans_id*9+3]+=Pmtrx[2]*(P_001010000*QR_010000001000+P_001110000*QR_010000001010+P_101010000*QR_010000001100+P_101110000*QR_010000001110);
ans_temp[ans_id*9+4]+=Pmtrx[0]*(P_001010000*QR_001010000000+P_001110000*QR_001010000010+P_101010000*QR_001010000100+P_101110000*QR_001010000110);
ans_temp[ans_id*9+4]+=Pmtrx[1]*(P_001010000*QR_000011000000+P_001110000*QR_000011000010+P_101010000*QR_000011000100+P_101110000*QR_000011000110);
ans_temp[ans_id*9+4]+=Pmtrx[2]*(P_001010000*QR_000010001000+P_001110000*QR_000010001010+P_101010000*QR_000010001100+P_101110000*QR_000010001110);
ans_temp[ans_id*9+5]+=Pmtrx[0]*(P_001010000*QR_001000010000+P_001110000*QR_001000010010+P_101010000*QR_001000010100+P_101110000*QR_001000010110);
ans_temp[ans_id*9+5]+=Pmtrx[1]*(P_001010000*QR_000001010000+P_001110000*QR_000001010010+P_101010000*QR_000001010100+P_101110000*QR_000001010110);
ans_temp[ans_id*9+5]+=Pmtrx[2]*(P_001010000*QR_000000011000+P_001110000*QR_000000011010+P_101010000*QR_000000011100+P_101110000*QR_000000011110);
ans_temp[ans_id*9+3]+=Pmtrx[3]*(P_000011000*QR_011000000000+P_000111000*QR_011000000010+P_000211000*QR_011000000020);
ans_temp[ans_id*9+3]+=Pmtrx[4]*(P_000011000*QR_010001000000+P_000111000*QR_010001000010+P_000211000*QR_010001000020);
ans_temp[ans_id*9+3]+=Pmtrx[5]*(P_000011000*QR_010000001000+P_000111000*QR_010000001010+P_000211000*QR_010000001020);
ans_temp[ans_id*9+4]+=Pmtrx[3]*(P_000011000*QR_001010000000+P_000111000*QR_001010000010+P_000211000*QR_001010000020);
ans_temp[ans_id*9+4]+=Pmtrx[4]*(P_000011000*QR_000011000000+P_000111000*QR_000011000010+P_000211000*QR_000011000020);
ans_temp[ans_id*9+4]+=Pmtrx[5]*(P_000011000*QR_000010001000+P_000111000*QR_000010001010+P_000211000*QR_000010001020);
ans_temp[ans_id*9+5]+=Pmtrx[3]*(P_000011000*QR_001000010000+P_000111000*QR_001000010010+P_000211000*QR_001000010020);
ans_temp[ans_id*9+5]+=Pmtrx[4]*(P_000011000*QR_000001010000+P_000111000*QR_000001010010+P_000211000*QR_000001010020);
ans_temp[ans_id*9+5]+=Pmtrx[5]*(P_000011000*QR_000000011000+P_000111000*QR_000000011010+P_000211000*QR_000000011020);
ans_temp[ans_id*9+3]+=Pmtrx[6]*(P_000010001*QR_011000000000+P_000010101*QR_011000000001+P_000110001*QR_011000000010+P_000110101*QR_011000000011);
ans_temp[ans_id*9+3]+=Pmtrx[7]*(P_000010001*QR_010001000000+P_000010101*QR_010001000001+P_000110001*QR_010001000010+P_000110101*QR_010001000011);
ans_temp[ans_id*9+3]+=Pmtrx[8]*(P_000010001*QR_010000001000+P_000010101*QR_010000001001+P_000110001*QR_010000001010+P_000110101*QR_010000001011);
ans_temp[ans_id*9+4]+=Pmtrx[6]*(P_000010001*QR_001010000000+P_000010101*QR_001010000001+P_000110001*QR_001010000010+P_000110101*QR_001010000011);
ans_temp[ans_id*9+4]+=Pmtrx[7]*(P_000010001*QR_000011000000+P_000010101*QR_000011000001+P_000110001*QR_000011000010+P_000110101*QR_000011000011);
ans_temp[ans_id*9+4]+=Pmtrx[8]*(P_000010001*QR_000010001000+P_000010101*QR_000010001001+P_000110001*QR_000010001010+P_000110101*QR_000010001011);
ans_temp[ans_id*9+5]+=Pmtrx[6]*(P_000010001*QR_001000010000+P_000010101*QR_001000010001+P_000110001*QR_001000010010+P_000110101*QR_001000010011);
ans_temp[ans_id*9+5]+=Pmtrx[7]*(P_000010001*QR_000001010000+P_000010101*QR_000001010001+P_000110001*QR_000001010010+P_000110101*QR_000001010011);
ans_temp[ans_id*9+5]+=Pmtrx[8]*(P_000010001*QR_000000011000+P_000010101*QR_000000011001+P_000110001*QR_000000011010+P_000110101*QR_000000011011);
ans_temp[ans_id*9+6]+=Pmtrx[0]*(P_001000010*QR_011000000000+P_001000110*QR_011000000001+P_101000010*QR_011000000100+P_101000110*QR_011000000101);
ans_temp[ans_id*9+6]+=Pmtrx[1]*(P_001000010*QR_010001000000+P_001000110*QR_010001000001+P_101000010*QR_010001000100+P_101000110*QR_010001000101);
ans_temp[ans_id*9+6]+=Pmtrx[2]*(P_001000010*QR_010000001000+P_001000110*QR_010000001001+P_101000010*QR_010000001100+P_101000110*QR_010000001101);
ans_temp[ans_id*9+7]+=Pmtrx[0]*(P_001000010*QR_001010000000+P_001000110*QR_001010000001+P_101000010*QR_001010000100+P_101000110*QR_001010000101);
ans_temp[ans_id*9+7]+=Pmtrx[1]*(P_001000010*QR_000011000000+P_001000110*QR_000011000001+P_101000010*QR_000011000100+P_101000110*QR_000011000101);
ans_temp[ans_id*9+7]+=Pmtrx[2]*(P_001000010*QR_000010001000+P_001000110*QR_000010001001+P_101000010*QR_000010001100+P_101000110*QR_000010001101);
ans_temp[ans_id*9+8]+=Pmtrx[0]*(P_001000010*QR_001000010000+P_001000110*QR_001000010001+P_101000010*QR_001000010100+P_101000110*QR_001000010101);
ans_temp[ans_id*9+8]+=Pmtrx[1]*(P_001000010*QR_000001010000+P_001000110*QR_000001010001+P_101000010*QR_000001010100+P_101000110*QR_000001010101);
ans_temp[ans_id*9+8]+=Pmtrx[2]*(P_001000010*QR_000000011000+P_001000110*QR_000000011001+P_101000010*QR_000000011100+P_101000110*QR_000000011101);
ans_temp[ans_id*9+6]+=Pmtrx[3]*(P_000001010*QR_011000000000+P_000001110*QR_011000000001+P_000101010*QR_011000000010+P_000101110*QR_011000000011);
ans_temp[ans_id*9+6]+=Pmtrx[4]*(P_000001010*QR_010001000000+P_000001110*QR_010001000001+P_000101010*QR_010001000010+P_000101110*QR_010001000011);
ans_temp[ans_id*9+6]+=Pmtrx[5]*(P_000001010*QR_010000001000+P_000001110*QR_010000001001+P_000101010*QR_010000001010+P_000101110*QR_010000001011);
ans_temp[ans_id*9+7]+=Pmtrx[3]*(P_000001010*QR_001010000000+P_000001110*QR_001010000001+P_000101010*QR_001010000010+P_000101110*QR_001010000011);
ans_temp[ans_id*9+7]+=Pmtrx[4]*(P_000001010*QR_000011000000+P_000001110*QR_000011000001+P_000101010*QR_000011000010+P_000101110*QR_000011000011);
ans_temp[ans_id*9+7]+=Pmtrx[5]*(P_000001010*QR_000010001000+P_000001110*QR_000010001001+P_000101010*QR_000010001010+P_000101110*QR_000010001011);
ans_temp[ans_id*9+8]+=Pmtrx[3]*(P_000001010*QR_001000010000+P_000001110*QR_001000010001+P_000101010*QR_001000010010+P_000101110*QR_001000010011);
ans_temp[ans_id*9+8]+=Pmtrx[4]*(P_000001010*QR_000001010000+P_000001110*QR_000001010001+P_000101010*QR_000001010010+P_000101110*QR_000001010011);
ans_temp[ans_id*9+8]+=Pmtrx[5]*(P_000001010*QR_000000011000+P_000001110*QR_000000011001+P_000101010*QR_000000011010+P_000101110*QR_000000011011);
ans_temp[ans_id*9+6]+=Pmtrx[6]*(P_000000011*QR_011000000000+P_000000111*QR_011000000001+P_000000211*QR_011000000002);
ans_temp[ans_id*9+6]+=Pmtrx[7]*(P_000000011*QR_010001000000+P_000000111*QR_010001000001+P_000000211*QR_010001000002);
ans_temp[ans_id*9+6]+=Pmtrx[8]*(P_000000011*QR_010000001000+P_000000111*QR_010000001001+P_000000211*QR_010000001002);
ans_temp[ans_id*9+7]+=Pmtrx[6]*(P_000000011*QR_001010000000+P_000000111*QR_001010000001+P_000000211*QR_001010000002);
ans_temp[ans_id*9+7]+=Pmtrx[7]*(P_000000011*QR_000011000000+P_000000111*QR_000011000001+P_000000211*QR_000011000002);
ans_temp[ans_id*9+7]+=Pmtrx[8]*(P_000000011*QR_000010001000+P_000000111*QR_000010001001+P_000000211*QR_000010001002);
ans_temp[ans_id*9+8]+=Pmtrx[6]*(P_000000011*QR_001000010000+P_000000111*QR_001000010001+P_000000211*QR_001000010002);
ans_temp[ans_id*9+8]+=Pmtrx[7]*(P_000000011*QR_000001010000+P_000000111*QR_000001010001+P_000000211*QR_000001010002);
ans_temp[ans_id*9+8]+=Pmtrx[8]*(P_000000011*QR_000000011000+P_000000111*QR_000000011001+P_000000211*QR_000000011002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<9;ians++){
ans_temp[tId_x*9+ians]+=ans_temp[(tId_x+num_thread)*9+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=ans_temp[(tId_x)*9+ians];
}
}
}
}
}
__global__ void MD_Kp_pdpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*9];
for(int i=0;i<9;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_001[3];
temp_int2=tex1Dfetch(tex_PB,jj*3+0);
Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+1);
Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+2);
Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[6];
Ft_fs_5(5,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
double R_100[5];
double R_200[4];
double R_300[3];
double R_400[2];
double R_500[1];
double R_010[5];
double R_110[4];
double R_210[3];
double R_310[2];
double R_410[1];
double R_020[4];
double R_120[3];
double R_220[2];
double R_320[1];
double R_030[3];
double R_130[2];
double R_230[1];
double R_040[2];
double R_140[1];
double R_050[1];
double R_001[5];
double R_101[4];
double R_201[3];
double R_301[2];
double R_401[1];
double R_011[4];
double R_111[3];
double R_211[2];
double R_311[1];
double R_021[3];
double R_121[2];
double R_221[1];
double R_031[2];
double R_131[1];
double R_041[1];
double R_002[4];
double R_102[3];
double R_202[2];
double R_302[1];
double R_012[3];
double R_112[2];
double R_212[1];
double R_022[2];
double R_122[1];
double R_032[1];
double R_003[3];
double R_103[2];
double R_203[1];
double R_013[2];
double R_113[1];
double R_023[1];
double R_004[2];
double R_104[1];
double R_014[1];
double R_005[1];
for(int i=0;i<5;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<5;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<5;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<4;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<4;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<4;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<4;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<3;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<3;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<3;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<3;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<3;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<3;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<3;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<3;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<3;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<2;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<2;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<2;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<2;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<2;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<2;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<2;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<2;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<2;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<2;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<2;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<2;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<2;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<1;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<1;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<1;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<1;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<1;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<1;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<1;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<1;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<1;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<1;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<1;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<1;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<1;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<1;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<1;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<1;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<1;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<1;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<1;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<1;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<1;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_012[3];
double Pd_112[3];
double Pd_212[3];
double Pd_312[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_012[i]=Pd_111[i]+Pd_001[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_112[i]=2*Pd_211[i]+Pd_001[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_212[i]=Pd_001[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_312[i]=aPin1*Pd_211[i];
}
double P_012000000=Pd_012[0];
double P_112000000=Pd_112[0];
double P_212000000=Pd_212[0];
double P_312000000=Pd_312[0];
double P_011001000=Pd_011[0]*Pd_001[1];
double P_011101000=Pd_011[0]*Pd_101[1];
double P_111001000=Pd_111[0]*Pd_001[1];
double P_111101000=Pd_111[0]*Pd_101[1];
double P_211001000=Pd_211[0]*Pd_001[1];
double P_211101000=Pd_211[0]*Pd_101[1];
double P_010002000=Pd_010[0]*Pd_002[1];
double P_010102000=Pd_010[0]*Pd_102[1];
double P_010202000=Pd_010[0]*Pd_202[1];
double P_110002000=Pd_110[0]*Pd_002[1];
double P_110102000=Pd_110[0]*Pd_102[1];
double P_110202000=Pd_110[0]*Pd_202[1];
double P_011000001=Pd_011[0]*Pd_001[2];
double P_011000101=Pd_011[0]*Pd_101[2];
double P_111000001=Pd_111[0]*Pd_001[2];
double P_111000101=Pd_111[0]*Pd_101[2];
double P_211000001=Pd_211[0]*Pd_001[2];
double P_211000101=Pd_211[0]*Pd_101[2];
double P_010001001=Pd_010[0]*Pd_001[1]*Pd_001[2];
double P_010001101=Pd_010[0]*Pd_001[1]*Pd_101[2];
double P_010101001=Pd_010[0]*Pd_101[1]*Pd_001[2];
double P_010101101=Pd_010[0]*Pd_101[1]*Pd_101[2];
double P_110001001=Pd_110[0]*Pd_001[1]*Pd_001[2];
double P_110001101=Pd_110[0]*Pd_001[1]*Pd_101[2];
double P_110101001=Pd_110[0]*Pd_101[1]*Pd_001[2];
double P_110101101=Pd_110[0]*Pd_101[1]*Pd_101[2];
double P_010000002=Pd_010[0]*Pd_002[2];
double P_010000102=Pd_010[0]*Pd_102[2];
double P_010000202=Pd_010[0]*Pd_202[2];
double P_110000002=Pd_110[0]*Pd_002[2];
double P_110000102=Pd_110[0]*Pd_102[2];
double P_110000202=Pd_110[0]*Pd_202[2];
double P_002010000=Pd_002[0]*Pd_010[1];
double P_002110000=Pd_002[0]*Pd_110[1];
double P_102010000=Pd_102[0]*Pd_010[1];
double P_102110000=Pd_102[0]*Pd_110[1];
double P_202010000=Pd_202[0]*Pd_010[1];
double P_202110000=Pd_202[0]*Pd_110[1];
double P_001011000=Pd_001[0]*Pd_011[1];
double P_001111000=Pd_001[0]*Pd_111[1];
double P_001211000=Pd_001[0]*Pd_211[1];
double P_101011000=Pd_101[0]*Pd_011[1];
double P_101111000=Pd_101[0]*Pd_111[1];
double P_101211000=Pd_101[0]*Pd_211[1];
double P_000012000=Pd_012[1];
double P_000112000=Pd_112[1];
double P_000212000=Pd_212[1];
double P_000312000=Pd_312[1];
double P_001010001=Pd_001[0]*Pd_010[1]*Pd_001[2];
double P_001010101=Pd_001[0]*Pd_010[1]*Pd_101[2];
double P_001110001=Pd_001[0]*Pd_110[1]*Pd_001[2];
double P_001110101=Pd_001[0]*Pd_110[1]*Pd_101[2];
double P_101010001=Pd_101[0]*Pd_010[1]*Pd_001[2];
double P_101010101=Pd_101[0]*Pd_010[1]*Pd_101[2];
double P_101110001=Pd_101[0]*Pd_110[1]*Pd_001[2];
double P_101110101=Pd_101[0]*Pd_110[1]*Pd_101[2];
double P_000011001=Pd_011[1]*Pd_001[2];
double P_000011101=Pd_011[1]*Pd_101[2];
double P_000111001=Pd_111[1]*Pd_001[2];
double P_000111101=Pd_111[1]*Pd_101[2];
double P_000211001=Pd_211[1]*Pd_001[2];
double P_000211101=Pd_211[1]*Pd_101[2];
double P_000010002=Pd_010[1]*Pd_002[2];
double P_000010102=Pd_010[1]*Pd_102[2];
double P_000010202=Pd_010[1]*Pd_202[2];
double P_000110002=Pd_110[1]*Pd_002[2];
double P_000110102=Pd_110[1]*Pd_102[2];
double P_000110202=Pd_110[1]*Pd_202[2];
double P_002000010=Pd_002[0]*Pd_010[2];
double P_002000110=Pd_002[0]*Pd_110[2];
double P_102000010=Pd_102[0]*Pd_010[2];
double P_102000110=Pd_102[0]*Pd_110[2];
double P_202000010=Pd_202[0]*Pd_010[2];
double P_202000110=Pd_202[0]*Pd_110[2];
double P_001001010=Pd_001[0]*Pd_001[1]*Pd_010[2];
double P_001001110=Pd_001[0]*Pd_001[1]*Pd_110[2];
double P_001101010=Pd_001[0]*Pd_101[1]*Pd_010[2];
double P_001101110=Pd_001[0]*Pd_101[1]*Pd_110[2];
double P_101001010=Pd_101[0]*Pd_001[1]*Pd_010[2];
double P_101001110=Pd_101[0]*Pd_001[1]*Pd_110[2];
double P_101101010=Pd_101[0]*Pd_101[1]*Pd_010[2];
double P_101101110=Pd_101[0]*Pd_101[1]*Pd_110[2];
double P_000002010=Pd_002[1]*Pd_010[2];
double P_000002110=Pd_002[1]*Pd_110[2];
double P_000102010=Pd_102[1]*Pd_010[2];
double P_000102110=Pd_102[1]*Pd_110[2];
double P_000202010=Pd_202[1]*Pd_010[2];
double P_000202110=Pd_202[1]*Pd_110[2];
double P_001000011=Pd_001[0]*Pd_011[2];
double P_001000111=Pd_001[0]*Pd_111[2];
double P_001000211=Pd_001[0]*Pd_211[2];
double P_101000011=Pd_101[0]*Pd_011[2];
double P_101000111=Pd_101[0]*Pd_111[2];
double P_101000211=Pd_101[0]*Pd_211[2];
double P_000001011=Pd_001[1]*Pd_011[2];
double P_000001111=Pd_001[1]*Pd_111[2];
double P_000001211=Pd_001[1]*Pd_211[2];
double P_000101011=Pd_101[1]*Pd_011[2];
double P_000101111=Pd_101[1]*Pd_111[2];
double P_000101211=Pd_101[1]*Pd_211[2];
double P_000000012=Pd_012[2];
double P_000000112=Pd_112[2];
double P_000000212=Pd_212[2];
double P_000000312=Pd_312[2];
double PR_012000000000=P_012000000*R_000[0]+-1*P_112000000*R_100[0]+P_212000000*R_200[0]+-1*P_312000000*R_300[0];
double PR_011001000000=P_011001000*R_000[0]+-1*P_011101000*R_010[0]+-1*P_111001000*R_100[0]+P_111101000*R_110[0]+P_211001000*R_200[0]+-1*P_211101000*R_210[0];
double PR_010002000000=P_010002000*R_000[0]+-1*P_010102000*R_010[0]+P_010202000*R_020[0]+-1*P_110002000*R_100[0]+P_110102000*R_110[0]+-1*P_110202000*R_120[0];
double PR_011000001000=P_011000001*R_000[0]+-1*P_011000101*R_001[0]+-1*P_111000001*R_100[0]+P_111000101*R_101[0]+P_211000001*R_200[0]+-1*P_211000101*R_201[0];
double PR_010001001000=P_010001001*R_000[0]+-1*P_010001101*R_001[0]+-1*P_010101001*R_010[0]+P_010101101*R_011[0]+-1*P_110001001*R_100[0]+P_110001101*R_101[0]+P_110101001*R_110[0]+-1*P_110101101*R_111[0];
double PR_010000002000=P_010000002*R_000[0]+-1*P_010000102*R_001[0]+P_010000202*R_002[0]+-1*P_110000002*R_100[0]+P_110000102*R_101[0]+-1*P_110000202*R_102[0];
double PR_002010000000=P_002010000*R_000[0]+-1*P_002110000*R_010[0]+-1*P_102010000*R_100[0]+P_102110000*R_110[0]+P_202010000*R_200[0]+-1*P_202110000*R_210[0];
double PR_001011000000=P_001011000*R_000[0]+-1*P_001111000*R_010[0]+P_001211000*R_020[0]+-1*P_101011000*R_100[0]+P_101111000*R_110[0]+-1*P_101211000*R_120[0];
double PR_000012000000=P_000012000*R_000[0]+-1*P_000112000*R_010[0]+P_000212000*R_020[0]+-1*P_000312000*R_030[0];
double PR_001010001000=P_001010001*R_000[0]+-1*P_001010101*R_001[0]+-1*P_001110001*R_010[0]+P_001110101*R_011[0]+-1*P_101010001*R_100[0]+P_101010101*R_101[0]+P_101110001*R_110[0]+-1*P_101110101*R_111[0];
double PR_000011001000=P_000011001*R_000[0]+-1*P_000011101*R_001[0]+-1*P_000111001*R_010[0]+P_000111101*R_011[0]+P_000211001*R_020[0]+-1*P_000211101*R_021[0];
double PR_000010002000=P_000010002*R_000[0]+-1*P_000010102*R_001[0]+P_000010202*R_002[0]+-1*P_000110002*R_010[0]+P_000110102*R_011[0]+-1*P_000110202*R_012[0];
double PR_002000010000=P_002000010*R_000[0]+-1*P_002000110*R_001[0]+-1*P_102000010*R_100[0]+P_102000110*R_101[0]+P_202000010*R_200[0]+-1*P_202000110*R_201[0];
double PR_001001010000=P_001001010*R_000[0]+-1*P_001001110*R_001[0]+-1*P_001101010*R_010[0]+P_001101110*R_011[0]+-1*P_101001010*R_100[0]+P_101001110*R_101[0]+P_101101010*R_110[0]+-1*P_101101110*R_111[0];
double PR_000002010000=P_000002010*R_000[0]+-1*P_000002110*R_001[0]+-1*P_000102010*R_010[0]+P_000102110*R_011[0]+P_000202010*R_020[0]+-1*P_000202110*R_021[0];
double PR_001000011000=P_001000011*R_000[0]+-1*P_001000111*R_001[0]+P_001000211*R_002[0]+-1*P_101000011*R_100[0]+P_101000111*R_101[0]+-1*P_101000211*R_102[0];
double PR_000001011000=P_000001011*R_000[0]+-1*P_000001111*R_001[0]+P_000001211*R_002[0]+-1*P_000101011*R_010[0]+P_000101111*R_011[0]+-1*P_000101211*R_012[0];
double PR_000000012000=P_000000012*R_000[0]+-1*P_000000112*R_001[0]+P_000000212*R_002[0]+-1*P_000000312*R_003[0];
double PR_012000000001=P_012000000*R_001[0]+-1*P_112000000*R_101[0]+P_212000000*R_201[0]+-1*P_312000000*R_301[0];
double PR_011001000001=P_011001000*R_001[0]+-1*P_011101000*R_011[0]+-1*P_111001000*R_101[0]+P_111101000*R_111[0]+P_211001000*R_201[0]+-1*P_211101000*R_211[0];
double PR_010002000001=P_010002000*R_001[0]+-1*P_010102000*R_011[0]+P_010202000*R_021[0]+-1*P_110002000*R_101[0]+P_110102000*R_111[0]+-1*P_110202000*R_121[0];
double PR_011000001001=P_011000001*R_001[0]+-1*P_011000101*R_002[0]+-1*P_111000001*R_101[0]+P_111000101*R_102[0]+P_211000001*R_201[0]+-1*P_211000101*R_202[0];
double PR_010001001001=P_010001001*R_001[0]+-1*P_010001101*R_002[0]+-1*P_010101001*R_011[0]+P_010101101*R_012[0]+-1*P_110001001*R_101[0]+P_110001101*R_102[0]+P_110101001*R_111[0]+-1*P_110101101*R_112[0];
double PR_010000002001=P_010000002*R_001[0]+-1*P_010000102*R_002[0]+P_010000202*R_003[0]+-1*P_110000002*R_101[0]+P_110000102*R_102[0]+-1*P_110000202*R_103[0];
double PR_002010000001=P_002010000*R_001[0]+-1*P_002110000*R_011[0]+-1*P_102010000*R_101[0]+P_102110000*R_111[0]+P_202010000*R_201[0]+-1*P_202110000*R_211[0];
double PR_001011000001=P_001011000*R_001[0]+-1*P_001111000*R_011[0]+P_001211000*R_021[0]+-1*P_101011000*R_101[0]+P_101111000*R_111[0]+-1*P_101211000*R_121[0];
double PR_000012000001=P_000012000*R_001[0]+-1*P_000112000*R_011[0]+P_000212000*R_021[0]+-1*P_000312000*R_031[0];
double PR_001010001001=P_001010001*R_001[0]+-1*P_001010101*R_002[0]+-1*P_001110001*R_011[0]+P_001110101*R_012[0]+-1*P_101010001*R_101[0]+P_101010101*R_102[0]+P_101110001*R_111[0]+-1*P_101110101*R_112[0];
double PR_000011001001=P_000011001*R_001[0]+-1*P_000011101*R_002[0]+-1*P_000111001*R_011[0]+P_000111101*R_012[0]+P_000211001*R_021[0]+-1*P_000211101*R_022[0];
double PR_000010002001=P_000010002*R_001[0]+-1*P_000010102*R_002[0]+P_000010202*R_003[0]+-1*P_000110002*R_011[0]+P_000110102*R_012[0]+-1*P_000110202*R_013[0];
double PR_002000010001=P_002000010*R_001[0]+-1*P_002000110*R_002[0]+-1*P_102000010*R_101[0]+P_102000110*R_102[0]+P_202000010*R_201[0]+-1*P_202000110*R_202[0];
double PR_001001010001=P_001001010*R_001[0]+-1*P_001001110*R_002[0]+-1*P_001101010*R_011[0]+P_001101110*R_012[0]+-1*P_101001010*R_101[0]+P_101001110*R_102[0]+P_101101010*R_111[0]+-1*P_101101110*R_112[0];
double PR_000002010001=P_000002010*R_001[0]+-1*P_000002110*R_002[0]+-1*P_000102010*R_011[0]+P_000102110*R_012[0]+P_000202010*R_021[0]+-1*P_000202110*R_022[0];
double PR_001000011001=P_001000011*R_001[0]+-1*P_001000111*R_002[0]+P_001000211*R_003[0]+-1*P_101000011*R_101[0]+P_101000111*R_102[0]+-1*P_101000211*R_103[0];
double PR_000001011001=P_000001011*R_001[0]+-1*P_000001111*R_002[0]+P_000001211*R_003[0]+-1*P_000101011*R_011[0]+P_000101111*R_012[0]+-1*P_000101211*R_013[0];
double PR_000000012001=P_000000012*R_001[0]+-1*P_000000112*R_002[0]+P_000000212*R_003[0]+-1*P_000000312*R_004[0];
double PR_012000000010=P_012000000*R_010[0]+-1*P_112000000*R_110[0]+P_212000000*R_210[0]+-1*P_312000000*R_310[0];
double PR_011001000010=P_011001000*R_010[0]+-1*P_011101000*R_020[0]+-1*P_111001000*R_110[0]+P_111101000*R_120[0]+P_211001000*R_210[0]+-1*P_211101000*R_220[0];
double PR_010002000010=P_010002000*R_010[0]+-1*P_010102000*R_020[0]+P_010202000*R_030[0]+-1*P_110002000*R_110[0]+P_110102000*R_120[0]+-1*P_110202000*R_130[0];
double PR_011000001010=P_011000001*R_010[0]+-1*P_011000101*R_011[0]+-1*P_111000001*R_110[0]+P_111000101*R_111[0]+P_211000001*R_210[0]+-1*P_211000101*R_211[0];
double PR_010001001010=P_010001001*R_010[0]+-1*P_010001101*R_011[0]+-1*P_010101001*R_020[0]+P_010101101*R_021[0]+-1*P_110001001*R_110[0]+P_110001101*R_111[0]+P_110101001*R_120[0]+-1*P_110101101*R_121[0];
double PR_010000002010=P_010000002*R_010[0]+-1*P_010000102*R_011[0]+P_010000202*R_012[0]+-1*P_110000002*R_110[0]+P_110000102*R_111[0]+-1*P_110000202*R_112[0];
double PR_002010000010=P_002010000*R_010[0]+-1*P_002110000*R_020[0]+-1*P_102010000*R_110[0]+P_102110000*R_120[0]+P_202010000*R_210[0]+-1*P_202110000*R_220[0];
double PR_001011000010=P_001011000*R_010[0]+-1*P_001111000*R_020[0]+P_001211000*R_030[0]+-1*P_101011000*R_110[0]+P_101111000*R_120[0]+-1*P_101211000*R_130[0];
double PR_000012000010=P_000012000*R_010[0]+-1*P_000112000*R_020[0]+P_000212000*R_030[0]+-1*P_000312000*R_040[0];
double PR_001010001010=P_001010001*R_010[0]+-1*P_001010101*R_011[0]+-1*P_001110001*R_020[0]+P_001110101*R_021[0]+-1*P_101010001*R_110[0]+P_101010101*R_111[0]+P_101110001*R_120[0]+-1*P_101110101*R_121[0];
double PR_000011001010=P_000011001*R_010[0]+-1*P_000011101*R_011[0]+-1*P_000111001*R_020[0]+P_000111101*R_021[0]+P_000211001*R_030[0]+-1*P_000211101*R_031[0];
double PR_000010002010=P_000010002*R_010[0]+-1*P_000010102*R_011[0]+P_000010202*R_012[0]+-1*P_000110002*R_020[0]+P_000110102*R_021[0]+-1*P_000110202*R_022[0];
double PR_002000010010=P_002000010*R_010[0]+-1*P_002000110*R_011[0]+-1*P_102000010*R_110[0]+P_102000110*R_111[0]+P_202000010*R_210[0]+-1*P_202000110*R_211[0];
double PR_001001010010=P_001001010*R_010[0]+-1*P_001001110*R_011[0]+-1*P_001101010*R_020[0]+P_001101110*R_021[0]+-1*P_101001010*R_110[0]+P_101001110*R_111[0]+P_101101010*R_120[0]+-1*P_101101110*R_121[0];
double PR_000002010010=P_000002010*R_010[0]+-1*P_000002110*R_011[0]+-1*P_000102010*R_020[0]+P_000102110*R_021[0]+P_000202010*R_030[0]+-1*P_000202110*R_031[0];
double PR_001000011010=P_001000011*R_010[0]+-1*P_001000111*R_011[0]+P_001000211*R_012[0]+-1*P_101000011*R_110[0]+P_101000111*R_111[0]+-1*P_101000211*R_112[0];
double PR_000001011010=P_000001011*R_010[0]+-1*P_000001111*R_011[0]+P_000001211*R_012[0]+-1*P_000101011*R_020[0]+P_000101111*R_021[0]+-1*P_000101211*R_022[0];
double PR_000000012010=P_000000012*R_010[0]+-1*P_000000112*R_011[0]+P_000000212*R_012[0]+-1*P_000000312*R_013[0];
double PR_012000000100=P_012000000*R_100[0]+-1*P_112000000*R_200[0]+P_212000000*R_300[0]+-1*P_312000000*R_400[0];
double PR_011001000100=P_011001000*R_100[0]+-1*P_011101000*R_110[0]+-1*P_111001000*R_200[0]+P_111101000*R_210[0]+P_211001000*R_300[0]+-1*P_211101000*R_310[0];
double PR_010002000100=P_010002000*R_100[0]+-1*P_010102000*R_110[0]+P_010202000*R_120[0]+-1*P_110002000*R_200[0]+P_110102000*R_210[0]+-1*P_110202000*R_220[0];
double PR_011000001100=P_011000001*R_100[0]+-1*P_011000101*R_101[0]+-1*P_111000001*R_200[0]+P_111000101*R_201[0]+P_211000001*R_300[0]+-1*P_211000101*R_301[0];
double PR_010001001100=P_010001001*R_100[0]+-1*P_010001101*R_101[0]+-1*P_010101001*R_110[0]+P_010101101*R_111[0]+-1*P_110001001*R_200[0]+P_110001101*R_201[0]+P_110101001*R_210[0]+-1*P_110101101*R_211[0];
double PR_010000002100=P_010000002*R_100[0]+-1*P_010000102*R_101[0]+P_010000202*R_102[0]+-1*P_110000002*R_200[0]+P_110000102*R_201[0]+-1*P_110000202*R_202[0];
double PR_002010000100=P_002010000*R_100[0]+-1*P_002110000*R_110[0]+-1*P_102010000*R_200[0]+P_102110000*R_210[0]+P_202010000*R_300[0]+-1*P_202110000*R_310[0];
double PR_001011000100=P_001011000*R_100[0]+-1*P_001111000*R_110[0]+P_001211000*R_120[0]+-1*P_101011000*R_200[0]+P_101111000*R_210[0]+-1*P_101211000*R_220[0];
double PR_000012000100=P_000012000*R_100[0]+-1*P_000112000*R_110[0]+P_000212000*R_120[0]+-1*P_000312000*R_130[0];
double PR_001010001100=P_001010001*R_100[0]+-1*P_001010101*R_101[0]+-1*P_001110001*R_110[0]+P_001110101*R_111[0]+-1*P_101010001*R_200[0]+P_101010101*R_201[0]+P_101110001*R_210[0]+-1*P_101110101*R_211[0];
double PR_000011001100=P_000011001*R_100[0]+-1*P_000011101*R_101[0]+-1*P_000111001*R_110[0]+P_000111101*R_111[0]+P_000211001*R_120[0]+-1*P_000211101*R_121[0];
double PR_000010002100=P_000010002*R_100[0]+-1*P_000010102*R_101[0]+P_000010202*R_102[0]+-1*P_000110002*R_110[0]+P_000110102*R_111[0]+-1*P_000110202*R_112[0];
double PR_002000010100=P_002000010*R_100[0]+-1*P_002000110*R_101[0]+-1*P_102000010*R_200[0]+P_102000110*R_201[0]+P_202000010*R_300[0]+-1*P_202000110*R_301[0];
double PR_001001010100=P_001001010*R_100[0]+-1*P_001001110*R_101[0]+-1*P_001101010*R_110[0]+P_001101110*R_111[0]+-1*P_101001010*R_200[0]+P_101001110*R_201[0]+P_101101010*R_210[0]+-1*P_101101110*R_211[0];
double PR_000002010100=P_000002010*R_100[0]+-1*P_000002110*R_101[0]+-1*P_000102010*R_110[0]+P_000102110*R_111[0]+P_000202010*R_120[0]+-1*P_000202110*R_121[0];
double PR_001000011100=P_001000011*R_100[0]+-1*P_001000111*R_101[0]+P_001000211*R_102[0]+-1*P_101000011*R_200[0]+P_101000111*R_201[0]+-1*P_101000211*R_202[0];
double PR_000001011100=P_000001011*R_100[0]+-1*P_000001111*R_101[0]+P_000001211*R_102[0]+-1*P_000101011*R_110[0]+P_000101111*R_111[0]+-1*P_000101211*R_112[0];
double PR_000000012100=P_000000012*R_100[0]+-1*P_000000112*R_101[0]+P_000000212*R_102[0]+-1*P_000000312*R_103[0];
double PR_012000000002=P_012000000*R_002[0]+-1*P_112000000*R_102[0]+P_212000000*R_202[0]+-1*P_312000000*R_302[0];
double PR_011001000002=P_011001000*R_002[0]+-1*P_011101000*R_012[0]+-1*P_111001000*R_102[0]+P_111101000*R_112[0]+P_211001000*R_202[0]+-1*P_211101000*R_212[0];
double PR_010002000002=P_010002000*R_002[0]+-1*P_010102000*R_012[0]+P_010202000*R_022[0]+-1*P_110002000*R_102[0]+P_110102000*R_112[0]+-1*P_110202000*R_122[0];
double PR_011000001002=P_011000001*R_002[0]+-1*P_011000101*R_003[0]+-1*P_111000001*R_102[0]+P_111000101*R_103[0]+P_211000001*R_202[0]+-1*P_211000101*R_203[0];
double PR_010001001002=P_010001001*R_002[0]+-1*P_010001101*R_003[0]+-1*P_010101001*R_012[0]+P_010101101*R_013[0]+-1*P_110001001*R_102[0]+P_110001101*R_103[0]+P_110101001*R_112[0]+-1*P_110101101*R_113[0];
double PR_010000002002=P_010000002*R_002[0]+-1*P_010000102*R_003[0]+P_010000202*R_004[0]+-1*P_110000002*R_102[0]+P_110000102*R_103[0]+-1*P_110000202*R_104[0];
double PR_002010000002=P_002010000*R_002[0]+-1*P_002110000*R_012[0]+-1*P_102010000*R_102[0]+P_102110000*R_112[0]+P_202010000*R_202[0]+-1*P_202110000*R_212[0];
double PR_001011000002=P_001011000*R_002[0]+-1*P_001111000*R_012[0]+P_001211000*R_022[0]+-1*P_101011000*R_102[0]+P_101111000*R_112[0]+-1*P_101211000*R_122[0];
double PR_000012000002=P_000012000*R_002[0]+-1*P_000112000*R_012[0]+P_000212000*R_022[0]+-1*P_000312000*R_032[0];
double PR_001010001002=P_001010001*R_002[0]+-1*P_001010101*R_003[0]+-1*P_001110001*R_012[0]+P_001110101*R_013[0]+-1*P_101010001*R_102[0]+P_101010101*R_103[0]+P_101110001*R_112[0]+-1*P_101110101*R_113[0];
double PR_000011001002=P_000011001*R_002[0]+-1*P_000011101*R_003[0]+-1*P_000111001*R_012[0]+P_000111101*R_013[0]+P_000211001*R_022[0]+-1*P_000211101*R_023[0];
double PR_000010002002=P_000010002*R_002[0]+-1*P_000010102*R_003[0]+P_000010202*R_004[0]+-1*P_000110002*R_012[0]+P_000110102*R_013[0]+-1*P_000110202*R_014[0];
double PR_002000010002=P_002000010*R_002[0]+-1*P_002000110*R_003[0]+-1*P_102000010*R_102[0]+P_102000110*R_103[0]+P_202000010*R_202[0]+-1*P_202000110*R_203[0];
double PR_001001010002=P_001001010*R_002[0]+-1*P_001001110*R_003[0]+-1*P_001101010*R_012[0]+P_001101110*R_013[0]+-1*P_101001010*R_102[0]+P_101001110*R_103[0]+P_101101010*R_112[0]+-1*P_101101110*R_113[0];
double PR_000002010002=P_000002010*R_002[0]+-1*P_000002110*R_003[0]+-1*P_000102010*R_012[0]+P_000102110*R_013[0]+P_000202010*R_022[0]+-1*P_000202110*R_023[0];
double PR_001000011002=P_001000011*R_002[0]+-1*P_001000111*R_003[0]+P_001000211*R_004[0]+-1*P_101000011*R_102[0]+P_101000111*R_103[0]+-1*P_101000211*R_104[0];
double PR_000001011002=P_000001011*R_002[0]+-1*P_000001111*R_003[0]+P_000001211*R_004[0]+-1*P_000101011*R_012[0]+P_000101111*R_013[0]+-1*P_000101211*R_014[0];
double PR_000000012002=P_000000012*R_002[0]+-1*P_000000112*R_003[0]+P_000000212*R_004[0]+-1*P_000000312*R_005[0];
double PR_012000000011=P_012000000*R_011[0]+-1*P_112000000*R_111[0]+P_212000000*R_211[0]+-1*P_312000000*R_311[0];
double PR_011001000011=P_011001000*R_011[0]+-1*P_011101000*R_021[0]+-1*P_111001000*R_111[0]+P_111101000*R_121[0]+P_211001000*R_211[0]+-1*P_211101000*R_221[0];
double PR_010002000011=P_010002000*R_011[0]+-1*P_010102000*R_021[0]+P_010202000*R_031[0]+-1*P_110002000*R_111[0]+P_110102000*R_121[0]+-1*P_110202000*R_131[0];
double PR_011000001011=P_011000001*R_011[0]+-1*P_011000101*R_012[0]+-1*P_111000001*R_111[0]+P_111000101*R_112[0]+P_211000001*R_211[0]+-1*P_211000101*R_212[0];
double PR_010001001011=P_010001001*R_011[0]+-1*P_010001101*R_012[0]+-1*P_010101001*R_021[0]+P_010101101*R_022[0]+-1*P_110001001*R_111[0]+P_110001101*R_112[0]+P_110101001*R_121[0]+-1*P_110101101*R_122[0];
double PR_010000002011=P_010000002*R_011[0]+-1*P_010000102*R_012[0]+P_010000202*R_013[0]+-1*P_110000002*R_111[0]+P_110000102*R_112[0]+-1*P_110000202*R_113[0];
double PR_002010000011=P_002010000*R_011[0]+-1*P_002110000*R_021[0]+-1*P_102010000*R_111[0]+P_102110000*R_121[0]+P_202010000*R_211[0]+-1*P_202110000*R_221[0];
double PR_001011000011=P_001011000*R_011[0]+-1*P_001111000*R_021[0]+P_001211000*R_031[0]+-1*P_101011000*R_111[0]+P_101111000*R_121[0]+-1*P_101211000*R_131[0];
double PR_000012000011=P_000012000*R_011[0]+-1*P_000112000*R_021[0]+P_000212000*R_031[0]+-1*P_000312000*R_041[0];
double PR_001010001011=P_001010001*R_011[0]+-1*P_001010101*R_012[0]+-1*P_001110001*R_021[0]+P_001110101*R_022[0]+-1*P_101010001*R_111[0]+P_101010101*R_112[0]+P_101110001*R_121[0]+-1*P_101110101*R_122[0];
double PR_000011001011=P_000011001*R_011[0]+-1*P_000011101*R_012[0]+-1*P_000111001*R_021[0]+P_000111101*R_022[0]+P_000211001*R_031[0]+-1*P_000211101*R_032[0];
double PR_000010002011=P_000010002*R_011[0]+-1*P_000010102*R_012[0]+P_000010202*R_013[0]+-1*P_000110002*R_021[0]+P_000110102*R_022[0]+-1*P_000110202*R_023[0];
double PR_002000010011=P_002000010*R_011[0]+-1*P_002000110*R_012[0]+-1*P_102000010*R_111[0]+P_102000110*R_112[0]+P_202000010*R_211[0]+-1*P_202000110*R_212[0];
double PR_001001010011=P_001001010*R_011[0]+-1*P_001001110*R_012[0]+-1*P_001101010*R_021[0]+P_001101110*R_022[0]+-1*P_101001010*R_111[0]+P_101001110*R_112[0]+P_101101010*R_121[0]+-1*P_101101110*R_122[0];
double PR_000002010011=P_000002010*R_011[0]+-1*P_000002110*R_012[0]+-1*P_000102010*R_021[0]+P_000102110*R_022[0]+P_000202010*R_031[0]+-1*P_000202110*R_032[0];
double PR_001000011011=P_001000011*R_011[0]+-1*P_001000111*R_012[0]+P_001000211*R_013[0]+-1*P_101000011*R_111[0]+P_101000111*R_112[0]+-1*P_101000211*R_113[0];
double PR_000001011011=P_000001011*R_011[0]+-1*P_000001111*R_012[0]+P_000001211*R_013[0]+-1*P_000101011*R_021[0]+P_000101111*R_022[0]+-1*P_000101211*R_023[0];
double PR_000000012011=P_000000012*R_011[0]+-1*P_000000112*R_012[0]+P_000000212*R_013[0]+-1*P_000000312*R_014[0];
double PR_012000000020=P_012000000*R_020[0]+-1*P_112000000*R_120[0]+P_212000000*R_220[0]+-1*P_312000000*R_320[0];
double PR_011001000020=P_011001000*R_020[0]+-1*P_011101000*R_030[0]+-1*P_111001000*R_120[0]+P_111101000*R_130[0]+P_211001000*R_220[0]+-1*P_211101000*R_230[0];
double PR_010002000020=P_010002000*R_020[0]+-1*P_010102000*R_030[0]+P_010202000*R_040[0]+-1*P_110002000*R_120[0]+P_110102000*R_130[0]+-1*P_110202000*R_140[0];
double PR_011000001020=P_011000001*R_020[0]+-1*P_011000101*R_021[0]+-1*P_111000001*R_120[0]+P_111000101*R_121[0]+P_211000001*R_220[0]+-1*P_211000101*R_221[0];
double PR_010001001020=P_010001001*R_020[0]+-1*P_010001101*R_021[0]+-1*P_010101001*R_030[0]+P_010101101*R_031[0]+-1*P_110001001*R_120[0]+P_110001101*R_121[0]+P_110101001*R_130[0]+-1*P_110101101*R_131[0];
double PR_010000002020=P_010000002*R_020[0]+-1*P_010000102*R_021[0]+P_010000202*R_022[0]+-1*P_110000002*R_120[0]+P_110000102*R_121[0]+-1*P_110000202*R_122[0];
double PR_002010000020=P_002010000*R_020[0]+-1*P_002110000*R_030[0]+-1*P_102010000*R_120[0]+P_102110000*R_130[0]+P_202010000*R_220[0]+-1*P_202110000*R_230[0];
double PR_001011000020=P_001011000*R_020[0]+-1*P_001111000*R_030[0]+P_001211000*R_040[0]+-1*P_101011000*R_120[0]+P_101111000*R_130[0]+-1*P_101211000*R_140[0];
double PR_000012000020=P_000012000*R_020[0]+-1*P_000112000*R_030[0]+P_000212000*R_040[0]+-1*P_000312000*R_050[0];
double PR_001010001020=P_001010001*R_020[0]+-1*P_001010101*R_021[0]+-1*P_001110001*R_030[0]+P_001110101*R_031[0]+-1*P_101010001*R_120[0]+P_101010101*R_121[0]+P_101110001*R_130[0]+-1*P_101110101*R_131[0];
double PR_000011001020=P_000011001*R_020[0]+-1*P_000011101*R_021[0]+-1*P_000111001*R_030[0]+P_000111101*R_031[0]+P_000211001*R_040[0]+-1*P_000211101*R_041[0];
double PR_000010002020=P_000010002*R_020[0]+-1*P_000010102*R_021[0]+P_000010202*R_022[0]+-1*P_000110002*R_030[0]+P_000110102*R_031[0]+-1*P_000110202*R_032[0];
double PR_002000010020=P_002000010*R_020[0]+-1*P_002000110*R_021[0]+-1*P_102000010*R_120[0]+P_102000110*R_121[0]+P_202000010*R_220[0]+-1*P_202000110*R_221[0];
double PR_001001010020=P_001001010*R_020[0]+-1*P_001001110*R_021[0]+-1*P_001101010*R_030[0]+P_001101110*R_031[0]+-1*P_101001010*R_120[0]+P_101001110*R_121[0]+P_101101010*R_130[0]+-1*P_101101110*R_131[0];
double PR_000002010020=P_000002010*R_020[0]+-1*P_000002110*R_021[0]+-1*P_000102010*R_030[0]+P_000102110*R_031[0]+P_000202010*R_040[0]+-1*P_000202110*R_041[0];
double PR_001000011020=P_001000011*R_020[0]+-1*P_001000111*R_021[0]+P_001000211*R_022[0]+-1*P_101000011*R_120[0]+P_101000111*R_121[0]+-1*P_101000211*R_122[0];
double PR_000001011020=P_000001011*R_020[0]+-1*P_000001111*R_021[0]+P_000001211*R_022[0]+-1*P_000101011*R_030[0]+P_000101111*R_031[0]+-1*P_000101211*R_032[0];
double PR_000000012020=P_000000012*R_020[0]+-1*P_000000112*R_021[0]+P_000000212*R_022[0]+-1*P_000000312*R_023[0];
double PR_012000000101=P_012000000*R_101[0]+-1*P_112000000*R_201[0]+P_212000000*R_301[0]+-1*P_312000000*R_401[0];
double PR_011001000101=P_011001000*R_101[0]+-1*P_011101000*R_111[0]+-1*P_111001000*R_201[0]+P_111101000*R_211[0]+P_211001000*R_301[0]+-1*P_211101000*R_311[0];
double PR_010002000101=P_010002000*R_101[0]+-1*P_010102000*R_111[0]+P_010202000*R_121[0]+-1*P_110002000*R_201[0]+P_110102000*R_211[0]+-1*P_110202000*R_221[0];
double PR_011000001101=P_011000001*R_101[0]+-1*P_011000101*R_102[0]+-1*P_111000001*R_201[0]+P_111000101*R_202[0]+P_211000001*R_301[0]+-1*P_211000101*R_302[0];
double PR_010001001101=P_010001001*R_101[0]+-1*P_010001101*R_102[0]+-1*P_010101001*R_111[0]+P_010101101*R_112[0]+-1*P_110001001*R_201[0]+P_110001101*R_202[0]+P_110101001*R_211[0]+-1*P_110101101*R_212[0];
double PR_010000002101=P_010000002*R_101[0]+-1*P_010000102*R_102[0]+P_010000202*R_103[0]+-1*P_110000002*R_201[0]+P_110000102*R_202[0]+-1*P_110000202*R_203[0];
double PR_002010000101=P_002010000*R_101[0]+-1*P_002110000*R_111[0]+-1*P_102010000*R_201[0]+P_102110000*R_211[0]+P_202010000*R_301[0]+-1*P_202110000*R_311[0];
double PR_001011000101=P_001011000*R_101[0]+-1*P_001111000*R_111[0]+P_001211000*R_121[0]+-1*P_101011000*R_201[0]+P_101111000*R_211[0]+-1*P_101211000*R_221[0];
double PR_000012000101=P_000012000*R_101[0]+-1*P_000112000*R_111[0]+P_000212000*R_121[0]+-1*P_000312000*R_131[0];
double PR_001010001101=P_001010001*R_101[0]+-1*P_001010101*R_102[0]+-1*P_001110001*R_111[0]+P_001110101*R_112[0]+-1*P_101010001*R_201[0]+P_101010101*R_202[0]+P_101110001*R_211[0]+-1*P_101110101*R_212[0];
double PR_000011001101=P_000011001*R_101[0]+-1*P_000011101*R_102[0]+-1*P_000111001*R_111[0]+P_000111101*R_112[0]+P_000211001*R_121[0]+-1*P_000211101*R_122[0];
double PR_000010002101=P_000010002*R_101[0]+-1*P_000010102*R_102[0]+P_000010202*R_103[0]+-1*P_000110002*R_111[0]+P_000110102*R_112[0]+-1*P_000110202*R_113[0];
double PR_002000010101=P_002000010*R_101[0]+-1*P_002000110*R_102[0]+-1*P_102000010*R_201[0]+P_102000110*R_202[0]+P_202000010*R_301[0]+-1*P_202000110*R_302[0];
double PR_001001010101=P_001001010*R_101[0]+-1*P_001001110*R_102[0]+-1*P_001101010*R_111[0]+P_001101110*R_112[0]+-1*P_101001010*R_201[0]+P_101001110*R_202[0]+P_101101010*R_211[0]+-1*P_101101110*R_212[0];
double PR_000002010101=P_000002010*R_101[0]+-1*P_000002110*R_102[0]+-1*P_000102010*R_111[0]+P_000102110*R_112[0]+P_000202010*R_121[0]+-1*P_000202110*R_122[0];
double PR_001000011101=P_001000011*R_101[0]+-1*P_001000111*R_102[0]+P_001000211*R_103[0]+-1*P_101000011*R_201[0]+P_101000111*R_202[0]+-1*P_101000211*R_203[0];
double PR_000001011101=P_000001011*R_101[0]+-1*P_000001111*R_102[0]+P_000001211*R_103[0]+-1*P_000101011*R_111[0]+P_000101111*R_112[0]+-1*P_000101211*R_113[0];
double PR_000000012101=P_000000012*R_101[0]+-1*P_000000112*R_102[0]+P_000000212*R_103[0]+-1*P_000000312*R_104[0];
double PR_012000000110=P_012000000*R_110[0]+-1*P_112000000*R_210[0]+P_212000000*R_310[0]+-1*P_312000000*R_410[0];
double PR_011001000110=P_011001000*R_110[0]+-1*P_011101000*R_120[0]+-1*P_111001000*R_210[0]+P_111101000*R_220[0]+P_211001000*R_310[0]+-1*P_211101000*R_320[0];
double PR_010002000110=P_010002000*R_110[0]+-1*P_010102000*R_120[0]+P_010202000*R_130[0]+-1*P_110002000*R_210[0]+P_110102000*R_220[0]+-1*P_110202000*R_230[0];
double PR_011000001110=P_011000001*R_110[0]+-1*P_011000101*R_111[0]+-1*P_111000001*R_210[0]+P_111000101*R_211[0]+P_211000001*R_310[0]+-1*P_211000101*R_311[0];
double PR_010001001110=P_010001001*R_110[0]+-1*P_010001101*R_111[0]+-1*P_010101001*R_120[0]+P_010101101*R_121[0]+-1*P_110001001*R_210[0]+P_110001101*R_211[0]+P_110101001*R_220[0]+-1*P_110101101*R_221[0];
double PR_010000002110=P_010000002*R_110[0]+-1*P_010000102*R_111[0]+P_010000202*R_112[0]+-1*P_110000002*R_210[0]+P_110000102*R_211[0]+-1*P_110000202*R_212[0];
double PR_002010000110=P_002010000*R_110[0]+-1*P_002110000*R_120[0]+-1*P_102010000*R_210[0]+P_102110000*R_220[0]+P_202010000*R_310[0]+-1*P_202110000*R_320[0];
double PR_001011000110=P_001011000*R_110[0]+-1*P_001111000*R_120[0]+P_001211000*R_130[0]+-1*P_101011000*R_210[0]+P_101111000*R_220[0]+-1*P_101211000*R_230[0];
double PR_000012000110=P_000012000*R_110[0]+-1*P_000112000*R_120[0]+P_000212000*R_130[0]+-1*P_000312000*R_140[0];
double PR_001010001110=P_001010001*R_110[0]+-1*P_001010101*R_111[0]+-1*P_001110001*R_120[0]+P_001110101*R_121[0]+-1*P_101010001*R_210[0]+P_101010101*R_211[0]+P_101110001*R_220[0]+-1*P_101110101*R_221[0];
double PR_000011001110=P_000011001*R_110[0]+-1*P_000011101*R_111[0]+-1*P_000111001*R_120[0]+P_000111101*R_121[0]+P_000211001*R_130[0]+-1*P_000211101*R_131[0];
double PR_000010002110=P_000010002*R_110[0]+-1*P_000010102*R_111[0]+P_000010202*R_112[0]+-1*P_000110002*R_120[0]+P_000110102*R_121[0]+-1*P_000110202*R_122[0];
double PR_002000010110=P_002000010*R_110[0]+-1*P_002000110*R_111[0]+-1*P_102000010*R_210[0]+P_102000110*R_211[0]+P_202000010*R_310[0]+-1*P_202000110*R_311[0];
double PR_001001010110=P_001001010*R_110[0]+-1*P_001001110*R_111[0]+-1*P_001101010*R_120[0]+P_001101110*R_121[0]+-1*P_101001010*R_210[0]+P_101001110*R_211[0]+P_101101010*R_220[0]+-1*P_101101110*R_221[0];
double PR_000002010110=P_000002010*R_110[0]+-1*P_000002110*R_111[0]+-1*P_000102010*R_120[0]+P_000102110*R_121[0]+P_000202010*R_130[0]+-1*P_000202110*R_131[0];
double PR_001000011110=P_001000011*R_110[0]+-1*P_001000111*R_111[0]+P_001000211*R_112[0]+-1*P_101000011*R_210[0]+P_101000111*R_211[0]+-1*P_101000211*R_212[0];
double PR_000001011110=P_000001011*R_110[0]+-1*P_000001111*R_111[0]+P_000001211*R_112[0]+-1*P_000101011*R_120[0]+P_000101111*R_121[0]+-1*P_000101211*R_122[0];
double PR_000000012110=P_000000012*R_110[0]+-1*P_000000112*R_111[0]+P_000000212*R_112[0]+-1*P_000000312*R_113[0];
double PR_012000000200=P_012000000*R_200[0]+-1*P_112000000*R_300[0]+P_212000000*R_400[0]+-1*P_312000000*R_500[0];
double PR_011001000200=P_011001000*R_200[0]+-1*P_011101000*R_210[0]+-1*P_111001000*R_300[0]+P_111101000*R_310[0]+P_211001000*R_400[0]+-1*P_211101000*R_410[0];
double PR_010002000200=P_010002000*R_200[0]+-1*P_010102000*R_210[0]+P_010202000*R_220[0]+-1*P_110002000*R_300[0]+P_110102000*R_310[0]+-1*P_110202000*R_320[0];
double PR_011000001200=P_011000001*R_200[0]+-1*P_011000101*R_201[0]+-1*P_111000001*R_300[0]+P_111000101*R_301[0]+P_211000001*R_400[0]+-1*P_211000101*R_401[0];
double PR_010001001200=P_010001001*R_200[0]+-1*P_010001101*R_201[0]+-1*P_010101001*R_210[0]+P_010101101*R_211[0]+-1*P_110001001*R_300[0]+P_110001101*R_301[0]+P_110101001*R_310[0]+-1*P_110101101*R_311[0];
double PR_010000002200=P_010000002*R_200[0]+-1*P_010000102*R_201[0]+P_010000202*R_202[0]+-1*P_110000002*R_300[0]+P_110000102*R_301[0]+-1*P_110000202*R_302[0];
double PR_002010000200=P_002010000*R_200[0]+-1*P_002110000*R_210[0]+-1*P_102010000*R_300[0]+P_102110000*R_310[0]+P_202010000*R_400[0]+-1*P_202110000*R_410[0];
double PR_001011000200=P_001011000*R_200[0]+-1*P_001111000*R_210[0]+P_001211000*R_220[0]+-1*P_101011000*R_300[0]+P_101111000*R_310[0]+-1*P_101211000*R_320[0];
double PR_000012000200=P_000012000*R_200[0]+-1*P_000112000*R_210[0]+P_000212000*R_220[0]+-1*P_000312000*R_230[0];
double PR_001010001200=P_001010001*R_200[0]+-1*P_001010101*R_201[0]+-1*P_001110001*R_210[0]+P_001110101*R_211[0]+-1*P_101010001*R_300[0]+P_101010101*R_301[0]+P_101110001*R_310[0]+-1*P_101110101*R_311[0];
double PR_000011001200=P_000011001*R_200[0]+-1*P_000011101*R_201[0]+-1*P_000111001*R_210[0]+P_000111101*R_211[0]+P_000211001*R_220[0]+-1*P_000211101*R_221[0];
double PR_000010002200=P_000010002*R_200[0]+-1*P_000010102*R_201[0]+P_000010202*R_202[0]+-1*P_000110002*R_210[0]+P_000110102*R_211[0]+-1*P_000110202*R_212[0];
double PR_002000010200=P_002000010*R_200[0]+-1*P_002000110*R_201[0]+-1*P_102000010*R_300[0]+P_102000110*R_301[0]+P_202000010*R_400[0]+-1*P_202000110*R_401[0];
double PR_001001010200=P_001001010*R_200[0]+-1*P_001001110*R_201[0]+-1*P_001101010*R_210[0]+P_001101110*R_211[0]+-1*P_101001010*R_300[0]+P_101001110*R_301[0]+P_101101010*R_310[0]+-1*P_101101110*R_311[0];
double PR_000002010200=P_000002010*R_200[0]+-1*P_000002110*R_201[0]+-1*P_000102010*R_210[0]+P_000102110*R_211[0]+P_000202010*R_220[0]+-1*P_000202110*R_221[0];
double PR_001000011200=P_001000011*R_200[0]+-1*P_001000111*R_201[0]+P_001000211*R_202[0]+-1*P_101000011*R_300[0]+P_101000111*R_301[0]+-1*P_101000211*R_302[0];
double PR_000001011200=P_000001011*R_200[0]+-1*P_000001111*R_201[0]+P_000001211*R_202[0]+-1*P_000101011*R_210[0]+P_000101111*R_211[0]+-1*P_000101211*R_212[0];
double PR_000000012200=P_000000012*R_200[0]+-1*P_000000112*R_201[0]+P_000000212*R_202[0]+-1*P_000000312*R_203[0];
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
ans_temp[ans_id*9+0]+=Pmtrx[0]*(Q_011000000*PR_012000000000+Q_111000000*PR_012000000100+Q_211000000*PR_012000000200);
ans_temp[ans_id*9+0]+=Pmtrx[1]*(Q_010001000*PR_012000000000+Q_010101000*PR_012000000010+Q_110001000*PR_012000000100+Q_110101000*PR_012000000110);
ans_temp[ans_id*9+0]+=Pmtrx[2]*(Q_010000001*PR_012000000000+Q_010000101*PR_012000000001+Q_110000001*PR_012000000100+Q_110000101*PR_012000000101);
ans_temp[ans_id*9+1]+=Pmtrx[0]*(Q_001010000*PR_012000000000+Q_001110000*PR_012000000010+Q_101010000*PR_012000000100+Q_101110000*PR_012000000110);
ans_temp[ans_id*9+1]+=Pmtrx[1]*(Q_000011000*PR_012000000000+Q_000111000*PR_012000000010+Q_000211000*PR_012000000020);
ans_temp[ans_id*9+1]+=Pmtrx[2]*(Q_000010001*PR_012000000000+Q_000010101*PR_012000000001+Q_000110001*PR_012000000010+Q_000110101*PR_012000000011);
ans_temp[ans_id*9+2]+=Pmtrx[0]*(Q_001000010*PR_012000000000+Q_001000110*PR_012000000001+Q_101000010*PR_012000000100+Q_101000110*PR_012000000101);
ans_temp[ans_id*9+2]+=Pmtrx[1]*(Q_000001010*PR_012000000000+Q_000001110*PR_012000000001+Q_000101010*PR_012000000010+Q_000101110*PR_012000000011);
ans_temp[ans_id*9+2]+=Pmtrx[2]*(Q_000000011*PR_012000000000+Q_000000111*PR_012000000001+Q_000000211*PR_012000000002);
ans_temp[ans_id*9+0]+=Pmtrx[3]*(Q_011000000*PR_011001000000+Q_111000000*PR_011001000100+Q_211000000*PR_011001000200);
ans_temp[ans_id*9+0]+=Pmtrx[4]*(Q_010001000*PR_011001000000+Q_010101000*PR_011001000010+Q_110001000*PR_011001000100+Q_110101000*PR_011001000110);
ans_temp[ans_id*9+0]+=Pmtrx[5]*(Q_010000001*PR_011001000000+Q_010000101*PR_011001000001+Q_110000001*PR_011001000100+Q_110000101*PR_011001000101);
ans_temp[ans_id*9+1]+=Pmtrx[3]*(Q_001010000*PR_011001000000+Q_001110000*PR_011001000010+Q_101010000*PR_011001000100+Q_101110000*PR_011001000110);
ans_temp[ans_id*9+1]+=Pmtrx[4]*(Q_000011000*PR_011001000000+Q_000111000*PR_011001000010+Q_000211000*PR_011001000020);
ans_temp[ans_id*9+1]+=Pmtrx[5]*(Q_000010001*PR_011001000000+Q_000010101*PR_011001000001+Q_000110001*PR_011001000010+Q_000110101*PR_011001000011);
ans_temp[ans_id*9+2]+=Pmtrx[3]*(Q_001000010*PR_011001000000+Q_001000110*PR_011001000001+Q_101000010*PR_011001000100+Q_101000110*PR_011001000101);
ans_temp[ans_id*9+2]+=Pmtrx[4]*(Q_000001010*PR_011001000000+Q_000001110*PR_011001000001+Q_000101010*PR_011001000010+Q_000101110*PR_011001000011);
ans_temp[ans_id*9+2]+=Pmtrx[5]*(Q_000000011*PR_011001000000+Q_000000111*PR_011001000001+Q_000000211*PR_011001000002);
ans_temp[ans_id*9+0]+=Pmtrx[6]*(Q_011000000*PR_010002000000+Q_111000000*PR_010002000100+Q_211000000*PR_010002000200);
ans_temp[ans_id*9+0]+=Pmtrx[7]*(Q_010001000*PR_010002000000+Q_010101000*PR_010002000010+Q_110001000*PR_010002000100+Q_110101000*PR_010002000110);
ans_temp[ans_id*9+0]+=Pmtrx[8]*(Q_010000001*PR_010002000000+Q_010000101*PR_010002000001+Q_110000001*PR_010002000100+Q_110000101*PR_010002000101);
ans_temp[ans_id*9+1]+=Pmtrx[6]*(Q_001010000*PR_010002000000+Q_001110000*PR_010002000010+Q_101010000*PR_010002000100+Q_101110000*PR_010002000110);
ans_temp[ans_id*9+1]+=Pmtrx[7]*(Q_000011000*PR_010002000000+Q_000111000*PR_010002000010+Q_000211000*PR_010002000020);
ans_temp[ans_id*9+1]+=Pmtrx[8]*(Q_000010001*PR_010002000000+Q_000010101*PR_010002000001+Q_000110001*PR_010002000010+Q_000110101*PR_010002000011);
ans_temp[ans_id*9+2]+=Pmtrx[6]*(Q_001000010*PR_010002000000+Q_001000110*PR_010002000001+Q_101000010*PR_010002000100+Q_101000110*PR_010002000101);
ans_temp[ans_id*9+2]+=Pmtrx[7]*(Q_000001010*PR_010002000000+Q_000001110*PR_010002000001+Q_000101010*PR_010002000010+Q_000101110*PR_010002000011);
ans_temp[ans_id*9+2]+=Pmtrx[8]*(Q_000000011*PR_010002000000+Q_000000111*PR_010002000001+Q_000000211*PR_010002000002);
ans_temp[ans_id*9+0]+=Pmtrx[9]*(Q_011000000*PR_011000001000+Q_111000000*PR_011000001100+Q_211000000*PR_011000001200);
ans_temp[ans_id*9+0]+=Pmtrx[10]*(Q_010001000*PR_011000001000+Q_010101000*PR_011000001010+Q_110001000*PR_011000001100+Q_110101000*PR_011000001110);
ans_temp[ans_id*9+0]+=Pmtrx[11]*(Q_010000001*PR_011000001000+Q_010000101*PR_011000001001+Q_110000001*PR_011000001100+Q_110000101*PR_011000001101);
ans_temp[ans_id*9+1]+=Pmtrx[9]*(Q_001010000*PR_011000001000+Q_001110000*PR_011000001010+Q_101010000*PR_011000001100+Q_101110000*PR_011000001110);
ans_temp[ans_id*9+1]+=Pmtrx[10]*(Q_000011000*PR_011000001000+Q_000111000*PR_011000001010+Q_000211000*PR_011000001020);
ans_temp[ans_id*9+1]+=Pmtrx[11]*(Q_000010001*PR_011000001000+Q_000010101*PR_011000001001+Q_000110001*PR_011000001010+Q_000110101*PR_011000001011);
ans_temp[ans_id*9+2]+=Pmtrx[9]*(Q_001000010*PR_011000001000+Q_001000110*PR_011000001001+Q_101000010*PR_011000001100+Q_101000110*PR_011000001101);
ans_temp[ans_id*9+2]+=Pmtrx[10]*(Q_000001010*PR_011000001000+Q_000001110*PR_011000001001+Q_000101010*PR_011000001010+Q_000101110*PR_011000001011);
ans_temp[ans_id*9+2]+=Pmtrx[11]*(Q_000000011*PR_011000001000+Q_000000111*PR_011000001001+Q_000000211*PR_011000001002);
ans_temp[ans_id*9+0]+=Pmtrx[12]*(Q_011000000*PR_010001001000+Q_111000000*PR_010001001100+Q_211000000*PR_010001001200);
ans_temp[ans_id*9+0]+=Pmtrx[13]*(Q_010001000*PR_010001001000+Q_010101000*PR_010001001010+Q_110001000*PR_010001001100+Q_110101000*PR_010001001110);
ans_temp[ans_id*9+0]+=Pmtrx[14]*(Q_010000001*PR_010001001000+Q_010000101*PR_010001001001+Q_110000001*PR_010001001100+Q_110000101*PR_010001001101);
ans_temp[ans_id*9+1]+=Pmtrx[12]*(Q_001010000*PR_010001001000+Q_001110000*PR_010001001010+Q_101010000*PR_010001001100+Q_101110000*PR_010001001110);
ans_temp[ans_id*9+1]+=Pmtrx[13]*(Q_000011000*PR_010001001000+Q_000111000*PR_010001001010+Q_000211000*PR_010001001020);
ans_temp[ans_id*9+1]+=Pmtrx[14]*(Q_000010001*PR_010001001000+Q_000010101*PR_010001001001+Q_000110001*PR_010001001010+Q_000110101*PR_010001001011);
ans_temp[ans_id*9+2]+=Pmtrx[12]*(Q_001000010*PR_010001001000+Q_001000110*PR_010001001001+Q_101000010*PR_010001001100+Q_101000110*PR_010001001101);
ans_temp[ans_id*9+2]+=Pmtrx[13]*(Q_000001010*PR_010001001000+Q_000001110*PR_010001001001+Q_000101010*PR_010001001010+Q_000101110*PR_010001001011);
ans_temp[ans_id*9+2]+=Pmtrx[14]*(Q_000000011*PR_010001001000+Q_000000111*PR_010001001001+Q_000000211*PR_010001001002);
ans_temp[ans_id*9+0]+=Pmtrx[15]*(Q_011000000*PR_010000002000+Q_111000000*PR_010000002100+Q_211000000*PR_010000002200);
ans_temp[ans_id*9+0]+=Pmtrx[16]*(Q_010001000*PR_010000002000+Q_010101000*PR_010000002010+Q_110001000*PR_010000002100+Q_110101000*PR_010000002110);
ans_temp[ans_id*9+0]+=Pmtrx[17]*(Q_010000001*PR_010000002000+Q_010000101*PR_010000002001+Q_110000001*PR_010000002100+Q_110000101*PR_010000002101);
ans_temp[ans_id*9+1]+=Pmtrx[15]*(Q_001010000*PR_010000002000+Q_001110000*PR_010000002010+Q_101010000*PR_010000002100+Q_101110000*PR_010000002110);
ans_temp[ans_id*9+1]+=Pmtrx[16]*(Q_000011000*PR_010000002000+Q_000111000*PR_010000002010+Q_000211000*PR_010000002020);
ans_temp[ans_id*9+1]+=Pmtrx[17]*(Q_000010001*PR_010000002000+Q_000010101*PR_010000002001+Q_000110001*PR_010000002010+Q_000110101*PR_010000002011);
ans_temp[ans_id*9+2]+=Pmtrx[15]*(Q_001000010*PR_010000002000+Q_001000110*PR_010000002001+Q_101000010*PR_010000002100+Q_101000110*PR_010000002101);
ans_temp[ans_id*9+2]+=Pmtrx[16]*(Q_000001010*PR_010000002000+Q_000001110*PR_010000002001+Q_000101010*PR_010000002010+Q_000101110*PR_010000002011);
ans_temp[ans_id*9+2]+=Pmtrx[17]*(Q_000000011*PR_010000002000+Q_000000111*PR_010000002001+Q_000000211*PR_010000002002);
ans_temp[ans_id*9+3]+=Pmtrx[0]*(Q_011000000*PR_002010000000+Q_111000000*PR_002010000100+Q_211000000*PR_002010000200);
ans_temp[ans_id*9+3]+=Pmtrx[1]*(Q_010001000*PR_002010000000+Q_010101000*PR_002010000010+Q_110001000*PR_002010000100+Q_110101000*PR_002010000110);
ans_temp[ans_id*9+3]+=Pmtrx[2]*(Q_010000001*PR_002010000000+Q_010000101*PR_002010000001+Q_110000001*PR_002010000100+Q_110000101*PR_002010000101);
ans_temp[ans_id*9+4]+=Pmtrx[0]*(Q_001010000*PR_002010000000+Q_001110000*PR_002010000010+Q_101010000*PR_002010000100+Q_101110000*PR_002010000110);
ans_temp[ans_id*9+4]+=Pmtrx[1]*(Q_000011000*PR_002010000000+Q_000111000*PR_002010000010+Q_000211000*PR_002010000020);
ans_temp[ans_id*9+4]+=Pmtrx[2]*(Q_000010001*PR_002010000000+Q_000010101*PR_002010000001+Q_000110001*PR_002010000010+Q_000110101*PR_002010000011);
ans_temp[ans_id*9+5]+=Pmtrx[0]*(Q_001000010*PR_002010000000+Q_001000110*PR_002010000001+Q_101000010*PR_002010000100+Q_101000110*PR_002010000101);
ans_temp[ans_id*9+5]+=Pmtrx[1]*(Q_000001010*PR_002010000000+Q_000001110*PR_002010000001+Q_000101010*PR_002010000010+Q_000101110*PR_002010000011);
ans_temp[ans_id*9+5]+=Pmtrx[2]*(Q_000000011*PR_002010000000+Q_000000111*PR_002010000001+Q_000000211*PR_002010000002);
ans_temp[ans_id*9+3]+=Pmtrx[3]*(Q_011000000*PR_001011000000+Q_111000000*PR_001011000100+Q_211000000*PR_001011000200);
ans_temp[ans_id*9+3]+=Pmtrx[4]*(Q_010001000*PR_001011000000+Q_010101000*PR_001011000010+Q_110001000*PR_001011000100+Q_110101000*PR_001011000110);
ans_temp[ans_id*9+3]+=Pmtrx[5]*(Q_010000001*PR_001011000000+Q_010000101*PR_001011000001+Q_110000001*PR_001011000100+Q_110000101*PR_001011000101);
ans_temp[ans_id*9+4]+=Pmtrx[3]*(Q_001010000*PR_001011000000+Q_001110000*PR_001011000010+Q_101010000*PR_001011000100+Q_101110000*PR_001011000110);
ans_temp[ans_id*9+4]+=Pmtrx[4]*(Q_000011000*PR_001011000000+Q_000111000*PR_001011000010+Q_000211000*PR_001011000020);
ans_temp[ans_id*9+4]+=Pmtrx[5]*(Q_000010001*PR_001011000000+Q_000010101*PR_001011000001+Q_000110001*PR_001011000010+Q_000110101*PR_001011000011);
ans_temp[ans_id*9+5]+=Pmtrx[3]*(Q_001000010*PR_001011000000+Q_001000110*PR_001011000001+Q_101000010*PR_001011000100+Q_101000110*PR_001011000101);
ans_temp[ans_id*9+5]+=Pmtrx[4]*(Q_000001010*PR_001011000000+Q_000001110*PR_001011000001+Q_000101010*PR_001011000010+Q_000101110*PR_001011000011);
ans_temp[ans_id*9+5]+=Pmtrx[5]*(Q_000000011*PR_001011000000+Q_000000111*PR_001011000001+Q_000000211*PR_001011000002);
ans_temp[ans_id*9+3]+=Pmtrx[6]*(Q_011000000*PR_000012000000+Q_111000000*PR_000012000100+Q_211000000*PR_000012000200);
ans_temp[ans_id*9+3]+=Pmtrx[7]*(Q_010001000*PR_000012000000+Q_010101000*PR_000012000010+Q_110001000*PR_000012000100+Q_110101000*PR_000012000110);
ans_temp[ans_id*9+3]+=Pmtrx[8]*(Q_010000001*PR_000012000000+Q_010000101*PR_000012000001+Q_110000001*PR_000012000100+Q_110000101*PR_000012000101);
ans_temp[ans_id*9+4]+=Pmtrx[6]*(Q_001010000*PR_000012000000+Q_001110000*PR_000012000010+Q_101010000*PR_000012000100+Q_101110000*PR_000012000110);
ans_temp[ans_id*9+4]+=Pmtrx[7]*(Q_000011000*PR_000012000000+Q_000111000*PR_000012000010+Q_000211000*PR_000012000020);
ans_temp[ans_id*9+4]+=Pmtrx[8]*(Q_000010001*PR_000012000000+Q_000010101*PR_000012000001+Q_000110001*PR_000012000010+Q_000110101*PR_000012000011);
ans_temp[ans_id*9+5]+=Pmtrx[6]*(Q_001000010*PR_000012000000+Q_001000110*PR_000012000001+Q_101000010*PR_000012000100+Q_101000110*PR_000012000101);
ans_temp[ans_id*9+5]+=Pmtrx[7]*(Q_000001010*PR_000012000000+Q_000001110*PR_000012000001+Q_000101010*PR_000012000010+Q_000101110*PR_000012000011);
ans_temp[ans_id*9+5]+=Pmtrx[8]*(Q_000000011*PR_000012000000+Q_000000111*PR_000012000001+Q_000000211*PR_000012000002);
ans_temp[ans_id*9+3]+=Pmtrx[9]*(Q_011000000*PR_001010001000+Q_111000000*PR_001010001100+Q_211000000*PR_001010001200);
ans_temp[ans_id*9+3]+=Pmtrx[10]*(Q_010001000*PR_001010001000+Q_010101000*PR_001010001010+Q_110001000*PR_001010001100+Q_110101000*PR_001010001110);
ans_temp[ans_id*9+3]+=Pmtrx[11]*(Q_010000001*PR_001010001000+Q_010000101*PR_001010001001+Q_110000001*PR_001010001100+Q_110000101*PR_001010001101);
ans_temp[ans_id*9+4]+=Pmtrx[9]*(Q_001010000*PR_001010001000+Q_001110000*PR_001010001010+Q_101010000*PR_001010001100+Q_101110000*PR_001010001110);
ans_temp[ans_id*9+4]+=Pmtrx[10]*(Q_000011000*PR_001010001000+Q_000111000*PR_001010001010+Q_000211000*PR_001010001020);
ans_temp[ans_id*9+4]+=Pmtrx[11]*(Q_000010001*PR_001010001000+Q_000010101*PR_001010001001+Q_000110001*PR_001010001010+Q_000110101*PR_001010001011);
ans_temp[ans_id*9+5]+=Pmtrx[9]*(Q_001000010*PR_001010001000+Q_001000110*PR_001010001001+Q_101000010*PR_001010001100+Q_101000110*PR_001010001101);
ans_temp[ans_id*9+5]+=Pmtrx[10]*(Q_000001010*PR_001010001000+Q_000001110*PR_001010001001+Q_000101010*PR_001010001010+Q_000101110*PR_001010001011);
ans_temp[ans_id*9+5]+=Pmtrx[11]*(Q_000000011*PR_001010001000+Q_000000111*PR_001010001001+Q_000000211*PR_001010001002);
ans_temp[ans_id*9+3]+=Pmtrx[12]*(Q_011000000*PR_000011001000+Q_111000000*PR_000011001100+Q_211000000*PR_000011001200);
ans_temp[ans_id*9+3]+=Pmtrx[13]*(Q_010001000*PR_000011001000+Q_010101000*PR_000011001010+Q_110001000*PR_000011001100+Q_110101000*PR_000011001110);
ans_temp[ans_id*9+3]+=Pmtrx[14]*(Q_010000001*PR_000011001000+Q_010000101*PR_000011001001+Q_110000001*PR_000011001100+Q_110000101*PR_000011001101);
ans_temp[ans_id*9+4]+=Pmtrx[12]*(Q_001010000*PR_000011001000+Q_001110000*PR_000011001010+Q_101010000*PR_000011001100+Q_101110000*PR_000011001110);
ans_temp[ans_id*9+4]+=Pmtrx[13]*(Q_000011000*PR_000011001000+Q_000111000*PR_000011001010+Q_000211000*PR_000011001020);
ans_temp[ans_id*9+4]+=Pmtrx[14]*(Q_000010001*PR_000011001000+Q_000010101*PR_000011001001+Q_000110001*PR_000011001010+Q_000110101*PR_000011001011);
ans_temp[ans_id*9+5]+=Pmtrx[12]*(Q_001000010*PR_000011001000+Q_001000110*PR_000011001001+Q_101000010*PR_000011001100+Q_101000110*PR_000011001101);
ans_temp[ans_id*9+5]+=Pmtrx[13]*(Q_000001010*PR_000011001000+Q_000001110*PR_000011001001+Q_000101010*PR_000011001010+Q_000101110*PR_000011001011);
ans_temp[ans_id*9+5]+=Pmtrx[14]*(Q_000000011*PR_000011001000+Q_000000111*PR_000011001001+Q_000000211*PR_000011001002);
ans_temp[ans_id*9+3]+=Pmtrx[15]*(Q_011000000*PR_000010002000+Q_111000000*PR_000010002100+Q_211000000*PR_000010002200);
ans_temp[ans_id*9+3]+=Pmtrx[16]*(Q_010001000*PR_000010002000+Q_010101000*PR_000010002010+Q_110001000*PR_000010002100+Q_110101000*PR_000010002110);
ans_temp[ans_id*9+3]+=Pmtrx[17]*(Q_010000001*PR_000010002000+Q_010000101*PR_000010002001+Q_110000001*PR_000010002100+Q_110000101*PR_000010002101);
ans_temp[ans_id*9+4]+=Pmtrx[15]*(Q_001010000*PR_000010002000+Q_001110000*PR_000010002010+Q_101010000*PR_000010002100+Q_101110000*PR_000010002110);
ans_temp[ans_id*9+4]+=Pmtrx[16]*(Q_000011000*PR_000010002000+Q_000111000*PR_000010002010+Q_000211000*PR_000010002020);
ans_temp[ans_id*9+4]+=Pmtrx[17]*(Q_000010001*PR_000010002000+Q_000010101*PR_000010002001+Q_000110001*PR_000010002010+Q_000110101*PR_000010002011);
ans_temp[ans_id*9+5]+=Pmtrx[15]*(Q_001000010*PR_000010002000+Q_001000110*PR_000010002001+Q_101000010*PR_000010002100+Q_101000110*PR_000010002101);
ans_temp[ans_id*9+5]+=Pmtrx[16]*(Q_000001010*PR_000010002000+Q_000001110*PR_000010002001+Q_000101010*PR_000010002010+Q_000101110*PR_000010002011);
ans_temp[ans_id*9+5]+=Pmtrx[17]*(Q_000000011*PR_000010002000+Q_000000111*PR_000010002001+Q_000000211*PR_000010002002);
ans_temp[ans_id*9+6]+=Pmtrx[0]*(Q_011000000*PR_002000010000+Q_111000000*PR_002000010100+Q_211000000*PR_002000010200);
ans_temp[ans_id*9+6]+=Pmtrx[1]*(Q_010001000*PR_002000010000+Q_010101000*PR_002000010010+Q_110001000*PR_002000010100+Q_110101000*PR_002000010110);
ans_temp[ans_id*9+6]+=Pmtrx[2]*(Q_010000001*PR_002000010000+Q_010000101*PR_002000010001+Q_110000001*PR_002000010100+Q_110000101*PR_002000010101);
ans_temp[ans_id*9+7]+=Pmtrx[0]*(Q_001010000*PR_002000010000+Q_001110000*PR_002000010010+Q_101010000*PR_002000010100+Q_101110000*PR_002000010110);
ans_temp[ans_id*9+7]+=Pmtrx[1]*(Q_000011000*PR_002000010000+Q_000111000*PR_002000010010+Q_000211000*PR_002000010020);
ans_temp[ans_id*9+7]+=Pmtrx[2]*(Q_000010001*PR_002000010000+Q_000010101*PR_002000010001+Q_000110001*PR_002000010010+Q_000110101*PR_002000010011);
ans_temp[ans_id*9+8]+=Pmtrx[0]*(Q_001000010*PR_002000010000+Q_001000110*PR_002000010001+Q_101000010*PR_002000010100+Q_101000110*PR_002000010101);
ans_temp[ans_id*9+8]+=Pmtrx[1]*(Q_000001010*PR_002000010000+Q_000001110*PR_002000010001+Q_000101010*PR_002000010010+Q_000101110*PR_002000010011);
ans_temp[ans_id*9+8]+=Pmtrx[2]*(Q_000000011*PR_002000010000+Q_000000111*PR_002000010001+Q_000000211*PR_002000010002);
ans_temp[ans_id*9+6]+=Pmtrx[3]*(Q_011000000*PR_001001010000+Q_111000000*PR_001001010100+Q_211000000*PR_001001010200);
ans_temp[ans_id*9+6]+=Pmtrx[4]*(Q_010001000*PR_001001010000+Q_010101000*PR_001001010010+Q_110001000*PR_001001010100+Q_110101000*PR_001001010110);
ans_temp[ans_id*9+6]+=Pmtrx[5]*(Q_010000001*PR_001001010000+Q_010000101*PR_001001010001+Q_110000001*PR_001001010100+Q_110000101*PR_001001010101);
ans_temp[ans_id*9+7]+=Pmtrx[3]*(Q_001010000*PR_001001010000+Q_001110000*PR_001001010010+Q_101010000*PR_001001010100+Q_101110000*PR_001001010110);
ans_temp[ans_id*9+7]+=Pmtrx[4]*(Q_000011000*PR_001001010000+Q_000111000*PR_001001010010+Q_000211000*PR_001001010020);
ans_temp[ans_id*9+7]+=Pmtrx[5]*(Q_000010001*PR_001001010000+Q_000010101*PR_001001010001+Q_000110001*PR_001001010010+Q_000110101*PR_001001010011);
ans_temp[ans_id*9+8]+=Pmtrx[3]*(Q_001000010*PR_001001010000+Q_001000110*PR_001001010001+Q_101000010*PR_001001010100+Q_101000110*PR_001001010101);
ans_temp[ans_id*9+8]+=Pmtrx[4]*(Q_000001010*PR_001001010000+Q_000001110*PR_001001010001+Q_000101010*PR_001001010010+Q_000101110*PR_001001010011);
ans_temp[ans_id*9+8]+=Pmtrx[5]*(Q_000000011*PR_001001010000+Q_000000111*PR_001001010001+Q_000000211*PR_001001010002);
ans_temp[ans_id*9+6]+=Pmtrx[6]*(Q_011000000*PR_000002010000+Q_111000000*PR_000002010100+Q_211000000*PR_000002010200);
ans_temp[ans_id*9+6]+=Pmtrx[7]*(Q_010001000*PR_000002010000+Q_010101000*PR_000002010010+Q_110001000*PR_000002010100+Q_110101000*PR_000002010110);
ans_temp[ans_id*9+6]+=Pmtrx[8]*(Q_010000001*PR_000002010000+Q_010000101*PR_000002010001+Q_110000001*PR_000002010100+Q_110000101*PR_000002010101);
ans_temp[ans_id*9+7]+=Pmtrx[6]*(Q_001010000*PR_000002010000+Q_001110000*PR_000002010010+Q_101010000*PR_000002010100+Q_101110000*PR_000002010110);
ans_temp[ans_id*9+7]+=Pmtrx[7]*(Q_000011000*PR_000002010000+Q_000111000*PR_000002010010+Q_000211000*PR_000002010020);
ans_temp[ans_id*9+7]+=Pmtrx[8]*(Q_000010001*PR_000002010000+Q_000010101*PR_000002010001+Q_000110001*PR_000002010010+Q_000110101*PR_000002010011);
ans_temp[ans_id*9+8]+=Pmtrx[6]*(Q_001000010*PR_000002010000+Q_001000110*PR_000002010001+Q_101000010*PR_000002010100+Q_101000110*PR_000002010101);
ans_temp[ans_id*9+8]+=Pmtrx[7]*(Q_000001010*PR_000002010000+Q_000001110*PR_000002010001+Q_000101010*PR_000002010010+Q_000101110*PR_000002010011);
ans_temp[ans_id*9+8]+=Pmtrx[8]*(Q_000000011*PR_000002010000+Q_000000111*PR_000002010001+Q_000000211*PR_000002010002);
ans_temp[ans_id*9+6]+=Pmtrx[9]*(Q_011000000*PR_001000011000+Q_111000000*PR_001000011100+Q_211000000*PR_001000011200);
ans_temp[ans_id*9+6]+=Pmtrx[10]*(Q_010001000*PR_001000011000+Q_010101000*PR_001000011010+Q_110001000*PR_001000011100+Q_110101000*PR_001000011110);
ans_temp[ans_id*9+6]+=Pmtrx[11]*(Q_010000001*PR_001000011000+Q_010000101*PR_001000011001+Q_110000001*PR_001000011100+Q_110000101*PR_001000011101);
ans_temp[ans_id*9+7]+=Pmtrx[9]*(Q_001010000*PR_001000011000+Q_001110000*PR_001000011010+Q_101010000*PR_001000011100+Q_101110000*PR_001000011110);
ans_temp[ans_id*9+7]+=Pmtrx[10]*(Q_000011000*PR_001000011000+Q_000111000*PR_001000011010+Q_000211000*PR_001000011020);
ans_temp[ans_id*9+7]+=Pmtrx[11]*(Q_000010001*PR_001000011000+Q_000010101*PR_001000011001+Q_000110001*PR_001000011010+Q_000110101*PR_001000011011);
ans_temp[ans_id*9+8]+=Pmtrx[9]*(Q_001000010*PR_001000011000+Q_001000110*PR_001000011001+Q_101000010*PR_001000011100+Q_101000110*PR_001000011101);
ans_temp[ans_id*9+8]+=Pmtrx[10]*(Q_000001010*PR_001000011000+Q_000001110*PR_001000011001+Q_000101010*PR_001000011010+Q_000101110*PR_001000011011);
ans_temp[ans_id*9+8]+=Pmtrx[11]*(Q_000000011*PR_001000011000+Q_000000111*PR_001000011001+Q_000000211*PR_001000011002);
ans_temp[ans_id*9+6]+=Pmtrx[12]*(Q_011000000*PR_000001011000+Q_111000000*PR_000001011100+Q_211000000*PR_000001011200);
ans_temp[ans_id*9+6]+=Pmtrx[13]*(Q_010001000*PR_000001011000+Q_010101000*PR_000001011010+Q_110001000*PR_000001011100+Q_110101000*PR_000001011110);
ans_temp[ans_id*9+6]+=Pmtrx[14]*(Q_010000001*PR_000001011000+Q_010000101*PR_000001011001+Q_110000001*PR_000001011100+Q_110000101*PR_000001011101);
ans_temp[ans_id*9+7]+=Pmtrx[12]*(Q_001010000*PR_000001011000+Q_001110000*PR_000001011010+Q_101010000*PR_000001011100+Q_101110000*PR_000001011110);
ans_temp[ans_id*9+7]+=Pmtrx[13]*(Q_000011000*PR_000001011000+Q_000111000*PR_000001011010+Q_000211000*PR_000001011020);
ans_temp[ans_id*9+7]+=Pmtrx[14]*(Q_000010001*PR_000001011000+Q_000010101*PR_000001011001+Q_000110001*PR_000001011010+Q_000110101*PR_000001011011);
ans_temp[ans_id*9+8]+=Pmtrx[12]*(Q_001000010*PR_000001011000+Q_001000110*PR_000001011001+Q_101000010*PR_000001011100+Q_101000110*PR_000001011101);
ans_temp[ans_id*9+8]+=Pmtrx[13]*(Q_000001010*PR_000001011000+Q_000001110*PR_000001011001+Q_000101010*PR_000001011010+Q_000101110*PR_000001011011);
ans_temp[ans_id*9+8]+=Pmtrx[14]*(Q_000000011*PR_000001011000+Q_000000111*PR_000001011001+Q_000000211*PR_000001011002);
ans_temp[ans_id*9+6]+=Pmtrx[15]*(Q_011000000*PR_000000012000+Q_111000000*PR_000000012100+Q_211000000*PR_000000012200);
ans_temp[ans_id*9+6]+=Pmtrx[16]*(Q_010001000*PR_000000012000+Q_010101000*PR_000000012010+Q_110001000*PR_000000012100+Q_110101000*PR_000000012110);
ans_temp[ans_id*9+6]+=Pmtrx[17]*(Q_010000001*PR_000000012000+Q_010000101*PR_000000012001+Q_110000001*PR_000000012100+Q_110000101*PR_000000012101);
ans_temp[ans_id*9+7]+=Pmtrx[15]*(Q_001010000*PR_000000012000+Q_001110000*PR_000000012010+Q_101010000*PR_000000012100+Q_101110000*PR_000000012110);
ans_temp[ans_id*9+7]+=Pmtrx[16]*(Q_000011000*PR_000000012000+Q_000111000*PR_000000012010+Q_000211000*PR_000000012020);
ans_temp[ans_id*9+7]+=Pmtrx[17]*(Q_000010001*PR_000000012000+Q_000010101*PR_000000012001+Q_000110001*PR_000000012010+Q_000110101*PR_000000012011);
ans_temp[ans_id*9+8]+=Pmtrx[15]*(Q_001000010*PR_000000012000+Q_001000110*PR_000000012001+Q_101000010*PR_000000012100+Q_101000110*PR_000000012101);
ans_temp[ans_id*9+8]+=Pmtrx[16]*(Q_000001010*PR_000000012000+Q_000001110*PR_000000012001+Q_000101010*PR_000000012010+Q_000101110*PR_000000012011);
ans_temp[ans_id*9+8]+=Pmtrx[17]*(Q_000000011*PR_000000012000+Q_000000111*PR_000000012001+Q_000000211*PR_000000012002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<9;ians++){
ans_temp[tId_x*9+ians]+=ans_temp[(tId_x+num_thread)*9+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=ans_temp[(tId_x)*9+ians];
}
}
}
}
}
__global__ void MD_Kq_pdpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*9];
for(int i=0;i<9;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[6];
Ft_fs_5(5,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[5];
double R_200[4];
double R_300[3];
double R_400[2];
double R_500[1];
double R_010[5];
double R_110[4];
double R_210[3];
double R_310[2];
double R_410[1];
double R_020[4];
double R_120[3];
double R_220[2];
double R_320[1];
double R_030[3];
double R_130[2];
double R_230[1];
double R_040[2];
double R_140[1];
double R_050[1];
double R_001[5];
double R_101[4];
double R_201[3];
double R_301[2];
double R_401[1];
double R_011[4];
double R_111[3];
double R_211[2];
double R_311[1];
double R_021[3];
double R_121[2];
double R_221[1];
double R_031[2];
double R_131[1];
double R_041[1];
double R_002[4];
double R_102[3];
double R_202[2];
double R_302[1];
double R_012[3];
double R_112[2];
double R_212[1];
double R_022[2];
double R_122[1];
double R_032[1];
double R_003[3];
double R_103[2];
double R_203[1];
double R_013[2];
double R_113[1];
double R_023[1];
double R_004[2];
double R_104[1];
double R_014[1];
double R_005[1];
for(int i=0;i<5;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<5;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<5;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<4;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<4;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<4;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<4;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<3;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<3;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<3;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<3;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<3;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<3;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<3;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<3;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<3;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<2;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<2;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<2;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<2;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<2;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<2;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<2;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<2;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<2;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<2;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<2;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<2;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<2;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<1;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<1;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<1;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<1;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<1;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<1;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<1;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<1;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<1;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<1;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<1;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<1;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<1;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<1;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<1;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<1;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<1;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<1;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<1;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<1;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<1;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double QR_011000000003=Q_011000000*R_003[0]+-1*Q_111000000*R_103[0]+Q_211000000*R_203[0];
double QR_010001000003=Q_010001000*R_003[0]+-1*Q_010101000*R_013[0]+-1*Q_110001000*R_103[0]+Q_110101000*R_113[0];
double QR_010000001003=Q_010000001*R_003[0]+-1*Q_010000101*R_004[0]+-1*Q_110000001*R_103[0]+Q_110000101*R_104[0];
double QR_001010000003=Q_001010000*R_003[0]+-1*Q_001110000*R_013[0]+-1*Q_101010000*R_103[0]+Q_101110000*R_113[0];
double QR_000011000003=Q_000011000*R_003[0]+-1*Q_000111000*R_013[0]+Q_000211000*R_023[0];
double QR_000010001003=Q_000010001*R_003[0]+-1*Q_000010101*R_004[0]+-1*Q_000110001*R_013[0]+Q_000110101*R_014[0];
double QR_001000010003=Q_001000010*R_003[0]+-1*Q_001000110*R_004[0]+-1*Q_101000010*R_103[0]+Q_101000110*R_104[0];
double QR_000001010003=Q_000001010*R_003[0]+-1*Q_000001110*R_004[0]+-1*Q_000101010*R_013[0]+Q_000101110*R_014[0];
double QR_000000011003=Q_000000011*R_003[0]+-1*Q_000000111*R_004[0]+Q_000000211*R_005[0];
double QR_011000000012=Q_011000000*R_012[0]+-1*Q_111000000*R_112[0]+Q_211000000*R_212[0];
double QR_010001000012=Q_010001000*R_012[0]+-1*Q_010101000*R_022[0]+-1*Q_110001000*R_112[0]+Q_110101000*R_122[0];
double QR_010000001012=Q_010000001*R_012[0]+-1*Q_010000101*R_013[0]+-1*Q_110000001*R_112[0]+Q_110000101*R_113[0];
double QR_001010000012=Q_001010000*R_012[0]+-1*Q_001110000*R_022[0]+-1*Q_101010000*R_112[0]+Q_101110000*R_122[0];
double QR_000011000012=Q_000011000*R_012[0]+-1*Q_000111000*R_022[0]+Q_000211000*R_032[0];
double QR_000010001012=Q_000010001*R_012[0]+-1*Q_000010101*R_013[0]+-1*Q_000110001*R_022[0]+Q_000110101*R_023[0];
double QR_001000010012=Q_001000010*R_012[0]+-1*Q_001000110*R_013[0]+-1*Q_101000010*R_112[0]+Q_101000110*R_113[0];
double QR_000001010012=Q_000001010*R_012[0]+-1*Q_000001110*R_013[0]+-1*Q_000101010*R_022[0]+Q_000101110*R_023[0];
double QR_000000011012=Q_000000011*R_012[0]+-1*Q_000000111*R_013[0]+Q_000000211*R_014[0];
double QR_011000000021=Q_011000000*R_021[0]+-1*Q_111000000*R_121[0]+Q_211000000*R_221[0];
double QR_010001000021=Q_010001000*R_021[0]+-1*Q_010101000*R_031[0]+-1*Q_110001000*R_121[0]+Q_110101000*R_131[0];
double QR_010000001021=Q_010000001*R_021[0]+-1*Q_010000101*R_022[0]+-1*Q_110000001*R_121[0]+Q_110000101*R_122[0];
double QR_001010000021=Q_001010000*R_021[0]+-1*Q_001110000*R_031[0]+-1*Q_101010000*R_121[0]+Q_101110000*R_131[0];
double QR_000011000021=Q_000011000*R_021[0]+-1*Q_000111000*R_031[0]+Q_000211000*R_041[0];
double QR_000010001021=Q_000010001*R_021[0]+-1*Q_000010101*R_022[0]+-1*Q_000110001*R_031[0]+Q_000110101*R_032[0];
double QR_001000010021=Q_001000010*R_021[0]+-1*Q_001000110*R_022[0]+-1*Q_101000010*R_121[0]+Q_101000110*R_122[0];
double QR_000001010021=Q_000001010*R_021[0]+-1*Q_000001110*R_022[0]+-1*Q_000101010*R_031[0]+Q_000101110*R_032[0];
double QR_000000011021=Q_000000011*R_021[0]+-1*Q_000000111*R_022[0]+Q_000000211*R_023[0];
double QR_011000000030=Q_011000000*R_030[0]+-1*Q_111000000*R_130[0]+Q_211000000*R_230[0];
double QR_010001000030=Q_010001000*R_030[0]+-1*Q_010101000*R_040[0]+-1*Q_110001000*R_130[0]+Q_110101000*R_140[0];
double QR_010000001030=Q_010000001*R_030[0]+-1*Q_010000101*R_031[0]+-1*Q_110000001*R_130[0]+Q_110000101*R_131[0];
double QR_001010000030=Q_001010000*R_030[0]+-1*Q_001110000*R_040[0]+-1*Q_101010000*R_130[0]+Q_101110000*R_140[0];
double QR_000011000030=Q_000011000*R_030[0]+-1*Q_000111000*R_040[0]+Q_000211000*R_050[0];
double QR_000010001030=Q_000010001*R_030[0]+-1*Q_000010101*R_031[0]+-1*Q_000110001*R_040[0]+Q_000110101*R_041[0];
double QR_001000010030=Q_001000010*R_030[0]+-1*Q_001000110*R_031[0]+-1*Q_101000010*R_130[0]+Q_101000110*R_131[0];
double QR_000001010030=Q_000001010*R_030[0]+-1*Q_000001110*R_031[0]+-1*Q_000101010*R_040[0]+Q_000101110*R_041[0];
double QR_000000011030=Q_000000011*R_030[0]+-1*Q_000000111*R_031[0]+Q_000000211*R_032[0];
double QR_011000000102=Q_011000000*R_102[0]+-1*Q_111000000*R_202[0]+Q_211000000*R_302[0];
double QR_010001000102=Q_010001000*R_102[0]+-1*Q_010101000*R_112[0]+-1*Q_110001000*R_202[0]+Q_110101000*R_212[0];
double QR_010000001102=Q_010000001*R_102[0]+-1*Q_010000101*R_103[0]+-1*Q_110000001*R_202[0]+Q_110000101*R_203[0];
double QR_001010000102=Q_001010000*R_102[0]+-1*Q_001110000*R_112[0]+-1*Q_101010000*R_202[0]+Q_101110000*R_212[0];
double QR_000011000102=Q_000011000*R_102[0]+-1*Q_000111000*R_112[0]+Q_000211000*R_122[0];
double QR_000010001102=Q_000010001*R_102[0]+-1*Q_000010101*R_103[0]+-1*Q_000110001*R_112[0]+Q_000110101*R_113[0];
double QR_001000010102=Q_001000010*R_102[0]+-1*Q_001000110*R_103[0]+-1*Q_101000010*R_202[0]+Q_101000110*R_203[0];
double QR_000001010102=Q_000001010*R_102[0]+-1*Q_000001110*R_103[0]+-1*Q_000101010*R_112[0]+Q_000101110*R_113[0];
double QR_000000011102=Q_000000011*R_102[0]+-1*Q_000000111*R_103[0]+Q_000000211*R_104[0];
double QR_011000000111=Q_011000000*R_111[0]+-1*Q_111000000*R_211[0]+Q_211000000*R_311[0];
double QR_010001000111=Q_010001000*R_111[0]+-1*Q_010101000*R_121[0]+-1*Q_110001000*R_211[0]+Q_110101000*R_221[0];
double QR_010000001111=Q_010000001*R_111[0]+-1*Q_010000101*R_112[0]+-1*Q_110000001*R_211[0]+Q_110000101*R_212[0];
double QR_001010000111=Q_001010000*R_111[0]+-1*Q_001110000*R_121[0]+-1*Q_101010000*R_211[0]+Q_101110000*R_221[0];
double QR_000011000111=Q_000011000*R_111[0]+-1*Q_000111000*R_121[0]+Q_000211000*R_131[0];
double QR_000010001111=Q_000010001*R_111[0]+-1*Q_000010101*R_112[0]+-1*Q_000110001*R_121[0]+Q_000110101*R_122[0];
double QR_001000010111=Q_001000010*R_111[0]+-1*Q_001000110*R_112[0]+-1*Q_101000010*R_211[0]+Q_101000110*R_212[0];
double QR_000001010111=Q_000001010*R_111[0]+-1*Q_000001110*R_112[0]+-1*Q_000101010*R_121[0]+Q_000101110*R_122[0];
double QR_000000011111=Q_000000011*R_111[0]+-1*Q_000000111*R_112[0]+Q_000000211*R_113[0];
double QR_011000000120=Q_011000000*R_120[0]+-1*Q_111000000*R_220[0]+Q_211000000*R_320[0];
double QR_010001000120=Q_010001000*R_120[0]+-1*Q_010101000*R_130[0]+-1*Q_110001000*R_220[0]+Q_110101000*R_230[0];
double QR_010000001120=Q_010000001*R_120[0]+-1*Q_010000101*R_121[0]+-1*Q_110000001*R_220[0]+Q_110000101*R_221[0];
double QR_001010000120=Q_001010000*R_120[0]+-1*Q_001110000*R_130[0]+-1*Q_101010000*R_220[0]+Q_101110000*R_230[0];
double QR_000011000120=Q_000011000*R_120[0]+-1*Q_000111000*R_130[0]+Q_000211000*R_140[0];
double QR_000010001120=Q_000010001*R_120[0]+-1*Q_000010101*R_121[0]+-1*Q_000110001*R_130[0]+Q_000110101*R_131[0];
double QR_001000010120=Q_001000010*R_120[0]+-1*Q_001000110*R_121[0]+-1*Q_101000010*R_220[0]+Q_101000110*R_221[0];
double QR_000001010120=Q_000001010*R_120[0]+-1*Q_000001110*R_121[0]+-1*Q_000101010*R_130[0]+Q_000101110*R_131[0];
double QR_000000011120=Q_000000011*R_120[0]+-1*Q_000000111*R_121[0]+Q_000000211*R_122[0];
double QR_011000000201=Q_011000000*R_201[0]+-1*Q_111000000*R_301[0]+Q_211000000*R_401[0];
double QR_010001000201=Q_010001000*R_201[0]+-1*Q_010101000*R_211[0]+-1*Q_110001000*R_301[0]+Q_110101000*R_311[0];
double QR_010000001201=Q_010000001*R_201[0]+-1*Q_010000101*R_202[0]+-1*Q_110000001*R_301[0]+Q_110000101*R_302[0];
double QR_001010000201=Q_001010000*R_201[0]+-1*Q_001110000*R_211[0]+-1*Q_101010000*R_301[0]+Q_101110000*R_311[0];
double QR_000011000201=Q_000011000*R_201[0]+-1*Q_000111000*R_211[0]+Q_000211000*R_221[0];
double QR_000010001201=Q_000010001*R_201[0]+-1*Q_000010101*R_202[0]+-1*Q_000110001*R_211[0]+Q_000110101*R_212[0];
double QR_001000010201=Q_001000010*R_201[0]+-1*Q_001000110*R_202[0]+-1*Q_101000010*R_301[0]+Q_101000110*R_302[0];
double QR_000001010201=Q_000001010*R_201[0]+-1*Q_000001110*R_202[0]+-1*Q_000101010*R_211[0]+Q_000101110*R_212[0];
double QR_000000011201=Q_000000011*R_201[0]+-1*Q_000000111*R_202[0]+Q_000000211*R_203[0];
double QR_011000000210=Q_011000000*R_210[0]+-1*Q_111000000*R_310[0]+Q_211000000*R_410[0];
double QR_010001000210=Q_010001000*R_210[0]+-1*Q_010101000*R_220[0]+-1*Q_110001000*R_310[0]+Q_110101000*R_320[0];
double QR_010000001210=Q_010000001*R_210[0]+-1*Q_010000101*R_211[0]+-1*Q_110000001*R_310[0]+Q_110000101*R_311[0];
double QR_001010000210=Q_001010000*R_210[0]+-1*Q_001110000*R_220[0]+-1*Q_101010000*R_310[0]+Q_101110000*R_320[0];
double QR_000011000210=Q_000011000*R_210[0]+-1*Q_000111000*R_220[0]+Q_000211000*R_230[0];
double QR_000010001210=Q_000010001*R_210[0]+-1*Q_000010101*R_211[0]+-1*Q_000110001*R_220[0]+Q_000110101*R_221[0];
double QR_001000010210=Q_001000010*R_210[0]+-1*Q_001000110*R_211[0]+-1*Q_101000010*R_310[0]+Q_101000110*R_311[0];
double QR_000001010210=Q_000001010*R_210[0]+-1*Q_000001110*R_211[0]+-1*Q_000101010*R_220[0]+Q_000101110*R_221[0];
double QR_000000011210=Q_000000011*R_210[0]+-1*Q_000000111*R_211[0]+Q_000000211*R_212[0];
double QR_011000000300=Q_011000000*R_300[0]+-1*Q_111000000*R_400[0]+Q_211000000*R_500[0];
double QR_010001000300=Q_010001000*R_300[0]+-1*Q_010101000*R_310[0]+-1*Q_110001000*R_400[0]+Q_110101000*R_410[0];
double QR_010000001300=Q_010000001*R_300[0]+-1*Q_010000101*R_301[0]+-1*Q_110000001*R_400[0]+Q_110000101*R_401[0];
double QR_001010000300=Q_001010000*R_300[0]+-1*Q_001110000*R_310[0]+-1*Q_101010000*R_400[0]+Q_101110000*R_410[0];
double QR_000011000300=Q_000011000*R_300[0]+-1*Q_000111000*R_310[0]+Q_000211000*R_320[0];
double QR_000010001300=Q_000010001*R_300[0]+-1*Q_000010101*R_301[0]+-1*Q_000110001*R_310[0]+Q_000110101*R_311[0];
double QR_001000010300=Q_001000010*R_300[0]+-1*Q_001000110*R_301[0]+-1*Q_101000010*R_400[0]+Q_101000110*R_401[0];
double QR_000001010300=Q_000001010*R_300[0]+-1*Q_000001110*R_301[0]+-1*Q_000101010*R_310[0]+Q_000101110*R_311[0];
double QR_000000011300=Q_000000011*R_300[0]+-1*Q_000000111*R_301[0]+Q_000000211*R_302[0];
// --- bra-side Hermite expansion coefficient tables (auto-generated) ---
// NOTE(review): mirrors the Qd_* construction above but for the bra pair;
// aPin1 presumably holds 1/(2*Zta) -- confirm against the generator.
// Pd_001/Pd_010 are defined earlier in this kernel, outside this section.
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_012[3];
double Pd_112[3];
double Pd_212[3];
double Pd_312[3];
for(int i=0;i<3;i++){
// base case: first-order coefficient is the constant aPin1 per axis
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
// second-order coefficients from first-order ones
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
// third tier: built from the *11 coefficients computed just above
Pd_012[i]=Pd_111[i]+Pd_001[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_112[i]=2*Pd_211[i]+Pd_001[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_212[i]=Pd_001[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
// highest Hermite order for this angular-momentum combination
Pd_312[i]=aPin1*Pd_211[i];
}
// --- P_* : bra angular-momentum products ---
// Same naming scheme as the Q_* block above: one Pd_* factor per Cartesian
// axis, with the 9-digit suffix encoding the per-axis pattern (generated).
double P_012000000=Pd_012[0];
double P_112000000=Pd_112[0];
double P_212000000=Pd_212[0];
double P_312000000=Pd_312[0];
double P_011001000=Pd_011[0]*Pd_001[1];
double P_011101000=Pd_011[0]*Pd_101[1];
double P_111001000=Pd_111[0]*Pd_001[1];
double P_111101000=Pd_111[0]*Pd_101[1];
double P_211001000=Pd_211[0]*Pd_001[1];
double P_211101000=Pd_211[0]*Pd_101[1];
double P_010002000=Pd_010[0]*Pd_002[1];
double P_010102000=Pd_010[0]*Pd_102[1];
double P_010202000=Pd_010[0]*Pd_202[1];
double P_110002000=Pd_110[0]*Pd_002[1];
double P_110102000=Pd_110[0]*Pd_102[1];
double P_110202000=Pd_110[0]*Pd_202[1];
double P_011000001=Pd_011[0]*Pd_001[2];
double P_011000101=Pd_011[0]*Pd_101[2];
double P_111000001=Pd_111[0]*Pd_001[2];
double P_111000101=Pd_111[0]*Pd_101[2];
double P_211000001=Pd_211[0]*Pd_001[2];
double P_211000101=Pd_211[0]*Pd_101[2];
double P_010001001=Pd_010[0]*Pd_001[1]*Pd_001[2];
double P_010001101=Pd_010[0]*Pd_001[1]*Pd_101[2];
double P_010101001=Pd_010[0]*Pd_101[1]*Pd_001[2];
double P_010101101=Pd_010[0]*Pd_101[1]*Pd_101[2];
double P_110001001=Pd_110[0]*Pd_001[1]*Pd_001[2];
double P_110001101=Pd_110[0]*Pd_001[1]*Pd_101[2];
double P_110101001=Pd_110[0]*Pd_101[1]*Pd_001[2];
double P_110101101=Pd_110[0]*Pd_101[1]*Pd_101[2];
double P_010000002=Pd_010[0]*Pd_002[2];
double P_010000102=Pd_010[0]*Pd_102[2];
double P_010000202=Pd_010[0]*Pd_202[2];
double P_110000002=Pd_110[0]*Pd_002[2];
double P_110000102=Pd_110[0]*Pd_102[2];
double P_110000202=Pd_110[0]*Pd_202[2];
double P_002010000=Pd_002[0]*Pd_010[1];
double P_002110000=Pd_002[0]*Pd_110[1];
double P_102010000=Pd_102[0]*Pd_010[1];
double P_102110000=Pd_102[0]*Pd_110[1];
double P_202010000=Pd_202[0]*Pd_010[1];
double P_202110000=Pd_202[0]*Pd_110[1];
double P_001011000=Pd_001[0]*Pd_011[1];
double P_001111000=Pd_001[0]*Pd_111[1];
double P_001211000=Pd_001[0]*Pd_211[1];
double P_101011000=Pd_101[0]*Pd_011[1];
double P_101111000=Pd_101[0]*Pd_111[1];
double P_101211000=Pd_101[0]*Pd_211[1];
double P_000012000=Pd_012[1];
double P_000112000=Pd_112[1];
double P_000212000=Pd_212[1];
double P_000312000=Pd_312[1];
double P_001010001=Pd_001[0]*Pd_010[1]*Pd_001[2];
double P_001010101=Pd_001[0]*Pd_010[1]*Pd_101[2];
double P_001110001=Pd_001[0]*Pd_110[1]*Pd_001[2];
double P_001110101=Pd_001[0]*Pd_110[1]*Pd_101[2];
double P_101010001=Pd_101[0]*Pd_010[1]*Pd_001[2];
double P_101010101=Pd_101[0]*Pd_010[1]*Pd_101[2];
double P_101110001=Pd_101[0]*Pd_110[1]*Pd_001[2];
double P_101110101=Pd_101[0]*Pd_110[1]*Pd_101[2];
double P_000011001=Pd_011[1]*Pd_001[2];
double P_000011101=Pd_011[1]*Pd_101[2];
double P_000111001=Pd_111[1]*Pd_001[2];
double P_000111101=Pd_111[1]*Pd_101[2];
double P_000211001=Pd_211[1]*Pd_001[2];
double P_000211101=Pd_211[1]*Pd_101[2];
double P_000010002=Pd_010[1]*Pd_002[2];
double P_000010102=Pd_010[1]*Pd_102[2];
double P_000010202=Pd_010[1]*Pd_202[2];
double P_000110002=Pd_110[1]*Pd_002[2];
double P_000110102=Pd_110[1]*Pd_102[2];
double P_000110202=Pd_110[1]*Pd_202[2];
double P_002000010=Pd_002[0]*Pd_010[2];
double P_002000110=Pd_002[0]*Pd_110[2];
double P_102000010=Pd_102[0]*Pd_010[2];
double P_102000110=Pd_102[0]*Pd_110[2];
double P_202000010=Pd_202[0]*Pd_010[2];
double P_202000110=Pd_202[0]*Pd_110[2];
double P_001001010=Pd_001[0]*Pd_001[1]*Pd_010[2];
double P_001001110=Pd_001[0]*Pd_001[1]*Pd_110[2];
double P_001101010=Pd_001[0]*Pd_101[1]*Pd_010[2];
double P_001101110=Pd_001[0]*Pd_101[1]*Pd_110[2];
double P_101001010=Pd_101[0]*Pd_001[1]*Pd_010[2];
double P_101001110=Pd_101[0]*Pd_001[1]*Pd_110[2];
double P_101101010=Pd_101[0]*Pd_101[1]*Pd_010[2];
double P_101101110=Pd_101[0]*Pd_101[1]*Pd_110[2];
double P_000002010=Pd_002[1]*Pd_010[2];
double P_000002110=Pd_002[1]*Pd_110[2];
double P_000102010=Pd_102[1]*Pd_010[2];
double P_000102110=Pd_102[1]*Pd_110[2];
double P_000202010=Pd_202[1]*Pd_010[2];
double P_000202110=Pd_202[1]*Pd_110[2];
double P_001000011=Pd_001[0]*Pd_011[2];
double P_001000111=Pd_001[0]*Pd_111[2];
double P_001000211=Pd_001[0]*Pd_211[2];
double P_101000011=Pd_101[0]*Pd_011[2];
double P_101000111=Pd_101[0]*Pd_111[2];
double P_101000211=Pd_101[0]*Pd_211[2];
double P_000001011=Pd_001[1]*Pd_011[2];
double P_000001111=Pd_001[1]*Pd_111[2];
double P_000001211=Pd_001[1]*Pd_211[2];
double P_000101011=Pd_101[1]*Pd_011[2];
double P_000101111=Pd_101[1]*Pd_111[2];
double P_000101211=Pd_101[1]*Pd_211[2];
double P_000000012=Pd_012[2];
double P_000000112=Pd_112[2];
double P_000000212=Pd_212[2];
double P_000000312=Pd_312[2];
ans_temp[ans_id*9+0]+=Pmtrx[0]*(P_012000000*QR_011000000000+P_112000000*QR_011000000100+P_212000000*QR_011000000200+P_312000000*QR_011000000300);
ans_temp[ans_id*9+0]+=Pmtrx[1]*(P_012000000*QR_010001000000+P_112000000*QR_010001000100+P_212000000*QR_010001000200+P_312000000*QR_010001000300);
ans_temp[ans_id*9+0]+=Pmtrx[2]*(P_012000000*QR_010000001000+P_112000000*QR_010000001100+P_212000000*QR_010000001200+P_312000000*QR_010000001300);
ans_temp[ans_id*9+1]+=Pmtrx[0]*(P_012000000*QR_001010000000+P_112000000*QR_001010000100+P_212000000*QR_001010000200+P_312000000*QR_001010000300);
ans_temp[ans_id*9+1]+=Pmtrx[1]*(P_012000000*QR_000011000000+P_112000000*QR_000011000100+P_212000000*QR_000011000200+P_312000000*QR_000011000300);
ans_temp[ans_id*9+1]+=Pmtrx[2]*(P_012000000*QR_000010001000+P_112000000*QR_000010001100+P_212000000*QR_000010001200+P_312000000*QR_000010001300);
ans_temp[ans_id*9+2]+=Pmtrx[0]*(P_012000000*QR_001000010000+P_112000000*QR_001000010100+P_212000000*QR_001000010200+P_312000000*QR_001000010300);
ans_temp[ans_id*9+2]+=Pmtrx[1]*(P_012000000*QR_000001010000+P_112000000*QR_000001010100+P_212000000*QR_000001010200+P_312000000*QR_000001010300);
ans_temp[ans_id*9+2]+=Pmtrx[2]*(P_012000000*QR_000000011000+P_112000000*QR_000000011100+P_212000000*QR_000000011200+P_312000000*QR_000000011300);
ans_temp[ans_id*9+0]+=Pmtrx[3]*(P_011001000*QR_011000000000+P_011101000*QR_011000000010+P_111001000*QR_011000000100+P_111101000*QR_011000000110+P_211001000*QR_011000000200+P_211101000*QR_011000000210);
ans_temp[ans_id*9+0]+=Pmtrx[4]*(P_011001000*QR_010001000000+P_011101000*QR_010001000010+P_111001000*QR_010001000100+P_111101000*QR_010001000110+P_211001000*QR_010001000200+P_211101000*QR_010001000210);
ans_temp[ans_id*9+0]+=Pmtrx[5]*(P_011001000*QR_010000001000+P_011101000*QR_010000001010+P_111001000*QR_010000001100+P_111101000*QR_010000001110+P_211001000*QR_010000001200+P_211101000*QR_010000001210);
ans_temp[ans_id*9+1]+=Pmtrx[3]*(P_011001000*QR_001010000000+P_011101000*QR_001010000010+P_111001000*QR_001010000100+P_111101000*QR_001010000110+P_211001000*QR_001010000200+P_211101000*QR_001010000210);
ans_temp[ans_id*9+1]+=Pmtrx[4]*(P_011001000*QR_000011000000+P_011101000*QR_000011000010+P_111001000*QR_000011000100+P_111101000*QR_000011000110+P_211001000*QR_000011000200+P_211101000*QR_000011000210);
ans_temp[ans_id*9+1]+=Pmtrx[5]*(P_011001000*QR_000010001000+P_011101000*QR_000010001010+P_111001000*QR_000010001100+P_111101000*QR_000010001110+P_211001000*QR_000010001200+P_211101000*QR_000010001210);
ans_temp[ans_id*9+2]+=Pmtrx[3]*(P_011001000*QR_001000010000+P_011101000*QR_001000010010+P_111001000*QR_001000010100+P_111101000*QR_001000010110+P_211001000*QR_001000010200+P_211101000*QR_001000010210);
ans_temp[ans_id*9+2]+=Pmtrx[4]*(P_011001000*QR_000001010000+P_011101000*QR_000001010010+P_111001000*QR_000001010100+P_111101000*QR_000001010110+P_211001000*QR_000001010200+P_211101000*QR_000001010210);
ans_temp[ans_id*9+2]+=Pmtrx[5]*(P_011001000*QR_000000011000+P_011101000*QR_000000011010+P_111001000*QR_000000011100+P_111101000*QR_000000011110+P_211001000*QR_000000011200+P_211101000*QR_000000011210);
ans_temp[ans_id*9+0]+=Pmtrx[6]*(P_010002000*QR_011000000000+P_010102000*QR_011000000010+P_010202000*QR_011000000020+P_110002000*QR_011000000100+P_110102000*QR_011000000110+P_110202000*QR_011000000120);
ans_temp[ans_id*9+0]+=Pmtrx[7]*(P_010002000*QR_010001000000+P_010102000*QR_010001000010+P_010202000*QR_010001000020+P_110002000*QR_010001000100+P_110102000*QR_010001000110+P_110202000*QR_010001000120);
ans_temp[ans_id*9+0]+=Pmtrx[8]*(P_010002000*QR_010000001000+P_010102000*QR_010000001010+P_010202000*QR_010000001020+P_110002000*QR_010000001100+P_110102000*QR_010000001110+P_110202000*QR_010000001120);
ans_temp[ans_id*9+1]+=Pmtrx[6]*(P_010002000*QR_001010000000+P_010102000*QR_001010000010+P_010202000*QR_001010000020+P_110002000*QR_001010000100+P_110102000*QR_001010000110+P_110202000*QR_001010000120);
ans_temp[ans_id*9+1]+=Pmtrx[7]*(P_010002000*QR_000011000000+P_010102000*QR_000011000010+P_010202000*QR_000011000020+P_110002000*QR_000011000100+P_110102000*QR_000011000110+P_110202000*QR_000011000120);
ans_temp[ans_id*9+1]+=Pmtrx[8]*(P_010002000*QR_000010001000+P_010102000*QR_000010001010+P_010202000*QR_000010001020+P_110002000*QR_000010001100+P_110102000*QR_000010001110+P_110202000*QR_000010001120);
ans_temp[ans_id*9+2]+=Pmtrx[6]*(P_010002000*QR_001000010000+P_010102000*QR_001000010010+P_010202000*QR_001000010020+P_110002000*QR_001000010100+P_110102000*QR_001000010110+P_110202000*QR_001000010120);
ans_temp[ans_id*9+2]+=Pmtrx[7]*(P_010002000*QR_000001010000+P_010102000*QR_000001010010+P_010202000*QR_000001010020+P_110002000*QR_000001010100+P_110102000*QR_000001010110+P_110202000*QR_000001010120);
ans_temp[ans_id*9+2]+=Pmtrx[8]*(P_010002000*QR_000000011000+P_010102000*QR_000000011010+P_010202000*QR_000000011020+P_110002000*QR_000000011100+P_110102000*QR_000000011110+P_110202000*QR_000000011120);
ans_temp[ans_id*9+0]+=Pmtrx[9]*(P_011000001*QR_011000000000+P_011000101*QR_011000000001+P_111000001*QR_011000000100+P_111000101*QR_011000000101+P_211000001*QR_011000000200+P_211000101*QR_011000000201);
ans_temp[ans_id*9+0]+=Pmtrx[10]*(P_011000001*QR_010001000000+P_011000101*QR_010001000001+P_111000001*QR_010001000100+P_111000101*QR_010001000101+P_211000001*QR_010001000200+P_211000101*QR_010001000201);
ans_temp[ans_id*9+0]+=Pmtrx[11]*(P_011000001*QR_010000001000+P_011000101*QR_010000001001+P_111000001*QR_010000001100+P_111000101*QR_010000001101+P_211000001*QR_010000001200+P_211000101*QR_010000001201);
ans_temp[ans_id*9+1]+=Pmtrx[9]*(P_011000001*QR_001010000000+P_011000101*QR_001010000001+P_111000001*QR_001010000100+P_111000101*QR_001010000101+P_211000001*QR_001010000200+P_211000101*QR_001010000201);
ans_temp[ans_id*9+1]+=Pmtrx[10]*(P_011000001*QR_000011000000+P_011000101*QR_000011000001+P_111000001*QR_000011000100+P_111000101*QR_000011000101+P_211000001*QR_000011000200+P_211000101*QR_000011000201);
ans_temp[ans_id*9+1]+=Pmtrx[11]*(P_011000001*QR_000010001000+P_011000101*QR_000010001001+P_111000001*QR_000010001100+P_111000101*QR_000010001101+P_211000001*QR_000010001200+P_211000101*QR_000010001201);
ans_temp[ans_id*9+2]+=Pmtrx[9]*(P_011000001*QR_001000010000+P_011000101*QR_001000010001+P_111000001*QR_001000010100+P_111000101*QR_001000010101+P_211000001*QR_001000010200+P_211000101*QR_001000010201);
ans_temp[ans_id*9+2]+=Pmtrx[10]*(P_011000001*QR_000001010000+P_011000101*QR_000001010001+P_111000001*QR_000001010100+P_111000101*QR_000001010101+P_211000001*QR_000001010200+P_211000101*QR_000001010201);
ans_temp[ans_id*9+2]+=Pmtrx[11]*(P_011000001*QR_000000011000+P_011000101*QR_000000011001+P_111000001*QR_000000011100+P_111000101*QR_000000011101+P_211000001*QR_000000011200+P_211000101*QR_000000011201);
ans_temp[ans_id*9+0]+=Pmtrx[12]*(P_010001001*QR_011000000000+P_010001101*QR_011000000001+P_010101001*QR_011000000010+P_010101101*QR_011000000011+P_110001001*QR_011000000100+P_110001101*QR_011000000101+P_110101001*QR_011000000110+P_110101101*QR_011000000111);
ans_temp[ans_id*9+0]+=Pmtrx[13]*(P_010001001*QR_010001000000+P_010001101*QR_010001000001+P_010101001*QR_010001000010+P_010101101*QR_010001000011+P_110001001*QR_010001000100+P_110001101*QR_010001000101+P_110101001*QR_010001000110+P_110101101*QR_010001000111);
ans_temp[ans_id*9+0]+=Pmtrx[14]*(P_010001001*QR_010000001000+P_010001101*QR_010000001001+P_010101001*QR_010000001010+P_010101101*QR_010000001011+P_110001001*QR_010000001100+P_110001101*QR_010000001101+P_110101001*QR_010000001110+P_110101101*QR_010000001111);
ans_temp[ans_id*9+1]+=Pmtrx[12]*(P_010001001*QR_001010000000+P_010001101*QR_001010000001+P_010101001*QR_001010000010+P_010101101*QR_001010000011+P_110001001*QR_001010000100+P_110001101*QR_001010000101+P_110101001*QR_001010000110+P_110101101*QR_001010000111);
ans_temp[ans_id*9+1]+=Pmtrx[13]*(P_010001001*QR_000011000000+P_010001101*QR_000011000001+P_010101001*QR_000011000010+P_010101101*QR_000011000011+P_110001001*QR_000011000100+P_110001101*QR_000011000101+P_110101001*QR_000011000110+P_110101101*QR_000011000111);
ans_temp[ans_id*9+1]+=Pmtrx[14]*(P_010001001*QR_000010001000+P_010001101*QR_000010001001+P_010101001*QR_000010001010+P_010101101*QR_000010001011+P_110001001*QR_000010001100+P_110001101*QR_000010001101+P_110101001*QR_000010001110+P_110101101*QR_000010001111);
ans_temp[ans_id*9+2]+=Pmtrx[12]*(P_010001001*QR_001000010000+P_010001101*QR_001000010001+P_010101001*QR_001000010010+P_010101101*QR_001000010011+P_110001001*QR_001000010100+P_110001101*QR_001000010101+P_110101001*QR_001000010110+P_110101101*QR_001000010111);
ans_temp[ans_id*9+2]+=Pmtrx[13]*(P_010001001*QR_000001010000+P_010001101*QR_000001010001+P_010101001*QR_000001010010+P_010101101*QR_000001010011+P_110001001*QR_000001010100+P_110001101*QR_000001010101+P_110101001*QR_000001010110+P_110101101*QR_000001010111);
ans_temp[ans_id*9+2]+=Pmtrx[14]*(P_010001001*QR_000000011000+P_010001101*QR_000000011001+P_010101001*QR_000000011010+P_010101101*QR_000000011011+P_110001001*QR_000000011100+P_110001101*QR_000000011101+P_110101001*QR_000000011110+P_110101101*QR_000000011111);
ans_temp[ans_id*9+0]+=Pmtrx[15]*(P_010000002*QR_011000000000+P_010000102*QR_011000000001+P_010000202*QR_011000000002+P_110000002*QR_011000000100+P_110000102*QR_011000000101+P_110000202*QR_011000000102);
ans_temp[ans_id*9+0]+=Pmtrx[16]*(P_010000002*QR_010001000000+P_010000102*QR_010001000001+P_010000202*QR_010001000002+P_110000002*QR_010001000100+P_110000102*QR_010001000101+P_110000202*QR_010001000102);
ans_temp[ans_id*9+0]+=Pmtrx[17]*(P_010000002*QR_010000001000+P_010000102*QR_010000001001+P_010000202*QR_010000001002+P_110000002*QR_010000001100+P_110000102*QR_010000001101+P_110000202*QR_010000001102);
ans_temp[ans_id*9+1]+=Pmtrx[15]*(P_010000002*QR_001010000000+P_010000102*QR_001010000001+P_010000202*QR_001010000002+P_110000002*QR_001010000100+P_110000102*QR_001010000101+P_110000202*QR_001010000102);
ans_temp[ans_id*9+1]+=Pmtrx[16]*(P_010000002*QR_000011000000+P_010000102*QR_000011000001+P_010000202*QR_000011000002+P_110000002*QR_000011000100+P_110000102*QR_000011000101+P_110000202*QR_000011000102);
ans_temp[ans_id*9+1]+=Pmtrx[17]*(P_010000002*QR_000010001000+P_010000102*QR_000010001001+P_010000202*QR_000010001002+P_110000002*QR_000010001100+P_110000102*QR_000010001101+P_110000202*QR_000010001102);
ans_temp[ans_id*9+2]+=Pmtrx[15]*(P_010000002*QR_001000010000+P_010000102*QR_001000010001+P_010000202*QR_001000010002+P_110000002*QR_001000010100+P_110000102*QR_001000010101+P_110000202*QR_001000010102);
ans_temp[ans_id*9+2]+=Pmtrx[16]*(P_010000002*QR_000001010000+P_010000102*QR_000001010001+P_010000202*QR_000001010002+P_110000002*QR_000001010100+P_110000102*QR_000001010101+P_110000202*QR_000001010102);
ans_temp[ans_id*9+2]+=Pmtrx[17]*(P_010000002*QR_000000011000+P_010000102*QR_000000011001+P_010000202*QR_000000011002+P_110000002*QR_000000011100+P_110000102*QR_000000011101+P_110000202*QR_000000011102);
ans_temp[ans_id*9+3]+=Pmtrx[0]*(P_002010000*QR_011000000000+P_002110000*QR_011000000010+P_102010000*QR_011000000100+P_102110000*QR_011000000110+P_202010000*QR_011000000200+P_202110000*QR_011000000210);
ans_temp[ans_id*9+3]+=Pmtrx[1]*(P_002010000*QR_010001000000+P_002110000*QR_010001000010+P_102010000*QR_010001000100+P_102110000*QR_010001000110+P_202010000*QR_010001000200+P_202110000*QR_010001000210);
ans_temp[ans_id*9+3]+=Pmtrx[2]*(P_002010000*QR_010000001000+P_002110000*QR_010000001010+P_102010000*QR_010000001100+P_102110000*QR_010000001110+P_202010000*QR_010000001200+P_202110000*QR_010000001210);
ans_temp[ans_id*9+4]+=Pmtrx[0]*(P_002010000*QR_001010000000+P_002110000*QR_001010000010+P_102010000*QR_001010000100+P_102110000*QR_001010000110+P_202010000*QR_001010000200+P_202110000*QR_001010000210);
ans_temp[ans_id*9+4]+=Pmtrx[1]*(P_002010000*QR_000011000000+P_002110000*QR_000011000010+P_102010000*QR_000011000100+P_102110000*QR_000011000110+P_202010000*QR_000011000200+P_202110000*QR_000011000210);
ans_temp[ans_id*9+4]+=Pmtrx[2]*(P_002010000*QR_000010001000+P_002110000*QR_000010001010+P_102010000*QR_000010001100+P_102110000*QR_000010001110+P_202010000*QR_000010001200+P_202110000*QR_000010001210);
ans_temp[ans_id*9+5]+=Pmtrx[0]*(P_002010000*QR_001000010000+P_002110000*QR_001000010010+P_102010000*QR_001000010100+P_102110000*QR_001000010110+P_202010000*QR_001000010200+P_202110000*QR_001000010210);
ans_temp[ans_id*9+5]+=Pmtrx[1]*(P_002010000*QR_000001010000+P_002110000*QR_000001010010+P_102010000*QR_000001010100+P_102110000*QR_000001010110+P_202010000*QR_000001010200+P_202110000*QR_000001010210);
ans_temp[ans_id*9+5]+=Pmtrx[2]*(P_002010000*QR_000000011000+P_002110000*QR_000000011010+P_102010000*QR_000000011100+P_102110000*QR_000000011110+P_202010000*QR_000000011200+P_202110000*QR_000000011210);
ans_temp[ans_id*9+3]+=Pmtrx[3]*(P_001011000*QR_011000000000+P_001111000*QR_011000000010+P_001211000*QR_011000000020+P_101011000*QR_011000000100+P_101111000*QR_011000000110+P_101211000*QR_011000000120);
ans_temp[ans_id*9+3]+=Pmtrx[4]*(P_001011000*QR_010001000000+P_001111000*QR_010001000010+P_001211000*QR_010001000020+P_101011000*QR_010001000100+P_101111000*QR_010001000110+P_101211000*QR_010001000120);
ans_temp[ans_id*9+3]+=Pmtrx[5]*(P_001011000*QR_010000001000+P_001111000*QR_010000001010+P_001211000*QR_010000001020+P_101011000*QR_010000001100+P_101111000*QR_010000001110+P_101211000*QR_010000001120);
ans_temp[ans_id*9+4]+=Pmtrx[3]*(P_001011000*QR_001010000000+P_001111000*QR_001010000010+P_001211000*QR_001010000020+P_101011000*QR_001010000100+P_101111000*QR_001010000110+P_101211000*QR_001010000120);
ans_temp[ans_id*9+4]+=Pmtrx[4]*(P_001011000*QR_000011000000+P_001111000*QR_000011000010+P_001211000*QR_000011000020+P_101011000*QR_000011000100+P_101111000*QR_000011000110+P_101211000*QR_000011000120);
ans_temp[ans_id*9+4]+=Pmtrx[5]*(P_001011000*QR_000010001000+P_001111000*QR_000010001010+P_001211000*QR_000010001020+P_101011000*QR_000010001100+P_101111000*QR_000010001110+P_101211000*QR_000010001120);
ans_temp[ans_id*9+5]+=Pmtrx[3]*(P_001011000*QR_001000010000+P_001111000*QR_001000010010+P_001211000*QR_001000010020+P_101011000*QR_001000010100+P_101111000*QR_001000010110+P_101211000*QR_001000010120);
ans_temp[ans_id*9+5]+=Pmtrx[4]*(P_001011000*QR_000001010000+P_001111000*QR_000001010010+P_001211000*QR_000001010020+P_101011000*QR_000001010100+P_101111000*QR_000001010110+P_101211000*QR_000001010120);
ans_temp[ans_id*9+5]+=Pmtrx[5]*(P_001011000*QR_000000011000+P_001111000*QR_000000011010+P_001211000*QR_000000011020+P_101011000*QR_000000011100+P_101111000*QR_000000011110+P_101211000*QR_000000011120);
ans_temp[ans_id*9+3]+=Pmtrx[6]*(P_000012000*QR_011000000000+P_000112000*QR_011000000010+P_000212000*QR_011000000020+P_000312000*QR_011000000030);
ans_temp[ans_id*9+3]+=Pmtrx[7]*(P_000012000*QR_010001000000+P_000112000*QR_010001000010+P_000212000*QR_010001000020+P_000312000*QR_010001000030);
ans_temp[ans_id*9+3]+=Pmtrx[8]*(P_000012000*QR_010000001000+P_000112000*QR_010000001010+P_000212000*QR_010000001020+P_000312000*QR_010000001030);
ans_temp[ans_id*9+4]+=Pmtrx[6]*(P_000012000*QR_001010000000+P_000112000*QR_001010000010+P_000212000*QR_001010000020+P_000312000*QR_001010000030);
ans_temp[ans_id*9+4]+=Pmtrx[7]*(P_000012000*QR_000011000000+P_000112000*QR_000011000010+P_000212000*QR_000011000020+P_000312000*QR_000011000030);
ans_temp[ans_id*9+4]+=Pmtrx[8]*(P_000012000*QR_000010001000+P_000112000*QR_000010001010+P_000212000*QR_000010001020+P_000312000*QR_000010001030);
ans_temp[ans_id*9+5]+=Pmtrx[6]*(P_000012000*QR_001000010000+P_000112000*QR_001000010010+P_000212000*QR_001000010020+P_000312000*QR_001000010030);
ans_temp[ans_id*9+5]+=Pmtrx[7]*(P_000012000*QR_000001010000+P_000112000*QR_000001010010+P_000212000*QR_000001010020+P_000312000*QR_000001010030);
ans_temp[ans_id*9+5]+=Pmtrx[8]*(P_000012000*QR_000000011000+P_000112000*QR_000000011010+P_000212000*QR_000000011020+P_000312000*QR_000000011030);
ans_temp[ans_id*9+3]+=Pmtrx[9]*(P_001010001*QR_011000000000+P_001010101*QR_011000000001+P_001110001*QR_011000000010+P_001110101*QR_011000000011+P_101010001*QR_011000000100+P_101010101*QR_011000000101+P_101110001*QR_011000000110+P_101110101*QR_011000000111);
ans_temp[ans_id*9+3]+=Pmtrx[10]*(P_001010001*QR_010001000000+P_001010101*QR_010001000001+P_001110001*QR_010001000010+P_001110101*QR_010001000011+P_101010001*QR_010001000100+P_101010101*QR_010001000101+P_101110001*QR_010001000110+P_101110101*QR_010001000111);
ans_temp[ans_id*9+3]+=Pmtrx[11]*(P_001010001*QR_010000001000+P_001010101*QR_010000001001+P_001110001*QR_010000001010+P_001110101*QR_010000001011+P_101010001*QR_010000001100+P_101010101*QR_010000001101+P_101110001*QR_010000001110+P_101110101*QR_010000001111);
ans_temp[ans_id*9+4]+=Pmtrx[9]*(P_001010001*QR_001010000000+P_001010101*QR_001010000001+P_001110001*QR_001010000010+P_001110101*QR_001010000011+P_101010001*QR_001010000100+P_101010101*QR_001010000101+P_101110001*QR_001010000110+P_101110101*QR_001010000111);
ans_temp[ans_id*9+4]+=Pmtrx[10]*(P_001010001*QR_000011000000+P_001010101*QR_000011000001+P_001110001*QR_000011000010+P_001110101*QR_000011000011+P_101010001*QR_000011000100+P_101010101*QR_000011000101+P_101110001*QR_000011000110+P_101110101*QR_000011000111);
ans_temp[ans_id*9+4]+=Pmtrx[11]*(P_001010001*QR_000010001000+P_001010101*QR_000010001001+P_001110001*QR_000010001010+P_001110101*QR_000010001011+P_101010001*QR_000010001100+P_101010101*QR_000010001101+P_101110001*QR_000010001110+P_101110101*QR_000010001111);
ans_temp[ans_id*9+5]+=Pmtrx[9]*(P_001010001*QR_001000010000+P_001010101*QR_001000010001+P_001110001*QR_001000010010+P_001110101*QR_001000010011+P_101010001*QR_001000010100+P_101010101*QR_001000010101+P_101110001*QR_001000010110+P_101110101*QR_001000010111);
ans_temp[ans_id*9+5]+=Pmtrx[10]*(P_001010001*QR_000001010000+P_001010101*QR_000001010001+P_001110001*QR_000001010010+P_001110101*QR_000001010011+P_101010001*QR_000001010100+P_101010101*QR_000001010101+P_101110001*QR_000001010110+P_101110101*QR_000001010111);
ans_temp[ans_id*9+5]+=Pmtrx[11]*(P_001010001*QR_000000011000+P_001010101*QR_000000011001+P_001110001*QR_000000011010+P_001110101*QR_000000011011+P_101010001*QR_000000011100+P_101010101*QR_000000011101+P_101110001*QR_000000011110+P_101110101*QR_000000011111);
ans_temp[ans_id*9+3]+=Pmtrx[12]*(P_000011001*QR_011000000000+P_000011101*QR_011000000001+P_000111001*QR_011000000010+P_000111101*QR_011000000011+P_000211001*QR_011000000020+P_000211101*QR_011000000021);
ans_temp[ans_id*9+3]+=Pmtrx[13]*(P_000011001*QR_010001000000+P_000011101*QR_010001000001+P_000111001*QR_010001000010+P_000111101*QR_010001000011+P_000211001*QR_010001000020+P_000211101*QR_010001000021);
ans_temp[ans_id*9+3]+=Pmtrx[14]*(P_000011001*QR_010000001000+P_000011101*QR_010000001001+P_000111001*QR_010000001010+P_000111101*QR_010000001011+P_000211001*QR_010000001020+P_000211101*QR_010000001021);
ans_temp[ans_id*9+4]+=Pmtrx[12]*(P_000011001*QR_001010000000+P_000011101*QR_001010000001+P_000111001*QR_001010000010+P_000111101*QR_001010000011+P_000211001*QR_001010000020+P_000211101*QR_001010000021);
ans_temp[ans_id*9+4]+=Pmtrx[13]*(P_000011001*QR_000011000000+P_000011101*QR_000011000001+P_000111001*QR_000011000010+P_000111101*QR_000011000011+P_000211001*QR_000011000020+P_000211101*QR_000011000021);
ans_temp[ans_id*9+4]+=Pmtrx[14]*(P_000011001*QR_000010001000+P_000011101*QR_000010001001+P_000111001*QR_000010001010+P_000111101*QR_000010001011+P_000211001*QR_000010001020+P_000211101*QR_000010001021);
ans_temp[ans_id*9+5]+=Pmtrx[12]*(P_000011001*QR_001000010000+P_000011101*QR_001000010001+P_000111001*QR_001000010010+P_000111101*QR_001000010011+P_000211001*QR_001000010020+P_000211101*QR_001000010021);
ans_temp[ans_id*9+5]+=Pmtrx[13]*(P_000011001*QR_000001010000+P_000011101*QR_000001010001+P_000111001*QR_000001010010+P_000111101*QR_000001010011+P_000211001*QR_000001010020+P_000211101*QR_000001010021);
ans_temp[ans_id*9+5]+=Pmtrx[14]*(P_000011001*QR_000000011000+P_000011101*QR_000000011001+P_000111001*QR_000000011010+P_000111101*QR_000000011011+P_000211001*QR_000000011020+P_000211101*QR_000000011021);
ans_temp[ans_id*9+3]+=Pmtrx[15]*(P_000010002*QR_011000000000+P_000010102*QR_011000000001+P_000010202*QR_011000000002+P_000110002*QR_011000000010+P_000110102*QR_011000000011+P_000110202*QR_011000000012);
ans_temp[ans_id*9+3]+=Pmtrx[16]*(P_000010002*QR_010001000000+P_000010102*QR_010001000001+P_000010202*QR_010001000002+P_000110002*QR_010001000010+P_000110102*QR_010001000011+P_000110202*QR_010001000012);
ans_temp[ans_id*9+3]+=Pmtrx[17]*(P_000010002*QR_010000001000+P_000010102*QR_010000001001+P_000010202*QR_010000001002+P_000110002*QR_010000001010+P_000110102*QR_010000001011+P_000110202*QR_010000001012);
ans_temp[ans_id*9+4]+=Pmtrx[15]*(P_000010002*QR_001010000000+P_000010102*QR_001010000001+P_000010202*QR_001010000002+P_000110002*QR_001010000010+P_000110102*QR_001010000011+P_000110202*QR_001010000012);
ans_temp[ans_id*9+4]+=Pmtrx[16]*(P_000010002*QR_000011000000+P_000010102*QR_000011000001+P_000010202*QR_000011000002+P_000110002*QR_000011000010+P_000110102*QR_000011000011+P_000110202*QR_000011000012);
ans_temp[ans_id*9+4]+=Pmtrx[17]*(P_000010002*QR_000010001000+P_000010102*QR_000010001001+P_000010202*QR_000010001002+P_000110002*QR_000010001010+P_000110102*QR_000010001011+P_000110202*QR_000010001012);
ans_temp[ans_id*9+5]+=Pmtrx[15]*(P_000010002*QR_001000010000+P_000010102*QR_001000010001+P_000010202*QR_001000010002+P_000110002*QR_001000010010+P_000110102*QR_001000010011+P_000110202*QR_001000010012);
ans_temp[ans_id*9+5]+=Pmtrx[16]*(P_000010002*QR_000001010000+P_000010102*QR_000001010001+P_000010202*QR_000001010002+P_000110002*QR_000001010010+P_000110102*QR_000001010011+P_000110202*QR_000001010012);
ans_temp[ans_id*9+5]+=Pmtrx[17]*(P_000010002*QR_000000011000+P_000010102*QR_000000011001+P_000010202*QR_000000011002+P_000110002*QR_000000011010+P_000110102*QR_000000011011+P_000110202*QR_000000011012);
ans_temp[ans_id*9+6]+=Pmtrx[0]*(P_002000010*QR_011000000000+P_002000110*QR_011000000001+P_102000010*QR_011000000100+P_102000110*QR_011000000101+P_202000010*QR_011000000200+P_202000110*QR_011000000201);
ans_temp[ans_id*9+6]+=Pmtrx[1]*(P_002000010*QR_010001000000+P_002000110*QR_010001000001+P_102000010*QR_010001000100+P_102000110*QR_010001000101+P_202000010*QR_010001000200+P_202000110*QR_010001000201);
ans_temp[ans_id*9+6]+=Pmtrx[2]*(P_002000010*QR_010000001000+P_002000110*QR_010000001001+P_102000010*QR_010000001100+P_102000110*QR_010000001101+P_202000010*QR_010000001200+P_202000110*QR_010000001201);
ans_temp[ans_id*9+7]+=Pmtrx[0]*(P_002000010*QR_001010000000+P_002000110*QR_001010000001+P_102000010*QR_001010000100+P_102000110*QR_001010000101+P_202000010*QR_001010000200+P_202000110*QR_001010000201);
ans_temp[ans_id*9+7]+=Pmtrx[1]*(P_002000010*QR_000011000000+P_002000110*QR_000011000001+P_102000010*QR_000011000100+P_102000110*QR_000011000101+P_202000010*QR_000011000200+P_202000110*QR_000011000201);
ans_temp[ans_id*9+7]+=Pmtrx[2]*(P_002000010*QR_000010001000+P_002000110*QR_000010001001+P_102000010*QR_000010001100+P_102000110*QR_000010001101+P_202000010*QR_000010001200+P_202000110*QR_000010001201);
ans_temp[ans_id*9+8]+=Pmtrx[0]*(P_002000010*QR_001000010000+P_002000110*QR_001000010001+P_102000010*QR_001000010100+P_102000110*QR_001000010101+P_202000010*QR_001000010200+P_202000110*QR_001000010201);
ans_temp[ans_id*9+8]+=Pmtrx[1]*(P_002000010*QR_000001010000+P_002000110*QR_000001010001+P_102000010*QR_000001010100+P_102000110*QR_000001010101+P_202000010*QR_000001010200+P_202000110*QR_000001010201);
ans_temp[ans_id*9+8]+=Pmtrx[2]*(P_002000010*QR_000000011000+P_002000110*QR_000000011001+P_102000010*QR_000000011100+P_102000110*QR_000000011101+P_202000010*QR_000000011200+P_202000110*QR_000000011201);
ans_temp[ans_id*9+6]+=Pmtrx[3]*(P_001001010*QR_011000000000+P_001001110*QR_011000000001+P_001101010*QR_011000000010+P_001101110*QR_011000000011+P_101001010*QR_011000000100+P_101001110*QR_011000000101+P_101101010*QR_011000000110+P_101101110*QR_011000000111);
ans_temp[ans_id*9+6]+=Pmtrx[4]*(P_001001010*QR_010001000000+P_001001110*QR_010001000001+P_001101010*QR_010001000010+P_001101110*QR_010001000011+P_101001010*QR_010001000100+P_101001110*QR_010001000101+P_101101010*QR_010001000110+P_101101110*QR_010001000111);
ans_temp[ans_id*9+6]+=Pmtrx[5]*(P_001001010*QR_010000001000+P_001001110*QR_010000001001+P_001101010*QR_010000001010+P_001101110*QR_010000001011+P_101001010*QR_010000001100+P_101001110*QR_010000001101+P_101101010*QR_010000001110+P_101101110*QR_010000001111);
ans_temp[ans_id*9+7]+=Pmtrx[3]*(P_001001010*QR_001010000000+P_001001110*QR_001010000001+P_001101010*QR_001010000010+P_001101110*QR_001010000011+P_101001010*QR_001010000100+P_101001110*QR_001010000101+P_101101010*QR_001010000110+P_101101110*QR_001010000111);
ans_temp[ans_id*9+7]+=Pmtrx[4]*(P_001001010*QR_000011000000+P_001001110*QR_000011000001+P_001101010*QR_000011000010+P_001101110*QR_000011000011+P_101001010*QR_000011000100+P_101001110*QR_000011000101+P_101101010*QR_000011000110+P_101101110*QR_000011000111);
ans_temp[ans_id*9+7]+=Pmtrx[5]*(P_001001010*QR_000010001000+P_001001110*QR_000010001001+P_001101010*QR_000010001010+P_001101110*QR_000010001011+P_101001010*QR_000010001100+P_101001110*QR_000010001101+P_101101010*QR_000010001110+P_101101110*QR_000010001111);
ans_temp[ans_id*9+8]+=Pmtrx[3]*(P_001001010*QR_001000010000+P_001001110*QR_001000010001+P_001101010*QR_001000010010+P_001101110*QR_001000010011+P_101001010*QR_001000010100+P_101001110*QR_001000010101+P_101101010*QR_001000010110+P_101101110*QR_001000010111);
ans_temp[ans_id*9+8]+=Pmtrx[4]*(P_001001010*QR_000001010000+P_001001110*QR_000001010001+P_001101010*QR_000001010010+P_001101110*QR_000001010011+P_101001010*QR_000001010100+P_101001110*QR_000001010101+P_101101010*QR_000001010110+P_101101110*QR_000001010111);
ans_temp[ans_id*9+8]+=Pmtrx[5]*(P_001001010*QR_000000011000+P_001001110*QR_000000011001+P_001101010*QR_000000011010+P_001101110*QR_000000011011+P_101001010*QR_000000011100+P_101001110*QR_000000011101+P_101101010*QR_000000011110+P_101101110*QR_000000011111);
ans_temp[ans_id*9+6]+=Pmtrx[6]*(P_000002010*QR_011000000000+P_000002110*QR_011000000001+P_000102010*QR_011000000010+P_000102110*QR_011000000011+P_000202010*QR_011000000020+P_000202110*QR_011000000021);
ans_temp[ans_id*9+6]+=Pmtrx[7]*(P_000002010*QR_010001000000+P_000002110*QR_010001000001+P_000102010*QR_010001000010+P_000102110*QR_010001000011+P_000202010*QR_010001000020+P_000202110*QR_010001000021);
ans_temp[ans_id*9+6]+=Pmtrx[8]*(P_000002010*QR_010000001000+P_000002110*QR_010000001001+P_000102010*QR_010000001010+P_000102110*QR_010000001011+P_000202010*QR_010000001020+P_000202110*QR_010000001021);
ans_temp[ans_id*9+7]+=Pmtrx[6]*(P_000002010*QR_001010000000+P_000002110*QR_001010000001+P_000102010*QR_001010000010+P_000102110*QR_001010000011+P_000202010*QR_001010000020+P_000202110*QR_001010000021);
ans_temp[ans_id*9+7]+=Pmtrx[7]*(P_000002010*QR_000011000000+P_000002110*QR_000011000001+P_000102010*QR_000011000010+P_000102110*QR_000011000011+P_000202010*QR_000011000020+P_000202110*QR_000011000021);
ans_temp[ans_id*9+7]+=Pmtrx[8]*(P_000002010*QR_000010001000+P_000002110*QR_000010001001+P_000102010*QR_000010001010+P_000102110*QR_000010001011+P_000202010*QR_000010001020+P_000202110*QR_000010001021);
ans_temp[ans_id*9+8]+=Pmtrx[6]*(P_000002010*QR_001000010000+P_000002110*QR_001000010001+P_000102010*QR_001000010010+P_000102110*QR_001000010011+P_000202010*QR_001000010020+P_000202110*QR_001000010021);
ans_temp[ans_id*9+8]+=Pmtrx[7]*(P_000002010*QR_000001010000+P_000002110*QR_000001010001+P_000102010*QR_000001010010+P_000102110*QR_000001010011+P_000202010*QR_000001010020+P_000202110*QR_000001010021);
ans_temp[ans_id*9+8]+=Pmtrx[8]*(P_000002010*QR_000000011000+P_000002110*QR_000000011001+P_000102010*QR_000000011010+P_000102110*QR_000000011011+P_000202010*QR_000000011020+P_000202110*QR_000000011021);
ans_temp[ans_id*9+6]+=Pmtrx[9]*(P_001000011*QR_011000000000+P_001000111*QR_011000000001+P_001000211*QR_011000000002+P_101000011*QR_011000000100+P_101000111*QR_011000000101+P_101000211*QR_011000000102);
ans_temp[ans_id*9+6]+=Pmtrx[10]*(P_001000011*QR_010001000000+P_001000111*QR_010001000001+P_001000211*QR_010001000002+P_101000011*QR_010001000100+P_101000111*QR_010001000101+P_101000211*QR_010001000102);
ans_temp[ans_id*9+6]+=Pmtrx[11]*(P_001000011*QR_010000001000+P_001000111*QR_010000001001+P_001000211*QR_010000001002+P_101000011*QR_010000001100+P_101000111*QR_010000001101+P_101000211*QR_010000001102);
ans_temp[ans_id*9+7]+=Pmtrx[9]*(P_001000011*QR_001010000000+P_001000111*QR_001010000001+P_001000211*QR_001010000002+P_101000011*QR_001010000100+P_101000111*QR_001010000101+P_101000211*QR_001010000102);
ans_temp[ans_id*9+7]+=Pmtrx[10]*(P_001000011*QR_000011000000+P_001000111*QR_000011000001+P_001000211*QR_000011000002+P_101000011*QR_000011000100+P_101000111*QR_000011000101+P_101000211*QR_000011000102);
ans_temp[ans_id*9+7]+=Pmtrx[11]*(P_001000011*QR_000010001000+P_001000111*QR_000010001001+P_001000211*QR_000010001002+P_101000011*QR_000010001100+P_101000111*QR_000010001101+P_101000211*QR_000010001102);
ans_temp[ans_id*9+8]+=Pmtrx[9]*(P_001000011*QR_001000010000+P_001000111*QR_001000010001+P_001000211*QR_001000010002+P_101000011*QR_001000010100+P_101000111*QR_001000010101+P_101000211*QR_001000010102);
ans_temp[ans_id*9+8]+=Pmtrx[10]*(P_001000011*QR_000001010000+P_001000111*QR_000001010001+P_001000211*QR_000001010002+P_101000011*QR_000001010100+P_101000111*QR_000001010101+P_101000211*QR_000001010102);
ans_temp[ans_id*9+8]+=Pmtrx[11]*(P_001000011*QR_000000011000+P_001000111*QR_000000011001+P_001000211*QR_000000011002+P_101000011*QR_000000011100+P_101000111*QR_000000011101+P_101000211*QR_000000011102);
ans_temp[ans_id*9+6]+=Pmtrx[12]*(P_000001011*QR_011000000000+P_000001111*QR_011000000001+P_000001211*QR_011000000002+P_000101011*QR_011000000010+P_000101111*QR_011000000011+P_000101211*QR_011000000012);
ans_temp[ans_id*9+6]+=Pmtrx[13]*(P_000001011*QR_010001000000+P_000001111*QR_010001000001+P_000001211*QR_010001000002+P_000101011*QR_010001000010+P_000101111*QR_010001000011+P_000101211*QR_010001000012);
ans_temp[ans_id*9+6]+=Pmtrx[14]*(P_000001011*QR_010000001000+P_000001111*QR_010000001001+P_000001211*QR_010000001002+P_000101011*QR_010000001010+P_000101111*QR_010000001011+P_000101211*QR_010000001012);
ans_temp[ans_id*9+7]+=Pmtrx[12]*(P_000001011*QR_001010000000+P_000001111*QR_001010000001+P_000001211*QR_001010000002+P_000101011*QR_001010000010+P_000101111*QR_001010000011+P_000101211*QR_001010000012);
ans_temp[ans_id*9+7]+=Pmtrx[13]*(P_000001011*QR_000011000000+P_000001111*QR_000011000001+P_000001211*QR_000011000002+P_000101011*QR_000011000010+P_000101111*QR_000011000011+P_000101211*QR_000011000012);
ans_temp[ans_id*9+7]+=Pmtrx[14]*(P_000001011*QR_000010001000+P_000001111*QR_000010001001+P_000001211*QR_000010001002+P_000101011*QR_000010001010+P_000101111*QR_000010001011+P_000101211*QR_000010001012);
ans_temp[ans_id*9+8]+=Pmtrx[12]*(P_000001011*QR_001000010000+P_000001111*QR_001000010001+P_000001211*QR_001000010002+P_000101011*QR_001000010010+P_000101111*QR_001000010011+P_000101211*QR_001000010012);
ans_temp[ans_id*9+8]+=Pmtrx[13]*(P_000001011*QR_000001010000+P_000001111*QR_000001010001+P_000001211*QR_000001010002+P_000101011*QR_000001010010+P_000101111*QR_000001010011+P_000101211*QR_000001010012);
ans_temp[ans_id*9+8]+=Pmtrx[14]*(P_000001011*QR_000000011000+P_000001111*QR_000000011001+P_000001211*QR_000000011002+P_000101011*QR_000000011010+P_000101111*QR_000000011011+P_000101211*QR_000000011012);
ans_temp[ans_id*9+6]+=Pmtrx[15]*(P_000000012*QR_011000000000+P_000000112*QR_011000000001+P_000000212*QR_011000000002+P_000000312*QR_011000000003);
ans_temp[ans_id*9+6]+=Pmtrx[16]*(P_000000012*QR_010001000000+P_000000112*QR_010001000001+P_000000212*QR_010001000002+P_000000312*QR_010001000003);
ans_temp[ans_id*9+6]+=Pmtrx[17]*(P_000000012*QR_010000001000+P_000000112*QR_010000001001+P_000000212*QR_010000001002+P_000000312*QR_010000001003);
ans_temp[ans_id*9+7]+=Pmtrx[15]*(P_000000012*QR_001010000000+P_000000112*QR_001010000001+P_000000212*QR_001010000002+P_000000312*QR_001010000003);
ans_temp[ans_id*9+7]+=Pmtrx[16]*(P_000000012*QR_000011000000+P_000000112*QR_000011000001+P_000000212*QR_000011000002+P_000000312*QR_000011000003);
ans_temp[ans_id*9+7]+=Pmtrx[17]*(P_000000012*QR_000010001000+P_000000112*QR_000010001001+P_000000212*QR_000010001002+P_000000312*QR_000010001003);
ans_temp[ans_id*9+8]+=Pmtrx[15]*(P_000000012*QR_001000010000+P_000000112*QR_001000010001+P_000000212*QR_001000010002+P_000000312*QR_001000010003);
ans_temp[ans_id*9+8]+=Pmtrx[16]*(P_000000012*QR_000001010000+P_000000112*QR_000001010001+P_000000212*QR_000001010002+P_000000312*QR_000001010003);
ans_temp[ans_id*9+8]+=Pmtrx[17]*(P_000000012*QR_000000011000+P_000000112*QR_000000011001+P_000000212*QR_000000011002+P_000000312*QR_000000011003);
}
}
__syncthreads();
/* Parallel binary-tree reduction of the per-thread partial integrals held in
   shared ans_temp: at each step the lower half of the active threads folds in
   the upper half's 9 components, halving the active count until thread 0's
   slot holds the block-wide sum. Assumes blockDim.x == NTHREAD (64) so that
   num_thread starts at exactly half the thread count — TODO confirm at the
   launch site. */
int num_thread=NTHREAD/2;
while (num_thread!=0){
/* Barrier sits outside the divergent `if`, as required: every thread of the
   block reaches it each iteration. */
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<9;ians++){
ans_temp[tId_x*9+ians]+=ans_temp[(tId_x+num_thread)*9+ians];
}
}
num_thread/=2;
}
/* Thread 0 writes the 9 reduced values for this (bra,ket) contraction pair
   to the flattened global answer array. No barrier is needed after the last
   reduction step because thread 0 itself performed the final fold. */
if(tId_x==0){
for(int ians=0;ians<9;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*9+ians]=ans_temp[(tId_x)*9+ians];
}
}
}
}
}
/*
 * MD_Kp_dspp_fs
 *
 * Auto-generated McMurchie-Davidson kernel that accumulates (ds|pp)-class
 * two-electron repulsion integrals contracted with a density-matrix slice
 * (Pmtrx_in) into `ans`. "Kp" variant: the outer primitive loop runs over
 * KET primitives (read from plain global arrays), and the inner loop over
 * BRA primitives (read through the tex_* textures bound by
 * MD_texture_binding_bra_pp). Each (bra,ket) contraction pair produces 18
 * doubles: ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18 + 0..17].
 *
 * Launch assumptions (from the code itself):
 *  - blockDim.x threads cooperate on one contraction pair; the shared
 *    accumulator is sized NTHREAD*18, and the tree reduction starts at
 *    NTHREAD/2.  NOTE(review): this is only correct when
 *    blockDim.x == NTHREAD (64) -- confirm at every launch site.
 *  - gridDim.x indexes bra contractions, gridDim.y ket contractions,
 *    both strided so any grid size covers all pairs.
 *  - contrc_bra_id / contrc_ket_id are CSR-style offsets: entry i gives the
 *    first primitive of contraction i, entry i+1 the one-past-last.
 */
__global__ void MD_Kp_dspp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
// Per-thread cache of the 3 density-matrix entries for the current primitive pair.
double Pmtrx[3]={0.0};
// Per-thread partial sums: 18 components per thread, reduced across the block below.
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
// Grid-stride loops over bra (x) and ket (y) contractions.
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
// Outer loop: ket primitives, read from plain global memory (uniform across the block).
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
// Qd_010 = Q - C (ket "bra-center" offset), Qd_001 = Q - D.
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
// Inner loop: bra primitives, strided over threads; data comes from textures.
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
// Gather the 1x3 density-matrix tile for this primitive pair; track its max
// magnitude for Schwarz-style screening.
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<1;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
// NOTE(review): fabsf on a double truncates to float precision; presumably
// acceptable for a 1e-14 screening threshold -- confirm intended.
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
// NOTE(review): `break` (not `continue`) assumes bra primitives are sorted by
// decreasing |K2_p| so later ones also fail the cutoff -- verify upstream sort.
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
// Doubles are stored as int2 in textures; reassemble with __hiloint2double.
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
// Pd_010 = P - A (bra center offset) per Cartesian component.
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
// alphaT is reused: first 1/sqrt(Eta+Zta) for the prefactor, then the
// reduced exponent Eta*Zta/(Eta+Zta).
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
// Boys function values F_0..F_4(T), then scaled by (-2*alphaT)^m * lmd
// to become the Hermite base integrals R_000[m].
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
// Hermite auxiliary table R_{tuv}[m]: built by the standard MD upward
// recursion R_{t+1,u,v} = T_x*R_{t,u,v}[m+1] + t*R_{t-1,u,v}[m+1], etc.
// Array length = remaining m-orders needed at that angular total.
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
// Bra Hermite-expansion coefficients (E-coefficients) for a d-type shell,
// built from Pd_010 = P-A and aPin1 = 1/(2*Zta).
double Pd_110[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
// Products of bra coefficients for the six d-shell Cartesian components
// (xx, xy, yy, xz, yz, zz). Name encodes (x,y,z) Hermite orders.
double P_020000000=Pd_020[0];
double P_120000000=Pd_120[0];
double P_220000000=Pd_220[0];
double P_010010000=Pd_010[0]*Pd_010[1];
double P_010110000=Pd_010[0]*Pd_110[1];
double P_110010000=Pd_110[0]*Pd_010[1];
double P_110110000=Pd_110[0]*Pd_110[1];
double P_000020000=Pd_020[1];
double P_000120000=Pd_120[1];
double P_000220000=Pd_220[1];
double P_010000010=Pd_010[0]*Pd_010[2];
double P_010000110=Pd_010[0]*Pd_110[2];
double P_110000010=Pd_110[0]*Pd_010[2];
double P_110000110=Pd_110[0]*Pd_110[2];
double P_000010010=Pd_010[1]*Pd_010[2];
double P_000010110=Pd_010[1]*Pd_110[2];
double P_000110010=Pd_110[1]*Pd_010[2];
double P_000110110=Pd_110[1]*Pd_110[2];
double P_000000020=Pd_020[2];
double P_000000120=Pd_120[2];
double P_000000220=Pd_220[2];
// PR_*: bra coefficients contracted with the Hermite R table; the trailing
// three digits give the remaining ket-side Hermite derivative order.
double PR_020000000000=P_020000000*R_000[0]+-1*P_120000000*R_100[0]+P_220000000*R_200[0];
double PR_010010000000=P_010010000*R_000[0]+-1*P_010110000*R_010[0]+-1*P_110010000*R_100[0]+P_110110000*R_110[0];
double PR_000020000000=P_000020000*R_000[0]+-1*P_000120000*R_010[0]+P_000220000*R_020[0];
double PR_010000010000=P_010000010*R_000[0]+-1*P_010000110*R_001[0]+-1*P_110000010*R_100[0]+P_110000110*R_101[0];
double PR_000010010000=P_000010010*R_000[0]+-1*P_000010110*R_001[0]+-1*P_000110010*R_010[0]+P_000110110*R_011[0];
double PR_000000020000=P_000000020*R_000[0]+-1*P_000000120*R_001[0]+P_000000220*R_002[0];
double PR_020000000001=P_020000000*R_001[0]+-1*P_120000000*R_101[0]+P_220000000*R_201[0];
double PR_010010000001=P_010010000*R_001[0]+-1*P_010110000*R_011[0]+-1*P_110010000*R_101[0]+P_110110000*R_111[0];
double PR_000020000001=P_000020000*R_001[0]+-1*P_000120000*R_011[0]+P_000220000*R_021[0];
double PR_010000010001=P_010000010*R_001[0]+-1*P_010000110*R_002[0]+-1*P_110000010*R_101[0]+P_110000110*R_102[0];
double PR_000010010001=P_000010010*R_001[0]+-1*P_000010110*R_002[0]+-1*P_000110010*R_011[0]+P_000110110*R_012[0];
double PR_000000020001=P_000000020*R_001[0]+-1*P_000000120*R_002[0]+P_000000220*R_003[0];
double PR_020000000010=P_020000000*R_010[0]+-1*P_120000000*R_110[0]+P_220000000*R_210[0];
double PR_010010000010=P_010010000*R_010[0]+-1*P_010110000*R_020[0]+-1*P_110010000*R_110[0]+P_110110000*R_120[0];
double PR_000020000010=P_000020000*R_010[0]+-1*P_000120000*R_020[0]+P_000220000*R_030[0];
double PR_010000010010=P_010000010*R_010[0]+-1*P_010000110*R_011[0]+-1*P_110000010*R_110[0]+P_110000110*R_111[0];
double PR_000010010010=P_000010010*R_010[0]+-1*P_000010110*R_011[0]+-1*P_000110010*R_020[0]+P_000110110*R_021[0];
double PR_000000020010=P_000000020*R_010[0]+-1*P_000000120*R_011[0]+P_000000220*R_012[0];
double PR_020000000100=P_020000000*R_100[0]+-1*P_120000000*R_200[0]+P_220000000*R_300[0];
double PR_010010000100=P_010010000*R_100[0]+-1*P_010110000*R_110[0]+-1*P_110010000*R_200[0]+P_110110000*R_210[0];
double PR_000020000100=P_000020000*R_100[0]+-1*P_000120000*R_110[0]+P_000220000*R_120[0];
double PR_010000010100=P_010000010*R_100[0]+-1*P_010000110*R_101[0]+-1*P_110000010*R_200[0]+P_110000110*R_201[0];
double PR_000010010100=P_000010010*R_100[0]+-1*P_000010110*R_101[0]+-1*P_000110010*R_110[0]+P_000110110*R_111[0];
double PR_000000020100=P_000000020*R_100[0]+-1*P_000000120*R_101[0]+P_000000220*R_102[0];
double PR_020000000002=P_020000000*R_002[0]+-1*P_120000000*R_102[0]+P_220000000*R_202[0];
double PR_010010000002=P_010010000*R_002[0]+-1*P_010110000*R_012[0]+-1*P_110010000*R_102[0]+P_110110000*R_112[0];
double PR_000020000002=P_000020000*R_002[0]+-1*P_000120000*R_012[0]+P_000220000*R_022[0];
double PR_010000010002=P_010000010*R_002[0]+-1*P_010000110*R_003[0]+-1*P_110000010*R_102[0]+P_110000110*R_103[0];
double PR_000010010002=P_000010010*R_002[0]+-1*P_000010110*R_003[0]+-1*P_000110010*R_012[0]+P_000110110*R_013[0];
double PR_000000020002=P_000000020*R_002[0]+-1*P_000000120*R_003[0]+P_000000220*R_004[0];
double PR_020000000011=P_020000000*R_011[0]+-1*P_120000000*R_111[0]+P_220000000*R_211[0];
double PR_010010000011=P_010010000*R_011[0]+-1*P_010110000*R_021[0]+-1*P_110010000*R_111[0]+P_110110000*R_121[0];
double PR_000020000011=P_000020000*R_011[0]+-1*P_000120000*R_021[0]+P_000220000*R_031[0];
double PR_010000010011=P_010000010*R_011[0]+-1*P_010000110*R_012[0]+-1*P_110000010*R_111[0]+P_110000110*R_112[0];
double PR_000010010011=P_000010010*R_011[0]+-1*P_000010110*R_012[0]+-1*P_000110010*R_021[0]+P_000110110*R_022[0];
double PR_000000020011=P_000000020*R_011[0]+-1*P_000000120*R_012[0]+P_000000220*R_013[0];
double PR_020000000020=P_020000000*R_020[0]+-1*P_120000000*R_120[0]+P_220000000*R_220[0];
double PR_010010000020=P_010010000*R_020[0]+-1*P_010110000*R_030[0]+-1*P_110010000*R_120[0]+P_110110000*R_130[0];
double PR_000020000020=P_000020000*R_020[0]+-1*P_000120000*R_030[0]+P_000220000*R_040[0];
double PR_010000010020=P_010000010*R_020[0]+-1*P_010000110*R_021[0]+-1*P_110000010*R_120[0]+P_110000110*R_121[0];
double PR_000010010020=P_000010010*R_020[0]+-1*P_000010110*R_021[0]+-1*P_000110010*R_030[0]+P_000110110*R_031[0];
double PR_000000020020=P_000000020*R_020[0]+-1*P_000000120*R_021[0]+P_000000220*R_022[0];
double PR_020000000101=P_020000000*R_101[0]+-1*P_120000000*R_201[0]+P_220000000*R_301[0];
double PR_010010000101=P_010010000*R_101[0]+-1*P_010110000*R_111[0]+-1*P_110010000*R_201[0]+P_110110000*R_211[0];
double PR_000020000101=P_000020000*R_101[0]+-1*P_000120000*R_111[0]+P_000220000*R_121[0];
double PR_010000010101=P_010000010*R_101[0]+-1*P_010000110*R_102[0]+-1*P_110000010*R_201[0]+P_110000110*R_202[0];
double PR_000010010101=P_000010010*R_101[0]+-1*P_000010110*R_102[0]+-1*P_000110010*R_111[0]+P_000110110*R_112[0];
double PR_000000020101=P_000000020*R_101[0]+-1*P_000000120*R_102[0]+P_000000220*R_103[0];
double PR_020000000110=P_020000000*R_110[0]+-1*P_120000000*R_210[0]+P_220000000*R_310[0];
double PR_010010000110=P_010010000*R_110[0]+-1*P_010110000*R_120[0]+-1*P_110010000*R_210[0]+P_110110000*R_220[0];
double PR_000020000110=P_000020000*R_110[0]+-1*P_000120000*R_120[0]+P_000220000*R_130[0];
double PR_010000010110=P_010000010*R_110[0]+-1*P_010000110*R_111[0]+-1*P_110000010*R_210[0]+P_110000110*R_211[0];
double PR_000010010110=P_000010010*R_110[0]+-1*P_000010110*R_111[0]+-1*P_000110010*R_120[0]+P_000110110*R_121[0];
double PR_000000020110=P_000000020*R_110[0]+-1*P_000000120*R_111[0]+P_000000220*R_112[0];
double PR_020000000200=P_020000000*R_200[0]+-1*P_120000000*R_300[0]+P_220000000*R_400[0];
double PR_010010000200=P_010010000*R_200[0]+-1*P_010110000*R_210[0]+-1*P_110010000*R_300[0]+P_110110000*R_310[0];
double PR_000020000200=P_000020000*R_200[0]+-1*P_000120000*R_210[0]+P_000220000*R_220[0];
double PR_010000010200=P_010000010*R_200[0]+-1*P_010000110*R_201[0]+-1*P_110000010*R_300[0]+P_110000110*R_301[0];
double PR_000010010200=P_000010010*R_200[0]+-1*P_000010110*R_201[0]+-1*P_000110010*R_210[0]+P_000110110*R_211[0];
double PR_000000020200=P_000000020*R_200[0]+-1*P_000000120*R_201[0]+P_000000220*R_202[0];
// Ket Hermite-expansion coefficients for the p,p shell pair, from
// Qd_010 = Q-C, Qd_001 = Q-D, aQin1 = 1/(2*Eta).
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
// Products of ket coefficients for the 9 (p,p) Cartesian component pairs.
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
// Final contraction: 6 bra (d) components x 3 ket rows = 18 accumulators,
// each weighted by its cached density-matrix entry.
ans_temp[ans_id*18+0]+=Pmtrx[0]*(Q_011000000*PR_020000000000+Q_111000000*PR_020000000100+Q_211000000*PR_020000000200);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(Q_010001000*PR_020000000000+Q_010101000*PR_020000000010+Q_110001000*PR_020000000100+Q_110101000*PR_020000000110);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(Q_010000001*PR_020000000000+Q_010000101*PR_020000000001+Q_110000001*PR_020000000100+Q_110000101*PR_020000000101);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(Q_001010000*PR_020000000000+Q_001110000*PR_020000000010+Q_101010000*PR_020000000100+Q_101110000*PR_020000000110);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(Q_000011000*PR_020000000000+Q_000111000*PR_020000000010+Q_000211000*PR_020000000020);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(Q_000010001*PR_020000000000+Q_000010101*PR_020000000001+Q_000110001*PR_020000000010+Q_000110101*PR_020000000011);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(Q_001000010*PR_020000000000+Q_001000110*PR_020000000001+Q_101000010*PR_020000000100+Q_101000110*PR_020000000101);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(Q_000001010*PR_020000000000+Q_000001110*PR_020000000001+Q_000101010*PR_020000000010+Q_000101110*PR_020000000011);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(Q_000000011*PR_020000000000+Q_000000111*PR_020000000001+Q_000000211*PR_020000000002);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(Q_011000000*PR_010010000000+Q_111000000*PR_010010000100+Q_211000000*PR_010010000200);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(Q_010001000*PR_010010000000+Q_010101000*PR_010010000010+Q_110001000*PR_010010000100+Q_110101000*PR_010010000110);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(Q_010000001*PR_010010000000+Q_010000101*PR_010010000001+Q_110000001*PR_010010000100+Q_110000101*PR_010010000101);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(Q_001010000*PR_010010000000+Q_001110000*PR_010010000010+Q_101010000*PR_010010000100+Q_101110000*PR_010010000110);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(Q_000011000*PR_010010000000+Q_000111000*PR_010010000010+Q_000211000*PR_010010000020);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(Q_000010001*PR_010010000000+Q_000010101*PR_010010000001+Q_000110001*PR_010010000010+Q_000110101*PR_010010000011);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(Q_001000010*PR_010010000000+Q_001000110*PR_010010000001+Q_101000010*PR_010010000100+Q_101000110*PR_010010000101);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(Q_000001010*PR_010010000000+Q_000001110*PR_010010000001+Q_000101010*PR_010010000010+Q_000101110*PR_010010000011);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(Q_000000011*PR_010010000000+Q_000000111*PR_010010000001+Q_000000211*PR_010010000002);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(Q_011000000*PR_000020000000+Q_111000000*PR_000020000100+Q_211000000*PR_000020000200);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(Q_010001000*PR_000020000000+Q_010101000*PR_000020000010+Q_110001000*PR_000020000100+Q_110101000*PR_000020000110);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(Q_010000001*PR_000020000000+Q_010000101*PR_000020000001+Q_110000001*PR_000020000100+Q_110000101*PR_000020000101);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(Q_001010000*PR_000020000000+Q_001110000*PR_000020000010+Q_101010000*PR_000020000100+Q_101110000*PR_000020000110);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(Q_000011000*PR_000020000000+Q_000111000*PR_000020000010+Q_000211000*PR_000020000020);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(Q_000010001*PR_000020000000+Q_000010101*PR_000020000001+Q_000110001*PR_000020000010+Q_000110101*PR_000020000011);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(Q_001000010*PR_000020000000+Q_001000110*PR_000020000001+Q_101000010*PR_000020000100+Q_101000110*PR_000020000101);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(Q_000001010*PR_000020000000+Q_000001110*PR_000020000001+Q_000101010*PR_000020000010+Q_000101110*PR_000020000011);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(Q_000000011*PR_000020000000+Q_000000111*PR_000020000001+Q_000000211*PR_000020000002);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(Q_011000000*PR_010000010000+Q_111000000*PR_010000010100+Q_211000000*PR_010000010200);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(Q_010001000*PR_010000010000+Q_010101000*PR_010000010010+Q_110001000*PR_010000010100+Q_110101000*PR_010000010110);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(Q_010000001*PR_010000010000+Q_010000101*PR_010000010001+Q_110000001*PR_010000010100+Q_110000101*PR_010000010101);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(Q_001010000*PR_010000010000+Q_001110000*PR_010000010010+Q_101010000*PR_010000010100+Q_101110000*PR_010000010110);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(Q_000011000*PR_010000010000+Q_000111000*PR_010000010010+Q_000211000*PR_010000010020);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(Q_000010001*PR_010000010000+Q_000010101*PR_010000010001+Q_000110001*PR_010000010010+Q_000110101*PR_010000010011);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(Q_001000010*PR_010000010000+Q_001000110*PR_010000010001+Q_101000010*PR_010000010100+Q_101000110*PR_010000010101);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(Q_000001010*PR_010000010000+Q_000001110*PR_010000010001+Q_000101010*PR_010000010010+Q_000101110*PR_010000010011);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(Q_000000011*PR_010000010000+Q_000000111*PR_010000010001+Q_000000211*PR_010000010002);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(Q_011000000*PR_000010010000+Q_111000000*PR_000010010100+Q_211000000*PR_000010010200);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(Q_010001000*PR_000010010000+Q_010101000*PR_000010010010+Q_110001000*PR_000010010100+Q_110101000*PR_000010010110);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(Q_010000001*PR_000010010000+Q_010000101*PR_000010010001+Q_110000001*PR_000010010100+Q_110000101*PR_000010010101);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(Q_001010000*PR_000010010000+Q_001110000*PR_000010010010+Q_101010000*PR_000010010100+Q_101110000*PR_000010010110);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(Q_000011000*PR_000010010000+Q_000111000*PR_000010010010+Q_000211000*PR_000010010020);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(Q_000010001*PR_000010010000+Q_000010101*PR_000010010001+Q_000110001*PR_000010010010+Q_000110101*PR_000010010011);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(Q_001000010*PR_000010010000+Q_001000110*PR_000010010001+Q_101000010*PR_000010010100+Q_101000110*PR_000010010101);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(Q_000001010*PR_000010010000+Q_000001110*PR_000010010001+Q_000101010*PR_000010010010+Q_000101110*PR_000010010011);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(Q_000000011*PR_000010010000+Q_000000111*PR_000010010001+Q_000000211*PR_000010010002);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(Q_011000000*PR_000000020000+Q_111000000*PR_000000020100+Q_211000000*PR_000000020200);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(Q_010001000*PR_000000020000+Q_010101000*PR_000000020010+Q_110001000*PR_000000020100+Q_110101000*PR_000000020110);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(Q_010000001*PR_000000020000+Q_010000101*PR_000000020001+Q_110000001*PR_000000020100+Q_110000101*PR_000000020101);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(Q_001010000*PR_000000020000+Q_001110000*PR_000000020010+Q_101010000*PR_000000020100+Q_101110000*PR_000000020110);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(Q_000011000*PR_000000020000+Q_000111000*PR_000000020010+Q_000211000*PR_000000020020);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(Q_000010001*PR_000000020000+Q_000010101*PR_000000020001+Q_000110001*PR_000000020010+Q_000110101*PR_000000020011);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(Q_001000010*PR_000000020000+Q_001000110*PR_000000020001+Q_101000010*PR_000000020100+Q_101000110*PR_000000020101);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(Q_000001010*PR_000000020000+Q_000001110*PR_000000020001+Q_000101010*PR_000000020010+Q_000101110*PR_000000020011);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(Q_000000011*PR_000000020000+Q_000000111*PR_000000020001+Q_000000211*PR_000000020002);
}
}
// Block-wide tree reduction of the 18 per-thread partials into thread 0's slot.
// NOTE(review): starts at NTHREAD/2, not tdis/2 -- assumes blockDim.x == NTHREAD.
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
// Thread 0 writes the 18 reduced values for this contraction pair.
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kq_dspp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[3]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<1;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[5];
Ft_fs_4(4,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[4];
double R_200[3];
double R_300[2];
double R_400[1];
double R_010[4];
double R_110[3];
double R_210[2];
double R_310[1];
double R_020[3];
double R_120[2];
double R_220[1];
double R_030[2];
double R_130[1];
double R_040[1];
double R_001[4];
double R_101[3];
double R_201[2];
double R_301[1];
double R_011[3];
double R_111[2];
double R_211[1];
double R_021[2];
double R_121[1];
double R_031[1];
double R_002[3];
double R_102[2];
double R_202[1];
double R_012[2];
double R_112[1];
double R_022[1];
double R_003[2];
double R_103[1];
double R_013[1];
double R_004[1];
for(int i=0;i<4;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<4;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<4;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<3;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<3;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<3;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<3;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<2;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<2;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<2;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<2;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<2;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<2;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<2;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<2;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<2;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<2;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<1;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<1;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<1;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<1;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<1;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<1;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<1;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<1;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<1;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<1;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<1;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<1;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<1;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<1;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double Pd_110[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
double P_020000000=Pd_020[0];
double P_120000000=Pd_120[0];
double P_220000000=Pd_220[0];
double P_010010000=Pd_010[0]*Pd_010[1];
double P_010110000=Pd_010[0]*Pd_110[1];
double P_110010000=Pd_110[0]*Pd_010[1];
double P_110110000=Pd_110[0]*Pd_110[1];
double P_000020000=Pd_020[1];
double P_000120000=Pd_120[1];
double P_000220000=Pd_220[1];
double P_010000010=Pd_010[0]*Pd_010[2];
double P_010000110=Pd_010[0]*Pd_110[2];
double P_110000010=Pd_110[0]*Pd_010[2];
double P_110000110=Pd_110[0]*Pd_110[2];
double P_000010010=Pd_010[1]*Pd_010[2];
double P_000010110=Pd_010[1]*Pd_110[2];
double P_000110010=Pd_110[1]*Pd_010[2];
double P_000110110=Pd_110[1]*Pd_110[2];
double P_000000020=Pd_020[2];
double P_000000120=Pd_120[2];
double P_000000220=Pd_220[2];
ans_temp[ans_id*18+0]+=Pmtrx[0]*(P_020000000*QR_011000000000+P_120000000*QR_011000000100+P_220000000*QR_011000000200);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(P_020000000*QR_010001000000+P_120000000*QR_010001000100+P_220000000*QR_010001000200);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(P_020000000*QR_010000001000+P_120000000*QR_010000001100+P_220000000*QR_010000001200);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(P_020000000*QR_001010000000+P_120000000*QR_001010000100+P_220000000*QR_001010000200);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(P_020000000*QR_000011000000+P_120000000*QR_000011000100+P_220000000*QR_000011000200);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(P_020000000*QR_000010001000+P_120000000*QR_000010001100+P_220000000*QR_000010001200);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(P_020000000*QR_001000010000+P_120000000*QR_001000010100+P_220000000*QR_001000010200);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(P_020000000*QR_000001010000+P_120000000*QR_000001010100+P_220000000*QR_000001010200);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(P_020000000*QR_000000011000+P_120000000*QR_000000011100+P_220000000*QR_000000011200);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(P_010010000*QR_011000000000+P_010110000*QR_011000000010+P_110010000*QR_011000000100+P_110110000*QR_011000000110);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(P_010010000*QR_010001000000+P_010110000*QR_010001000010+P_110010000*QR_010001000100+P_110110000*QR_010001000110);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(P_010010000*QR_010000001000+P_010110000*QR_010000001010+P_110010000*QR_010000001100+P_110110000*QR_010000001110);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(P_010010000*QR_001010000000+P_010110000*QR_001010000010+P_110010000*QR_001010000100+P_110110000*QR_001010000110);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(P_010010000*QR_000011000000+P_010110000*QR_000011000010+P_110010000*QR_000011000100+P_110110000*QR_000011000110);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(P_010010000*QR_000010001000+P_010110000*QR_000010001010+P_110010000*QR_000010001100+P_110110000*QR_000010001110);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(P_010010000*QR_001000010000+P_010110000*QR_001000010010+P_110010000*QR_001000010100+P_110110000*QR_001000010110);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(P_010010000*QR_000001010000+P_010110000*QR_000001010010+P_110010000*QR_000001010100+P_110110000*QR_000001010110);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(P_010010000*QR_000000011000+P_010110000*QR_000000011010+P_110010000*QR_000000011100+P_110110000*QR_000000011110);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(P_000020000*QR_011000000000+P_000120000*QR_011000000010+P_000220000*QR_011000000020);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(P_000020000*QR_010001000000+P_000120000*QR_010001000010+P_000220000*QR_010001000020);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(P_000020000*QR_010000001000+P_000120000*QR_010000001010+P_000220000*QR_010000001020);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(P_000020000*QR_001010000000+P_000120000*QR_001010000010+P_000220000*QR_001010000020);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(P_000020000*QR_000011000000+P_000120000*QR_000011000010+P_000220000*QR_000011000020);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(P_000020000*QR_000010001000+P_000120000*QR_000010001010+P_000220000*QR_000010001020);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(P_000020000*QR_001000010000+P_000120000*QR_001000010010+P_000220000*QR_001000010020);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(P_000020000*QR_000001010000+P_000120000*QR_000001010010+P_000220000*QR_000001010020);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(P_000020000*QR_000000011000+P_000120000*QR_000000011010+P_000220000*QR_000000011020);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(P_010000010*QR_011000000000+P_010000110*QR_011000000001+P_110000010*QR_011000000100+P_110000110*QR_011000000101);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(P_010000010*QR_010001000000+P_010000110*QR_010001000001+P_110000010*QR_010001000100+P_110000110*QR_010001000101);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(P_010000010*QR_010000001000+P_010000110*QR_010000001001+P_110000010*QR_010000001100+P_110000110*QR_010000001101);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(P_010000010*QR_001010000000+P_010000110*QR_001010000001+P_110000010*QR_001010000100+P_110000110*QR_001010000101);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(P_010000010*QR_000011000000+P_010000110*QR_000011000001+P_110000010*QR_000011000100+P_110000110*QR_000011000101);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(P_010000010*QR_000010001000+P_010000110*QR_000010001001+P_110000010*QR_000010001100+P_110000110*QR_000010001101);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(P_010000010*QR_001000010000+P_010000110*QR_001000010001+P_110000010*QR_001000010100+P_110000110*QR_001000010101);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(P_010000010*QR_000001010000+P_010000110*QR_000001010001+P_110000010*QR_000001010100+P_110000110*QR_000001010101);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(P_010000010*QR_000000011000+P_010000110*QR_000000011001+P_110000010*QR_000000011100+P_110000110*QR_000000011101);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(P_000010010*QR_011000000000+P_000010110*QR_011000000001+P_000110010*QR_011000000010+P_000110110*QR_011000000011);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(P_000010010*QR_010001000000+P_000010110*QR_010001000001+P_000110010*QR_010001000010+P_000110110*QR_010001000011);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(P_000010010*QR_010000001000+P_000010110*QR_010000001001+P_000110010*QR_010000001010+P_000110110*QR_010000001011);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(P_000010010*QR_001010000000+P_000010110*QR_001010000001+P_000110010*QR_001010000010+P_000110110*QR_001010000011);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(P_000010010*QR_000011000000+P_000010110*QR_000011000001+P_000110010*QR_000011000010+P_000110110*QR_000011000011);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(P_000010010*QR_000010001000+P_000010110*QR_000010001001+P_000110010*QR_000010001010+P_000110110*QR_000010001011);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(P_000010010*QR_001000010000+P_000010110*QR_001000010001+P_000110010*QR_001000010010+P_000110110*QR_001000010011);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(P_000010010*QR_000001010000+P_000010110*QR_000001010001+P_000110010*QR_000001010010+P_000110110*QR_000001010011);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(P_000010010*QR_000000011000+P_000010110*QR_000000011001+P_000110010*QR_000000011010+P_000110110*QR_000000011011);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(P_000000020*QR_011000000000+P_000000120*QR_011000000001+P_000000220*QR_011000000002);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(P_000000020*QR_010001000000+P_000000120*QR_010001000001+P_000000220*QR_010001000002);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(P_000000020*QR_010000001000+P_000000120*QR_010000001001+P_000000220*QR_010000001002);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(P_000000020*QR_001010000000+P_000000120*QR_001010000001+P_000000220*QR_001010000002);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(P_000000020*QR_000011000000+P_000000120*QR_000011000001+P_000000220*QR_000011000002);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(P_000000020*QR_000010001000+P_000000120*QR_000010001001+P_000000220*QR_000010001002);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(P_000000020*QR_001000010000+P_000000120*QR_001000010001+P_000000220*QR_001000010002);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(P_000000020*QR_000001010000+P_000000120*QR_000001010001+P_000000220*QR_000001010002);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(P_000000020*QR_000000011000+P_000000120*QR_000000011001+P_000000220*QR_000000011002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kp_dppp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[9]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<3;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_001[3];
temp_int2=tex1Dfetch(tex_PB,jj*3+0);
Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+1);
Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+2);
Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[6];
Ft_fs_5(5,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
double R_100[5];
double R_200[4];
double R_300[3];
double R_400[2];
double R_500[1];
double R_010[5];
double R_110[4];
double R_210[3];
double R_310[2];
double R_410[1];
double R_020[4];
double R_120[3];
double R_220[2];
double R_320[1];
double R_030[3];
double R_130[2];
double R_230[1];
double R_040[2];
double R_140[1];
double R_050[1];
double R_001[5];
double R_101[4];
double R_201[3];
double R_301[2];
double R_401[1];
double R_011[4];
double R_111[3];
double R_211[2];
double R_311[1];
double R_021[3];
double R_121[2];
double R_221[1];
double R_031[2];
double R_131[1];
double R_041[1];
double R_002[4];
double R_102[3];
double R_202[2];
double R_302[1];
double R_012[3];
double R_112[2];
double R_212[1];
double R_022[2];
double R_122[1];
double R_032[1];
double R_003[3];
double R_103[2];
double R_203[1];
double R_013[2];
double R_113[1];
double R_023[1];
double R_004[2];
double R_104[1];
double R_014[1];
double R_005[1];
for(int i=0;i<5;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<5;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<5;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<4;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<4;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<4;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<4;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<3;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<3;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<3;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<3;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<3;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<3;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<3;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<3;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<3;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<2;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<2;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<2;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<2;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<2;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<2;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<2;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<2;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<2;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<2;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<2;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<2;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<2;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<1;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<1;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<1;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<1;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<1;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<1;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<1;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<1;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<1;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<1;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<1;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<1;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<1;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<1;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<1;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<1;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<1;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<1;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<1;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<1;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<1;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
double Pd_101[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
double Pd_021[3];
double Pd_121[3];
double Pd_221[3];
double Pd_321[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
for(int i=0;i<3;i++){
Pd_021[i]=Pd_111[i]+Pd_010[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_121[i]=2*Pd_211[i]+Pd_010[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_221[i]=Pd_010[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_321[i]=aPin1*Pd_211[i];
}
double P_021000000=Pd_021[0];
double P_121000000=Pd_121[0];
double P_221000000=Pd_221[0];
double P_321000000=Pd_321[0];
double P_020001000=Pd_020[0]*Pd_001[1];
double P_020101000=Pd_020[0]*Pd_101[1];
double P_120001000=Pd_120[0]*Pd_001[1];
double P_120101000=Pd_120[0]*Pd_101[1];
double P_220001000=Pd_220[0]*Pd_001[1];
double P_220101000=Pd_220[0]*Pd_101[1];
double P_020000001=Pd_020[0]*Pd_001[2];
double P_020000101=Pd_020[0]*Pd_101[2];
double P_120000001=Pd_120[0]*Pd_001[2];
double P_120000101=Pd_120[0]*Pd_101[2];
double P_220000001=Pd_220[0]*Pd_001[2];
double P_220000101=Pd_220[0]*Pd_101[2];
double P_011010000=Pd_011[0]*Pd_010[1];
double P_011110000=Pd_011[0]*Pd_110[1];
double P_111010000=Pd_111[0]*Pd_010[1];
double P_111110000=Pd_111[0]*Pd_110[1];
double P_211010000=Pd_211[0]*Pd_010[1];
double P_211110000=Pd_211[0]*Pd_110[1];
double P_010011000=Pd_010[0]*Pd_011[1];
double P_010111000=Pd_010[0]*Pd_111[1];
double P_010211000=Pd_010[0]*Pd_211[1];
double P_110011000=Pd_110[0]*Pd_011[1];
double P_110111000=Pd_110[0]*Pd_111[1];
double P_110211000=Pd_110[0]*Pd_211[1];
double P_010010001=Pd_010[0]*Pd_010[1]*Pd_001[2];
double P_010010101=Pd_010[0]*Pd_010[1]*Pd_101[2];
double P_010110001=Pd_010[0]*Pd_110[1]*Pd_001[2];
double P_010110101=Pd_010[0]*Pd_110[1]*Pd_101[2];
double P_110010001=Pd_110[0]*Pd_010[1]*Pd_001[2];
double P_110010101=Pd_110[0]*Pd_010[1]*Pd_101[2];
double P_110110001=Pd_110[0]*Pd_110[1]*Pd_001[2];
double P_110110101=Pd_110[0]*Pd_110[1]*Pd_101[2];
double P_001020000=Pd_001[0]*Pd_020[1];
double P_001120000=Pd_001[0]*Pd_120[1];
double P_001220000=Pd_001[0]*Pd_220[1];
double P_101020000=Pd_101[0]*Pd_020[1];
double P_101120000=Pd_101[0]*Pd_120[1];
double P_101220000=Pd_101[0]*Pd_220[1];
double P_000021000=Pd_021[1];
double P_000121000=Pd_121[1];
double P_000221000=Pd_221[1];
double P_000321000=Pd_321[1];
double P_000020001=Pd_020[1]*Pd_001[2];
double P_000020101=Pd_020[1]*Pd_101[2];
double P_000120001=Pd_120[1]*Pd_001[2];
double P_000120101=Pd_120[1]*Pd_101[2];
double P_000220001=Pd_220[1]*Pd_001[2];
double P_000220101=Pd_220[1]*Pd_101[2];
double P_011000010=Pd_011[0]*Pd_010[2];
double P_011000110=Pd_011[0]*Pd_110[2];
double P_111000010=Pd_111[0]*Pd_010[2];
double P_111000110=Pd_111[0]*Pd_110[2];
double P_211000010=Pd_211[0]*Pd_010[2];
double P_211000110=Pd_211[0]*Pd_110[2];
double P_010001010=Pd_010[0]*Pd_001[1]*Pd_010[2];
double P_010001110=Pd_010[0]*Pd_001[1]*Pd_110[2];
double P_010101010=Pd_010[0]*Pd_101[1]*Pd_010[2];
double P_010101110=Pd_010[0]*Pd_101[1]*Pd_110[2];
double P_110001010=Pd_110[0]*Pd_001[1]*Pd_010[2];
double P_110001110=Pd_110[0]*Pd_001[1]*Pd_110[2];
double P_110101010=Pd_110[0]*Pd_101[1]*Pd_010[2];
double P_110101110=Pd_110[0]*Pd_101[1]*Pd_110[2];
double P_010000011=Pd_010[0]*Pd_011[2];
double P_010000111=Pd_010[0]*Pd_111[2];
double P_010000211=Pd_010[0]*Pd_211[2];
double P_110000011=Pd_110[0]*Pd_011[2];
double P_110000111=Pd_110[0]*Pd_111[2];
double P_110000211=Pd_110[0]*Pd_211[2];
double P_001010010=Pd_001[0]*Pd_010[1]*Pd_010[2];
double P_001010110=Pd_001[0]*Pd_010[1]*Pd_110[2];
double P_001110010=Pd_001[0]*Pd_110[1]*Pd_010[2];
double P_001110110=Pd_001[0]*Pd_110[1]*Pd_110[2];
double P_101010010=Pd_101[0]*Pd_010[1]*Pd_010[2];
double P_101010110=Pd_101[0]*Pd_010[1]*Pd_110[2];
double P_101110010=Pd_101[0]*Pd_110[1]*Pd_010[2];
double P_101110110=Pd_101[0]*Pd_110[1]*Pd_110[2];
double P_000011010=Pd_011[1]*Pd_010[2];
double P_000011110=Pd_011[1]*Pd_110[2];
double P_000111010=Pd_111[1]*Pd_010[2];
double P_000111110=Pd_111[1]*Pd_110[2];
double P_000211010=Pd_211[1]*Pd_010[2];
double P_000211110=Pd_211[1]*Pd_110[2];
double P_000010011=Pd_010[1]*Pd_011[2];
double P_000010111=Pd_010[1]*Pd_111[2];
double P_000010211=Pd_010[1]*Pd_211[2];
double P_000110011=Pd_110[1]*Pd_011[2];
double P_000110111=Pd_110[1]*Pd_111[2];
double P_000110211=Pd_110[1]*Pd_211[2];
double P_001000020=Pd_001[0]*Pd_020[2];
double P_001000120=Pd_001[0]*Pd_120[2];
double P_001000220=Pd_001[0]*Pd_220[2];
double P_101000020=Pd_101[0]*Pd_020[2];
double P_101000120=Pd_101[0]*Pd_120[2];
double P_101000220=Pd_101[0]*Pd_220[2];
double P_000001020=Pd_001[1]*Pd_020[2];
double P_000001120=Pd_001[1]*Pd_120[2];
double P_000001220=Pd_001[1]*Pd_220[2];
double P_000101020=Pd_101[1]*Pd_020[2];
double P_000101120=Pd_101[1]*Pd_120[2];
double P_000101220=Pd_101[1]*Pd_220[2];
double P_000000021=Pd_021[2];
double P_000000121=Pd_121[2];
double P_000000221=Pd_221[2];
double P_000000321=Pd_321[2];
double PR_021000000000=P_021000000*R_000[0]+-1*P_121000000*R_100[0]+P_221000000*R_200[0]+-1*P_321000000*R_300[0];
double PR_020001000000=P_020001000*R_000[0]+-1*P_020101000*R_010[0]+-1*P_120001000*R_100[0]+P_120101000*R_110[0]+P_220001000*R_200[0]+-1*P_220101000*R_210[0];
double PR_020000001000=P_020000001*R_000[0]+-1*P_020000101*R_001[0]+-1*P_120000001*R_100[0]+P_120000101*R_101[0]+P_220000001*R_200[0]+-1*P_220000101*R_201[0];
double PR_011010000000=P_011010000*R_000[0]+-1*P_011110000*R_010[0]+-1*P_111010000*R_100[0]+P_111110000*R_110[0]+P_211010000*R_200[0]+-1*P_211110000*R_210[0];
double PR_010011000000=P_010011000*R_000[0]+-1*P_010111000*R_010[0]+P_010211000*R_020[0]+-1*P_110011000*R_100[0]+P_110111000*R_110[0]+-1*P_110211000*R_120[0];
double PR_010010001000=P_010010001*R_000[0]+-1*P_010010101*R_001[0]+-1*P_010110001*R_010[0]+P_010110101*R_011[0]+-1*P_110010001*R_100[0]+P_110010101*R_101[0]+P_110110001*R_110[0]+-1*P_110110101*R_111[0];
double PR_001020000000=P_001020000*R_000[0]+-1*P_001120000*R_010[0]+P_001220000*R_020[0]+-1*P_101020000*R_100[0]+P_101120000*R_110[0]+-1*P_101220000*R_120[0];
double PR_000021000000=P_000021000*R_000[0]+-1*P_000121000*R_010[0]+P_000221000*R_020[0]+-1*P_000321000*R_030[0];
double PR_000020001000=P_000020001*R_000[0]+-1*P_000020101*R_001[0]+-1*P_000120001*R_010[0]+P_000120101*R_011[0]+P_000220001*R_020[0]+-1*P_000220101*R_021[0];
double PR_011000010000=P_011000010*R_000[0]+-1*P_011000110*R_001[0]+-1*P_111000010*R_100[0]+P_111000110*R_101[0]+P_211000010*R_200[0]+-1*P_211000110*R_201[0];
double PR_010001010000=P_010001010*R_000[0]+-1*P_010001110*R_001[0]+-1*P_010101010*R_010[0]+P_010101110*R_011[0]+-1*P_110001010*R_100[0]+P_110001110*R_101[0]+P_110101010*R_110[0]+-1*P_110101110*R_111[0];
double PR_010000011000=P_010000011*R_000[0]+-1*P_010000111*R_001[0]+P_010000211*R_002[0]+-1*P_110000011*R_100[0]+P_110000111*R_101[0]+-1*P_110000211*R_102[0];
double PR_001010010000=P_001010010*R_000[0]+-1*P_001010110*R_001[0]+-1*P_001110010*R_010[0]+P_001110110*R_011[0]+-1*P_101010010*R_100[0]+P_101010110*R_101[0]+P_101110010*R_110[0]+-1*P_101110110*R_111[0];
double PR_000011010000=P_000011010*R_000[0]+-1*P_000011110*R_001[0]+-1*P_000111010*R_010[0]+P_000111110*R_011[0]+P_000211010*R_020[0]+-1*P_000211110*R_021[0];
double PR_000010011000=P_000010011*R_000[0]+-1*P_000010111*R_001[0]+P_000010211*R_002[0]+-1*P_000110011*R_010[0]+P_000110111*R_011[0]+-1*P_000110211*R_012[0];
double PR_001000020000=P_001000020*R_000[0]+-1*P_001000120*R_001[0]+P_001000220*R_002[0]+-1*P_101000020*R_100[0]+P_101000120*R_101[0]+-1*P_101000220*R_102[0];
double PR_000001020000=P_000001020*R_000[0]+-1*P_000001120*R_001[0]+P_000001220*R_002[0]+-1*P_000101020*R_010[0]+P_000101120*R_011[0]+-1*P_000101220*R_012[0];
double PR_000000021000=P_000000021*R_000[0]+-1*P_000000121*R_001[0]+P_000000221*R_002[0]+-1*P_000000321*R_003[0];
double PR_021000000001=P_021000000*R_001[0]+-1*P_121000000*R_101[0]+P_221000000*R_201[0]+-1*P_321000000*R_301[0];
double PR_020001000001=P_020001000*R_001[0]+-1*P_020101000*R_011[0]+-1*P_120001000*R_101[0]+P_120101000*R_111[0]+P_220001000*R_201[0]+-1*P_220101000*R_211[0];
double PR_020000001001=P_020000001*R_001[0]+-1*P_020000101*R_002[0]+-1*P_120000001*R_101[0]+P_120000101*R_102[0]+P_220000001*R_201[0]+-1*P_220000101*R_202[0];
double PR_011010000001=P_011010000*R_001[0]+-1*P_011110000*R_011[0]+-1*P_111010000*R_101[0]+P_111110000*R_111[0]+P_211010000*R_201[0]+-1*P_211110000*R_211[0];
double PR_010011000001=P_010011000*R_001[0]+-1*P_010111000*R_011[0]+P_010211000*R_021[0]+-1*P_110011000*R_101[0]+P_110111000*R_111[0]+-1*P_110211000*R_121[0];
double PR_010010001001=P_010010001*R_001[0]+-1*P_010010101*R_002[0]+-1*P_010110001*R_011[0]+P_010110101*R_012[0]+-1*P_110010001*R_101[0]+P_110010101*R_102[0]+P_110110001*R_111[0]+-1*P_110110101*R_112[0];
double PR_001020000001=P_001020000*R_001[0]+-1*P_001120000*R_011[0]+P_001220000*R_021[0]+-1*P_101020000*R_101[0]+P_101120000*R_111[0]+-1*P_101220000*R_121[0];
double PR_000021000001=P_000021000*R_001[0]+-1*P_000121000*R_011[0]+P_000221000*R_021[0]+-1*P_000321000*R_031[0];
double PR_000020001001=P_000020001*R_001[0]+-1*P_000020101*R_002[0]+-1*P_000120001*R_011[0]+P_000120101*R_012[0]+P_000220001*R_021[0]+-1*P_000220101*R_022[0];
double PR_011000010001=P_011000010*R_001[0]+-1*P_011000110*R_002[0]+-1*P_111000010*R_101[0]+P_111000110*R_102[0]+P_211000010*R_201[0]+-1*P_211000110*R_202[0];
double PR_010001010001=P_010001010*R_001[0]+-1*P_010001110*R_002[0]+-1*P_010101010*R_011[0]+P_010101110*R_012[0]+-1*P_110001010*R_101[0]+P_110001110*R_102[0]+P_110101010*R_111[0]+-1*P_110101110*R_112[0];
double PR_010000011001=P_010000011*R_001[0]+-1*P_010000111*R_002[0]+P_010000211*R_003[0]+-1*P_110000011*R_101[0]+P_110000111*R_102[0]+-1*P_110000211*R_103[0];
double PR_001010010001=P_001010010*R_001[0]+-1*P_001010110*R_002[0]+-1*P_001110010*R_011[0]+P_001110110*R_012[0]+-1*P_101010010*R_101[0]+P_101010110*R_102[0]+P_101110010*R_111[0]+-1*P_101110110*R_112[0];
double PR_000011010001=P_000011010*R_001[0]+-1*P_000011110*R_002[0]+-1*P_000111010*R_011[0]+P_000111110*R_012[0]+P_000211010*R_021[0]+-1*P_000211110*R_022[0];
double PR_000010011001=P_000010011*R_001[0]+-1*P_000010111*R_002[0]+P_000010211*R_003[0]+-1*P_000110011*R_011[0]+P_000110111*R_012[0]+-1*P_000110211*R_013[0];
double PR_001000020001=P_001000020*R_001[0]+-1*P_001000120*R_002[0]+P_001000220*R_003[0]+-1*P_101000020*R_101[0]+P_101000120*R_102[0]+-1*P_101000220*R_103[0];
double PR_000001020001=P_000001020*R_001[0]+-1*P_000001120*R_002[0]+P_000001220*R_003[0]+-1*P_000101020*R_011[0]+P_000101120*R_012[0]+-1*P_000101220*R_013[0];
double PR_000000021001=P_000000021*R_001[0]+-1*P_000000121*R_002[0]+P_000000221*R_003[0]+-1*P_000000321*R_004[0];
double PR_021000000010=P_021000000*R_010[0]+-1*P_121000000*R_110[0]+P_221000000*R_210[0]+-1*P_321000000*R_310[0];
double PR_020001000010=P_020001000*R_010[0]+-1*P_020101000*R_020[0]+-1*P_120001000*R_110[0]+P_120101000*R_120[0]+P_220001000*R_210[0]+-1*P_220101000*R_220[0];
double PR_020000001010=P_020000001*R_010[0]+-1*P_020000101*R_011[0]+-1*P_120000001*R_110[0]+P_120000101*R_111[0]+P_220000001*R_210[0]+-1*P_220000101*R_211[0];
double PR_011010000010=P_011010000*R_010[0]+-1*P_011110000*R_020[0]+-1*P_111010000*R_110[0]+P_111110000*R_120[0]+P_211010000*R_210[0]+-1*P_211110000*R_220[0];
double PR_010011000010=P_010011000*R_010[0]+-1*P_010111000*R_020[0]+P_010211000*R_030[0]+-1*P_110011000*R_110[0]+P_110111000*R_120[0]+-1*P_110211000*R_130[0];
double PR_010010001010=P_010010001*R_010[0]+-1*P_010010101*R_011[0]+-1*P_010110001*R_020[0]+P_010110101*R_021[0]+-1*P_110010001*R_110[0]+P_110010101*R_111[0]+P_110110001*R_120[0]+-1*P_110110101*R_121[0];
double PR_001020000010=P_001020000*R_010[0]+-1*P_001120000*R_020[0]+P_001220000*R_030[0]+-1*P_101020000*R_110[0]+P_101120000*R_120[0]+-1*P_101220000*R_130[0];
double PR_000021000010=P_000021000*R_010[0]+-1*P_000121000*R_020[0]+P_000221000*R_030[0]+-1*P_000321000*R_040[0];
double PR_000020001010=P_000020001*R_010[0]+-1*P_000020101*R_011[0]+-1*P_000120001*R_020[0]+P_000120101*R_021[0]+P_000220001*R_030[0]+-1*P_000220101*R_031[0];
double PR_011000010010=P_011000010*R_010[0]+-1*P_011000110*R_011[0]+-1*P_111000010*R_110[0]+P_111000110*R_111[0]+P_211000010*R_210[0]+-1*P_211000110*R_211[0];
double PR_010001010010=P_010001010*R_010[0]+-1*P_010001110*R_011[0]+-1*P_010101010*R_020[0]+P_010101110*R_021[0]+-1*P_110001010*R_110[0]+P_110001110*R_111[0]+P_110101010*R_120[0]+-1*P_110101110*R_121[0];
double PR_010000011010=P_010000011*R_010[0]+-1*P_010000111*R_011[0]+P_010000211*R_012[0]+-1*P_110000011*R_110[0]+P_110000111*R_111[0]+-1*P_110000211*R_112[0];
double PR_001010010010=P_001010010*R_010[0]+-1*P_001010110*R_011[0]+-1*P_001110010*R_020[0]+P_001110110*R_021[0]+-1*P_101010010*R_110[0]+P_101010110*R_111[0]+P_101110010*R_120[0]+-1*P_101110110*R_121[0];
double PR_000011010010=P_000011010*R_010[0]+-1*P_000011110*R_011[0]+-1*P_000111010*R_020[0]+P_000111110*R_021[0]+P_000211010*R_030[0]+-1*P_000211110*R_031[0];
double PR_000010011010=P_000010011*R_010[0]+-1*P_000010111*R_011[0]+P_000010211*R_012[0]+-1*P_000110011*R_020[0]+P_000110111*R_021[0]+-1*P_000110211*R_022[0];
double PR_001000020010=P_001000020*R_010[0]+-1*P_001000120*R_011[0]+P_001000220*R_012[0]+-1*P_101000020*R_110[0]+P_101000120*R_111[0]+-1*P_101000220*R_112[0];
double PR_000001020010=P_000001020*R_010[0]+-1*P_000001120*R_011[0]+P_000001220*R_012[0]+-1*P_000101020*R_020[0]+P_000101120*R_021[0]+-1*P_000101220*R_022[0];
double PR_000000021010=P_000000021*R_010[0]+-1*P_000000121*R_011[0]+P_000000221*R_012[0]+-1*P_000000321*R_013[0];
double PR_021000000100=P_021000000*R_100[0]+-1*P_121000000*R_200[0]+P_221000000*R_300[0]+-1*P_321000000*R_400[0];
double PR_020001000100=P_020001000*R_100[0]+-1*P_020101000*R_110[0]+-1*P_120001000*R_200[0]+P_120101000*R_210[0]+P_220001000*R_300[0]+-1*P_220101000*R_310[0];
double PR_020000001100=P_020000001*R_100[0]+-1*P_020000101*R_101[0]+-1*P_120000001*R_200[0]+P_120000101*R_201[0]+P_220000001*R_300[0]+-1*P_220000101*R_301[0];
double PR_011010000100=P_011010000*R_100[0]+-1*P_011110000*R_110[0]+-1*P_111010000*R_200[0]+P_111110000*R_210[0]+P_211010000*R_300[0]+-1*P_211110000*R_310[0];
double PR_010011000100=P_010011000*R_100[0]+-1*P_010111000*R_110[0]+P_010211000*R_120[0]+-1*P_110011000*R_200[0]+P_110111000*R_210[0]+-1*P_110211000*R_220[0];
double PR_010010001100=P_010010001*R_100[0]+-1*P_010010101*R_101[0]+-1*P_010110001*R_110[0]+P_010110101*R_111[0]+-1*P_110010001*R_200[0]+P_110010101*R_201[0]+P_110110001*R_210[0]+-1*P_110110101*R_211[0];
double PR_001020000100=P_001020000*R_100[0]+-1*P_001120000*R_110[0]+P_001220000*R_120[0]+-1*P_101020000*R_200[0]+P_101120000*R_210[0]+-1*P_101220000*R_220[0];
double PR_000021000100=P_000021000*R_100[0]+-1*P_000121000*R_110[0]+P_000221000*R_120[0]+-1*P_000321000*R_130[0];
double PR_000020001100=P_000020001*R_100[0]+-1*P_000020101*R_101[0]+-1*P_000120001*R_110[0]+P_000120101*R_111[0]+P_000220001*R_120[0]+-1*P_000220101*R_121[0];
double PR_011000010100=P_011000010*R_100[0]+-1*P_011000110*R_101[0]+-1*P_111000010*R_200[0]+P_111000110*R_201[0]+P_211000010*R_300[0]+-1*P_211000110*R_301[0];
double PR_010001010100=P_010001010*R_100[0]+-1*P_010001110*R_101[0]+-1*P_010101010*R_110[0]+P_010101110*R_111[0]+-1*P_110001010*R_200[0]+P_110001110*R_201[0]+P_110101010*R_210[0]+-1*P_110101110*R_211[0];
double PR_010000011100=P_010000011*R_100[0]+-1*P_010000111*R_101[0]+P_010000211*R_102[0]+-1*P_110000011*R_200[0]+P_110000111*R_201[0]+-1*P_110000211*R_202[0];
double PR_001010010100=P_001010010*R_100[0]+-1*P_001010110*R_101[0]+-1*P_001110010*R_110[0]+P_001110110*R_111[0]+-1*P_101010010*R_200[0]+P_101010110*R_201[0]+P_101110010*R_210[0]+-1*P_101110110*R_211[0];
double PR_000011010100=P_000011010*R_100[0]+-1*P_000011110*R_101[0]+-1*P_000111010*R_110[0]+P_000111110*R_111[0]+P_000211010*R_120[0]+-1*P_000211110*R_121[0];
double PR_000010011100=P_000010011*R_100[0]+-1*P_000010111*R_101[0]+P_000010211*R_102[0]+-1*P_000110011*R_110[0]+P_000110111*R_111[0]+-1*P_000110211*R_112[0];
double PR_001000020100=P_001000020*R_100[0]+-1*P_001000120*R_101[0]+P_001000220*R_102[0]+-1*P_101000020*R_200[0]+P_101000120*R_201[0]+-1*P_101000220*R_202[0];
double PR_000001020100=P_000001020*R_100[0]+-1*P_000001120*R_101[0]+P_000001220*R_102[0]+-1*P_000101020*R_110[0]+P_000101120*R_111[0]+-1*P_000101220*R_112[0];
double PR_000000021100=P_000000021*R_100[0]+-1*P_000000121*R_101[0]+P_000000221*R_102[0]+-1*P_000000321*R_103[0];
double PR_021000000002=P_021000000*R_002[0]+-1*P_121000000*R_102[0]+P_221000000*R_202[0]+-1*P_321000000*R_302[0];
double PR_020001000002=P_020001000*R_002[0]+-1*P_020101000*R_012[0]+-1*P_120001000*R_102[0]+P_120101000*R_112[0]+P_220001000*R_202[0]+-1*P_220101000*R_212[0];
double PR_020000001002=P_020000001*R_002[0]+-1*P_020000101*R_003[0]+-1*P_120000001*R_102[0]+P_120000101*R_103[0]+P_220000001*R_202[0]+-1*P_220000101*R_203[0];
double PR_011010000002=P_011010000*R_002[0]+-1*P_011110000*R_012[0]+-1*P_111010000*R_102[0]+P_111110000*R_112[0]+P_211010000*R_202[0]+-1*P_211110000*R_212[0];
double PR_010011000002=P_010011000*R_002[0]+-1*P_010111000*R_012[0]+P_010211000*R_022[0]+-1*P_110011000*R_102[0]+P_110111000*R_112[0]+-1*P_110211000*R_122[0];
double PR_010010001002=P_010010001*R_002[0]+-1*P_010010101*R_003[0]+-1*P_010110001*R_012[0]+P_010110101*R_013[0]+-1*P_110010001*R_102[0]+P_110010101*R_103[0]+P_110110001*R_112[0]+-1*P_110110101*R_113[0];
double PR_001020000002=P_001020000*R_002[0]+-1*P_001120000*R_012[0]+P_001220000*R_022[0]+-1*P_101020000*R_102[0]+P_101120000*R_112[0]+-1*P_101220000*R_122[0];
double PR_000021000002=P_000021000*R_002[0]+-1*P_000121000*R_012[0]+P_000221000*R_022[0]+-1*P_000321000*R_032[0];
double PR_000020001002=P_000020001*R_002[0]+-1*P_000020101*R_003[0]+-1*P_000120001*R_012[0]+P_000120101*R_013[0]+P_000220001*R_022[0]+-1*P_000220101*R_023[0];
double PR_011000010002=P_011000010*R_002[0]+-1*P_011000110*R_003[0]+-1*P_111000010*R_102[0]+P_111000110*R_103[0]+P_211000010*R_202[0]+-1*P_211000110*R_203[0];
double PR_010001010002=P_010001010*R_002[0]+-1*P_010001110*R_003[0]+-1*P_010101010*R_012[0]+P_010101110*R_013[0]+-1*P_110001010*R_102[0]+P_110001110*R_103[0]+P_110101010*R_112[0]+-1*P_110101110*R_113[0];
double PR_010000011002=P_010000011*R_002[0]+-1*P_010000111*R_003[0]+P_010000211*R_004[0]+-1*P_110000011*R_102[0]+P_110000111*R_103[0]+-1*P_110000211*R_104[0];
double PR_001010010002=P_001010010*R_002[0]+-1*P_001010110*R_003[0]+-1*P_001110010*R_012[0]+P_001110110*R_013[0]+-1*P_101010010*R_102[0]+P_101010110*R_103[0]+P_101110010*R_112[0]+-1*P_101110110*R_113[0];
double PR_000011010002=P_000011010*R_002[0]+-1*P_000011110*R_003[0]+-1*P_000111010*R_012[0]+P_000111110*R_013[0]+P_000211010*R_022[0]+-1*P_000211110*R_023[0];
double PR_000010011002=P_000010011*R_002[0]+-1*P_000010111*R_003[0]+P_000010211*R_004[0]+-1*P_000110011*R_012[0]+P_000110111*R_013[0]+-1*P_000110211*R_014[0];
double PR_001000020002=P_001000020*R_002[0]+-1*P_001000120*R_003[0]+P_001000220*R_004[0]+-1*P_101000020*R_102[0]+P_101000120*R_103[0]+-1*P_101000220*R_104[0];
double PR_000001020002=P_000001020*R_002[0]+-1*P_000001120*R_003[0]+P_000001220*R_004[0]+-1*P_000101020*R_012[0]+P_000101120*R_013[0]+-1*P_000101220*R_014[0];
double PR_000000021002=P_000000021*R_002[0]+-1*P_000000121*R_003[0]+P_000000221*R_004[0]+-1*P_000000321*R_005[0];
double PR_021000000011=P_021000000*R_011[0]+-1*P_121000000*R_111[0]+P_221000000*R_211[0]+-1*P_321000000*R_311[0];
double PR_020001000011=P_020001000*R_011[0]+-1*P_020101000*R_021[0]+-1*P_120001000*R_111[0]+P_120101000*R_121[0]+P_220001000*R_211[0]+-1*P_220101000*R_221[0];
double PR_020000001011=P_020000001*R_011[0]+-1*P_020000101*R_012[0]+-1*P_120000001*R_111[0]+P_120000101*R_112[0]+P_220000001*R_211[0]+-1*P_220000101*R_212[0];
double PR_011010000011=P_011010000*R_011[0]+-1*P_011110000*R_021[0]+-1*P_111010000*R_111[0]+P_111110000*R_121[0]+P_211010000*R_211[0]+-1*P_211110000*R_221[0];
double PR_010011000011=P_010011000*R_011[0]+-1*P_010111000*R_021[0]+P_010211000*R_031[0]+-1*P_110011000*R_111[0]+P_110111000*R_121[0]+-1*P_110211000*R_131[0];
double PR_010010001011=P_010010001*R_011[0]+-1*P_010010101*R_012[0]+-1*P_010110001*R_021[0]+P_010110101*R_022[0]+-1*P_110010001*R_111[0]+P_110010101*R_112[0]+P_110110001*R_121[0]+-1*P_110110101*R_122[0];
double PR_001020000011=P_001020000*R_011[0]+-1*P_001120000*R_021[0]+P_001220000*R_031[0]+-1*P_101020000*R_111[0]+P_101120000*R_121[0]+-1*P_101220000*R_131[0];
double PR_000021000011=P_000021000*R_011[0]+-1*P_000121000*R_021[0]+P_000221000*R_031[0]+-1*P_000321000*R_041[0];
double PR_000020001011=P_000020001*R_011[0]+-1*P_000020101*R_012[0]+-1*P_000120001*R_021[0]+P_000120101*R_022[0]+P_000220001*R_031[0]+-1*P_000220101*R_032[0];
double PR_011000010011=P_011000010*R_011[0]+-1*P_011000110*R_012[0]+-1*P_111000010*R_111[0]+P_111000110*R_112[0]+P_211000010*R_211[0]+-1*P_211000110*R_212[0];
double PR_010001010011=P_010001010*R_011[0]+-1*P_010001110*R_012[0]+-1*P_010101010*R_021[0]+P_010101110*R_022[0]+-1*P_110001010*R_111[0]+P_110001110*R_112[0]+P_110101010*R_121[0]+-1*P_110101110*R_122[0];
double PR_010000011011=P_010000011*R_011[0]+-1*P_010000111*R_012[0]+P_010000211*R_013[0]+-1*P_110000011*R_111[0]+P_110000111*R_112[0]+-1*P_110000211*R_113[0];
double PR_001010010011=P_001010010*R_011[0]+-1*P_001010110*R_012[0]+-1*P_001110010*R_021[0]+P_001110110*R_022[0]+-1*P_101010010*R_111[0]+P_101010110*R_112[0]+P_101110010*R_121[0]+-1*P_101110110*R_122[0];
double PR_000011010011=P_000011010*R_011[0]+-1*P_000011110*R_012[0]+-1*P_000111010*R_021[0]+P_000111110*R_022[0]+P_000211010*R_031[0]+-1*P_000211110*R_032[0];
double PR_000010011011=P_000010011*R_011[0]+-1*P_000010111*R_012[0]+P_000010211*R_013[0]+-1*P_000110011*R_021[0]+P_000110111*R_022[0]+-1*P_000110211*R_023[0];
double PR_001000020011=P_001000020*R_011[0]+-1*P_001000120*R_012[0]+P_001000220*R_013[0]+-1*P_101000020*R_111[0]+P_101000120*R_112[0]+-1*P_101000220*R_113[0];
double PR_000001020011=P_000001020*R_011[0]+-1*P_000001120*R_012[0]+P_000001220*R_013[0]+-1*P_000101020*R_021[0]+P_000101120*R_022[0]+-1*P_000101220*R_023[0];
double PR_000000021011=P_000000021*R_011[0]+-1*P_000000121*R_012[0]+P_000000221*R_013[0]+-1*P_000000321*R_014[0];
double PR_021000000020=P_021000000*R_020[0]+-1*P_121000000*R_120[0]+P_221000000*R_220[0]+-1*P_321000000*R_320[0];
double PR_020001000020=P_020001000*R_020[0]+-1*P_020101000*R_030[0]+-1*P_120001000*R_120[0]+P_120101000*R_130[0]+P_220001000*R_220[0]+-1*P_220101000*R_230[0];
double PR_020000001020=P_020000001*R_020[0]+-1*P_020000101*R_021[0]+-1*P_120000001*R_120[0]+P_120000101*R_121[0]+P_220000001*R_220[0]+-1*P_220000101*R_221[0];
double PR_011010000020=P_011010000*R_020[0]+-1*P_011110000*R_030[0]+-1*P_111010000*R_120[0]+P_111110000*R_130[0]+P_211010000*R_220[0]+-1*P_211110000*R_230[0];
double PR_010011000020=P_010011000*R_020[0]+-1*P_010111000*R_030[0]+P_010211000*R_040[0]+-1*P_110011000*R_120[0]+P_110111000*R_130[0]+-1*P_110211000*R_140[0];
double PR_010010001020=P_010010001*R_020[0]+-1*P_010010101*R_021[0]+-1*P_010110001*R_030[0]+P_010110101*R_031[0]+-1*P_110010001*R_120[0]+P_110010101*R_121[0]+P_110110001*R_130[0]+-1*P_110110101*R_131[0];
double PR_001020000020=P_001020000*R_020[0]+-1*P_001120000*R_030[0]+P_001220000*R_040[0]+-1*P_101020000*R_120[0]+P_101120000*R_130[0]+-1*P_101220000*R_140[0];
double PR_000021000020=P_000021000*R_020[0]+-1*P_000121000*R_030[0]+P_000221000*R_040[0]+-1*P_000321000*R_050[0];
double PR_000020001020=P_000020001*R_020[0]+-1*P_000020101*R_021[0]+-1*P_000120001*R_030[0]+P_000120101*R_031[0]+P_000220001*R_040[0]+-1*P_000220101*R_041[0];
double PR_011000010020=P_011000010*R_020[0]+-1*P_011000110*R_021[0]+-1*P_111000010*R_120[0]+P_111000110*R_121[0]+P_211000010*R_220[0]+-1*P_211000110*R_221[0];
double PR_010001010020=P_010001010*R_020[0]+-1*P_010001110*R_021[0]+-1*P_010101010*R_030[0]+P_010101110*R_031[0]+-1*P_110001010*R_120[0]+P_110001110*R_121[0]+P_110101010*R_130[0]+-1*P_110101110*R_131[0];
double PR_010000011020=P_010000011*R_020[0]+-1*P_010000111*R_021[0]+P_010000211*R_022[0]+-1*P_110000011*R_120[0]+P_110000111*R_121[0]+-1*P_110000211*R_122[0];
double PR_001010010020=P_001010010*R_020[0]+-1*P_001010110*R_021[0]+-1*P_001110010*R_030[0]+P_001110110*R_031[0]+-1*P_101010010*R_120[0]+P_101010110*R_121[0]+P_101110010*R_130[0]+-1*P_101110110*R_131[0];
double PR_000011010020=P_000011010*R_020[0]+-1*P_000011110*R_021[0]+-1*P_000111010*R_030[0]+P_000111110*R_031[0]+P_000211010*R_040[0]+-1*P_000211110*R_041[0];
double PR_000010011020=P_000010011*R_020[0]+-1*P_000010111*R_021[0]+P_000010211*R_022[0]+-1*P_000110011*R_030[0]+P_000110111*R_031[0]+-1*P_000110211*R_032[0];
double PR_001000020020=P_001000020*R_020[0]+-1*P_001000120*R_021[0]+P_001000220*R_022[0]+-1*P_101000020*R_120[0]+P_101000120*R_121[0]+-1*P_101000220*R_122[0];
double PR_000001020020=P_000001020*R_020[0]+-1*P_000001120*R_021[0]+P_000001220*R_022[0]+-1*P_000101020*R_030[0]+P_000101120*R_031[0]+-1*P_000101220*R_032[0];
double PR_000000021020=P_000000021*R_020[0]+-1*P_000000121*R_021[0]+P_000000221*R_022[0]+-1*P_000000321*R_023[0];
double PR_021000000101=P_021000000*R_101[0]+-1*P_121000000*R_201[0]+P_221000000*R_301[0]+-1*P_321000000*R_401[0];
double PR_020001000101=P_020001000*R_101[0]+-1*P_020101000*R_111[0]+-1*P_120001000*R_201[0]+P_120101000*R_211[0]+P_220001000*R_301[0]+-1*P_220101000*R_311[0];
double PR_020000001101=P_020000001*R_101[0]+-1*P_020000101*R_102[0]+-1*P_120000001*R_201[0]+P_120000101*R_202[0]+P_220000001*R_301[0]+-1*P_220000101*R_302[0];
double PR_011010000101=P_011010000*R_101[0]+-1*P_011110000*R_111[0]+-1*P_111010000*R_201[0]+P_111110000*R_211[0]+P_211010000*R_301[0]+-1*P_211110000*R_311[0];
double PR_010011000101=P_010011000*R_101[0]+-1*P_010111000*R_111[0]+P_010211000*R_121[0]+-1*P_110011000*R_201[0]+P_110111000*R_211[0]+-1*P_110211000*R_221[0];
double PR_010010001101=P_010010001*R_101[0]+-1*P_010010101*R_102[0]+-1*P_010110001*R_111[0]+P_010110101*R_112[0]+-1*P_110010001*R_201[0]+P_110010101*R_202[0]+P_110110001*R_211[0]+-1*P_110110101*R_212[0];
double PR_001020000101=P_001020000*R_101[0]+-1*P_001120000*R_111[0]+P_001220000*R_121[0]+-1*P_101020000*R_201[0]+P_101120000*R_211[0]+-1*P_101220000*R_221[0];
double PR_000021000101=P_000021000*R_101[0]+-1*P_000121000*R_111[0]+P_000221000*R_121[0]+-1*P_000321000*R_131[0];
double PR_000020001101=P_000020001*R_101[0]+-1*P_000020101*R_102[0]+-1*P_000120001*R_111[0]+P_000120101*R_112[0]+P_000220001*R_121[0]+-1*P_000220101*R_122[0];
double PR_011000010101=P_011000010*R_101[0]+-1*P_011000110*R_102[0]+-1*P_111000010*R_201[0]+P_111000110*R_202[0]+P_211000010*R_301[0]+-1*P_211000110*R_302[0];
double PR_010001010101=P_010001010*R_101[0]+-1*P_010001110*R_102[0]+-1*P_010101010*R_111[0]+P_010101110*R_112[0]+-1*P_110001010*R_201[0]+P_110001110*R_202[0]+P_110101010*R_211[0]+-1*P_110101110*R_212[0];
double PR_010000011101=P_010000011*R_101[0]+-1*P_010000111*R_102[0]+P_010000211*R_103[0]+-1*P_110000011*R_201[0]+P_110000111*R_202[0]+-1*P_110000211*R_203[0];
double PR_001010010101=P_001010010*R_101[0]+-1*P_001010110*R_102[0]+-1*P_001110010*R_111[0]+P_001110110*R_112[0]+-1*P_101010010*R_201[0]+P_101010110*R_202[0]+P_101110010*R_211[0]+-1*P_101110110*R_212[0];
double PR_000011010101=P_000011010*R_101[0]+-1*P_000011110*R_102[0]+-1*P_000111010*R_111[0]+P_000111110*R_112[0]+P_000211010*R_121[0]+-1*P_000211110*R_122[0];
double PR_000010011101=P_000010011*R_101[0]+-1*P_000010111*R_102[0]+P_000010211*R_103[0]+-1*P_000110011*R_111[0]+P_000110111*R_112[0]+-1*P_000110211*R_113[0];
double PR_001000020101=P_001000020*R_101[0]+-1*P_001000120*R_102[0]+P_001000220*R_103[0]+-1*P_101000020*R_201[0]+P_101000120*R_202[0]+-1*P_101000220*R_203[0];
double PR_000001020101=P_000001020*R_101[0]+-1*P_000001120*R_102[0]+P_000001220*R_103[0]+-1*P_000101020*R_111[0]+P_000101120*R_112[0]+-1*P_000101220*R_113[0];
double PR_000000021101=P_000000021*R_101[0]+-1*P_000000121*R_102[0]+P_000000221*R_103[0]+-1*P_000000321*R_104[0];
double PR_021000000110=P_021000000*R_110[0]+-1*P_121000000*R_210[0]+P_221000000*R_310[0]+-1*P_321000000*R_410[0];
double PR_020001000110=P_020001000*R_110[0]+-1*P_020101000*R_120[0]+-1*P_120001000*R_210[0]+P_120101000*R_220[0]+P_220001000*R_310[0]+-1*P_220101000*R_320[0];
double PR_020000001110=P_020000001*R_110[0]+-1*P_020000101*R_111[0]+-1*P_120000001*R_210[0]+P_120000101*R_211[0]+P_220000001*R_310[0]+-1*P_220000101*R_311[0];
double PR_011010000110=P_011010000*R_110[0]+-1*P_011110000*R_120[0]+-1*P_111010000*R_210[0]+P_111110000*R_220[0]+P_211010000*R_310[0]+-1*P_211110000*R_320[0];
double PR_010011000110=P_010011000*R_110[0]+-1*P_010111000*R_120[0]+P_010211000*R_130[0]+-1*P_110011000*R_210[0]+P_110111000*R_220[0]+-1*P_110211000*R_230[0];
double PR_010010001110=P_010010001*R_110[0]+-1*P_010010101*R_111[0]+-1*P_010110001*R_120[0]+P_010110101*R_121[0]+-1*P_110010001*R_210[0]+P_110010101*R_211[0]+P_110110001*R_220[0]+-1*P_110110101*R_221[0];
double PR_001020000110=P_001020000*R_110[0]+-1*P_001120000*R_120[0]+P_001220000*R_130[0]+-1*P_101020000*R_210[0]+P_101120000*R_220[0]+-1*P_101220000*R_230[0];
double PR_000021000110=P_000021000*R_110[0]+-1*P_000121000*R_120[0]+P_000221000*R_130[0]+-1*P_000321000*R_140[0];
double PR_000020001110=P_000020001*R_110[0]+-1*P_000020101*R_111[0]+-1*P_000120001*R_120[0]+P_000120101*R_121[0]+P_000220001*R_130[0]+-1*P_000220101*R_131[0];
double PR_011000010110=P_011000010*R_110[0]+-1*P_011000110*R_111[0]+-1*P_111000010*R_210[0]+P_111000110*R_211[0]+P_211000010*R_310[0]+-1*P_211000110*R_311[0];
double PR_010001010110=P_010001010*R_110[0]+-1*P_010001110*R_111[0]+-1*P_010101010*R_120[0]+P_010101110*R_121[0]+-1*P_110001010*R_210[0]+P_110001110*R_211[0]+P_110101010*R_220[0]+-1*P_110101110*R_221[0];
double PR_010000011110=P_010000011*R_110[0]+-1*P_010000111*R_111[0]+P_010000211*R_112[0]+-1*P_110000011*R_210[0]+P_110000111*R_211[0]+-1*P_110000211*R_212[0];
double PR_001010010110=P_001010010*R_110[0]+-1*P_001010110*R_111[0]+-1*P_001110010*R_120[0]+P_001110110*R_121[0]+-1*P_101010010*R_210[0]+P_101010110*R_211[0]+P_101110010*R_220[0]+-1*P_101110110*R_221[0];
double PR_000011010110=P_000011010*R_110[0]+-1*P_000011110*R_111[0]+-1*P_000111010*R_120[0]+P_000111110*R_121[0]+P_000211010*R_130[0]+-1*P_000211110*R_131[0];
double PR_000010011110=P_000010011*R_110[0]+-1*P_000010111*R_111[0]+P_000010211*R_112[0]+-1*P_000110011*R_120[0]+P_000110111*R_121[0]+-1*P_000110211*R_122[0];
double PR_001000020110=P_001000020*R_110[0]+-1*P_001000120*R_111[0]+P_001000220*R_112[0]+-1*P_101000020*R_210[0]+P_101000120*R_211[0]+-1*P_101000220*R_212[0];
double PR_000001020110=P_000001020*R_110[0]+-1*P_000001120*R_111[0]+P_000001220*R_112[0]+-1*P_000101020*R_120[0]+P_000101120*R_121[0]+-1*P_000101220*R_122[0];
double PR_000000021110=P_000000021*R_110[0]+-1*P_000000121*R_111[0]+P_000000221*R_112[0]+-1*P_000000321*R_113[0];
double PR_021000000200=P_021000000*R_200[0]+-1*P_121000000*R_300[0]+P_221000000*R_400[0]+-1*P_321000000*R_500[0];
double PR_020001000200=P_020001000*R_200[0]+-1*P_020101000*R_210[0]+-1*P_120001000*R_300[0]+P_120101000*R_310[0]+P_220001000*R_400[0]+-1*P_220101000*R_410[0];
double PR_020000001200=P_020000001*R_200[0]+-1*P_020000101*R_201[0]+-1*P_120000001*R_300[0]+P_120000101*R_301[0]+P_220000001*R_400[0]+-1*P_220000101*R_401[0];
double PR_011010000200=P_011010000*R_200[0]+-1*P_011110000*R_210[0]+-1*P_111010000*R_300[0]+P_111110000*R_310[0]+P_211010000*R_400[0]+-1*P_211110000*R_410[0];
double PR_010011000200=P_010011000*R_200[0]+-1*P_010111000*R_210[0]+P_010211000*R_220[0]+-1*P_110011000*R_300[0]+P_110111000*R_310[0]+-1*P_110211000*R_320[0];
double PR_010010001200=P_010010001*R_200[0]+-1*P_010010101*R_201[0]+-1*P_010110001*R_210[0]+P_010110101*R_211[0]+-1*P_110010001*R_300[0]+P_110010101*R_301[0]+P_110110001*R_310[0]+-1*P_110110101*R_311[0];
double PR_001020000200=P_001020000*R_200[0]+-1*P_001120000*R_210[0]+P_001220000*R_220[0]+-1*P_101020000*R_300[0]+P_101120000*R_310[0]+-1*P_101220000*R_320[0];
double PR_000021000200=P_000021000*R_200[0]+-1*P_000121000*R_210[0]+P_000221000*R_220[0]+-1*P_000321000*R_230[0];
double PR_000020001200=P_000020001*R_200[0]+-1*P_000020101*R_201[0]+-1*P_000120001*R_210[0]+P_000120101*R_211[0]+P_000220001*R_220[0]+-1*P_000220101*R_221[0];
double PR_011000010200=P_011000010*R_200[0]+-1*P_011000110*R_201[0]+-1*P_111000010*R_300[0]+P_111000110*R_301[0]+P_211000010*R_400[0]+-1*P_211000110*R_401[0];
double PR_010001010200=P_010001010*R_200[0]+-1*P_010001110*R_201[0]+-1*P_010101010*R_210[0]+P_010101110*R_211[0]+-1*P_110001010*R_300[0]+P_110001110*R_301[0]+P_110101010*R_310[0]+-1*P_110101110*R_311[0];
double PR_010000011200=P_010000011*R_200[0]+-1*P_010000111*R_201[0]+P_010000211*R_202[0]+-1*P_110000011*R_300[0]+P_110000111*R_301[0]+-1*P_110000211*R_302[0];
double PR_001010010200=P_001010010*R_200[0]+-1*P_001010110*R_201[0]+-1*P_001110010*R_210[0]+P_001110110*R_211[0]+-1*P_101010010*R_300[0]+P_101010110*R_301[0]+P_101110010*R_310[0]+-1*P_101110110*R_311[0];
double PR_000011010200=P_000011010*R_200[0]+-1*P_000011110*R_201[0]+-1*P_000111010*R_210[0]+P_000111110*R_211[0]+P_000211010*R_220[0]+-1*P_000211110*R_221[0];
double PR_000010011200=P_000010011*R_200[0]+-1*P_000010111*R_201[0]+P_000010211*R_202[0]+-1*P_000110011*R_210[0]+P_000110111*R_211[0]+-1*P_000110211*R_212[0];
double PR_001000020200=P_001000020*R_200[0]+-1*P_001000120*R_201[0]+P_001000220*R_202[0]+-1*P_101000020*R_300[0]+P_101000120*R_301[0]+-1*P_101000220*R_302[0];
double PR_000001020200=P_000001020*R_200[0]+-1*P_000001120*R_201[0]+P_000001220*R_202[0]+-1*P_000101020*R_210[0]+P_000101120*R_211[0]+-1*P_000101220*R_212[0];
double PR_000000021200=P_000000021*R_200[0]+-1*P_000000121*R_201[0]+P_000000221*R_202[0]+-1*P_000000321*R_203[0];
// --- Ket-side Hermite expansion coefficients, one value per Cartesian
// direction (index i = 0,1,2 for x,y,z). Naming: Qd_<n><l><m> follows the
// same generated convention as the Qd_010/Qd_001 arrays defined earlier in
// this kernel (outside this excerpt).
// NOTE(review): aQin1 is defined before this excerpt; from the recurrences
// below it plays the role of the 1/(2*eta)-type factor of the Hermite
// expansion -- confirm against the preceding code.
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
// Base coefficients: constant in every direction.
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
// Higher-order coefficients via the recurrence, combining the lower-order
// Qd_010/Qd_001 arrays (declared earlier in the kernel).
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
// --- Ket-side coefficient products. Each Q_<xxyyzz> is a product of the
// per-direction Qd_* coefficients: the generated name packs the three
// Cartesian directions' indices (x from Qd_*[0], y from Qd_*[1],
// z from Qd_*[2]); entries touching only one direction reduce to a single
// Qd_* element.
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
ans_temp[ans_id*18+0]+=Pmtrx[0]*(Q_011000000*PR_021000000000+Q_111000000*PR_021000000100+Q_211000000*PR_021000000200);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(Q_010001000*PR_021000000000+Q_010101000*PR_021000000010+Q_110001000*PR_021000000100+Q_110101000*PR_021000000110);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(Q_010000001*PR_021000000000+Q_010000101*PR_021000000001+Q_110000001*PR_021000000100+Q_110000101*PR_021000000101);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(Q_001010000*PR_021000000000+Q_001110000*PR_021000000010+Q_101010000*PR_021000000100+Q_101110000*PR_021000000110);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(Q_000011000*PR_021000000000+Q_000111000*PR_021000000010+Q_000211000*PR_021000000020);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(Q_000010001*PR_021000000000+Q_000010101*PR_021000000001+Q_000110001*PR_021000000010+Q_000110101*PR_021000000011);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(Q_001000010*PR_021000000000+Q_001000110*PR_021000000001+Q_101000010*PR_021000000100+Q_101000110*PR_021000000101);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(Q_000001010*PR_021000000000+Q_000001110*PR_021000000001+Q_000101010*PR_021000000010+Q_000101110*PR_021000000011);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(Q_000000011*PR_021000000000+Q_000000111*PR_021000000001+Q_000000211*PR_021000000002);
ans_temp[ans_id*18+0]+=Pmtrx[3]*(Q_011000000*PR_020001000000+Q_111000000*PR_020001000100+Q_211000000*PR_020001000200);
ans_temp[ans_id*18+0]+=Pmtrx[4]*(Q_010001000*PR_020001000000+Q_010101000*PR_020001000010+Q_110001000*PR_020001000100+Q_110101000*PR_020001000110);
ans_temp[ans_id*18+0]+=Pmtrx[5]*(Q_010000001*PR_020001000000+Q_010000101*PR_020001000001+Q_110000001*PR_020001000100+Q_110000101*PR_020001000101);
ans_temp[ans_id*18+1]+=Pmtrx[3]*(Q_001010000*PR_020001000000+Q_001110000*PR_020001000010+Q_101010000*PR_020001000100+Q_101110000*PR_020001000110);
ans_temp[ans_id*18+1]+=Pmtrx[4]*(Q_000011000*PR_020001000000+Q_000111000*PR_020001000010+Q_000211000*PR_020001000020);
ans_temp[ans_id*18+1]+=Pmtrx[5]*(Q_000010001*PR_020001000000+Q_000010101*PR_020001000001+Q_000110001*PR_020001000010+Q_000110101*PR_020001000011);
ans_temp[ans_id*18+2]+=Pmtrx[3]*(Q_001000010*PR_020001000000+Q_001000110*PR_020001000001+Q_101000010*PR_020001000100+Q_101000110*PR_020001000101);
ans_temp[ans_id*18+2]+=Pmtrx[4]*(Q_000001010*PR_020001000000+Q_000001110*PR_020001000001+Q_000101010*PR_020001000010+Q_000101110*PR_020001000011);
ans_temp[ans_id*18+2]+=Pmtrx[5]*(Q_000000011*PR_020001000000+Q_000000111*PR_020001000001+Q_000000211*PR_020001000002);
ans_temp[ans_id*18+0]+=Pmtrx[6]*(Q_011000000*PR_020000001000+Q_111000000*PR_020000001100+Q_211000000*PR_020000001200);
ans_temp[ans_id*18+0]+=Pmtrx[7]*(Q_010001000*PR_020000001000+Q_010101000*PR_020000001010+Q_110001000*PR_020000001100+Q_110101000*PR_020000001110);
ans_temp[ans_id*18+0]+=Pmtrx[8]*(Q_010000001*PR_020000001000+Q_010000101*PR_020000001001+Q_110000001*PR_020000001100+Q_110000101*PR_020000001101);
ans_temp[ans_id*18+1]+=Pmtrx[6]*(Q_001010000*PR_020000001000+Q_001110000*PR_020000001010+Q_101010000*PR_020000001100+Q_101110000*PR_020000001110);
ans_temp[ans_id*18+1]+=Pmtrx[7]*(Q_000011000*PR_020000001000+Q_000111000*PR_020000001010+Q_000211000*PR_020000001020);
ans_temp[ans_id*18+1]+=Pmtrx[8]*(Q_000010001*PR_020000001000+Q_000010101*PR_020000001001+Q_000110001*PR_020000001010+Q_000110101*PR_020000001011);
ans_temp[ans_id*18+2]+=Pmtrx[6]*(Q_001000010*PR_020000001000+Q_001000110*PR_020000001001+Q_101000010*PR_020000001100+Q_101000110*PR_020000001101);
ans_temp[ans_id*18+2]+=Pmtrx[7]*(Q_000001010*PR_020000001000+Q_000001110*PR_020000001001+Q_000101010*PR_020000001010+Q_000101110*PR_020000001011);
ans_temp[ans_id*18+2]+=Pmtrx[8]*(Q_000000011*PR_020000001000+Q_000000111*PR_020000001001+Q_000000211*PR_020000001002);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(Q_011000000*PR_011010000000+Q_111000000*PR_011010000100+Q_211000000*PR_011010000200);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(Q_010001000*PR_011010000000+Q_010101000*PR_011010000010+Q_110001000*PR_011010000100+Q_110101000*PR_011010000110);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(Q_010000001*PR_011010000000+Q_010000101*PR_011010000001+Q_110000001*PR_011010000100+Q_110000101*PR_011010000101);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(Q_001010000*PR_011010000000+Q_001110000*PR_011010000010+Q_101010000*PR_011010000100+Q_101110000*PR_011010000110);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(Q_000011000*PR_011010000000+Q_000111000*PR_011010000010+Q_000211000*PR_011010000020);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(Q_000010001*PR_011010000000+Q_000010101*PR_011010000001+Q_000110001*PR_011010000010+Q_000110101*PR_011010000011);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(Q_001000010*PR_011010000000+Q_001000110*PR_011010000001+Q_101000010*PR_011010000100+Q_101000110*PR_011010000101);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(Q_000001010*PR_011010000000+Q_000001110*PR_011010000001+Q_000101010*PR_011010000010+Q_000101110*PR_011010000011);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(Q_000000011*PR_011010000000+Q_000000111*PR_011010000001+Q_000000211*PR_011010000002);
ans_temp[ans_id*18+3]+=Pmtrx[3]*(Q_011000000*PR_010011000000+Q_111000000*PR_010011000100+Q_211000000*PR_010011000200);
ans_temp[ans_id*18+3]+=Pmtrx[4]*(Q_010001000*PR_010011000000+Q_010101000*PR_010011000010+Q_110001000*PR_010011000100+Q_110101000*PR_010011000110);
ans_temp[ans_id*18+3]+=Pmtrx[5]*(Q_010000001*PR_010011000000+Q_010000101*PR_010011000001+Q_110000001*PR_010011000100+Q_110000101*PR_010011000101);
ans_temp[ans_id*18+4]+=Pmtrx[3]*(Q_001010000*PR_010011000000+Q_001110000*PR_010011000010+Q_101010000*PR_010011000100+Q_101110000*PR_010011000110);
ans_temp[ans_id*18+4]+=Pmtrx[4]*(Q_000011000*PR_010011000000+Q_000111000*PR_010011000010+Q_000211000*PR_010011000020);
ans_temp[ans_id*18+4]+=Pmtrx[5]*(Q_000010001*PR_010011000000+Q_000010101*PR_010011000001+Q_000110001*PR_010011000010+Q_000110101*PR_010011000011);
ans_temp[ans_id*18+5]+=Pmtrx[3]*(Q_001000010*PR_010011000000+Q_001000110*PR_010011000001+Q_101000010*PR_010011000100+Q_101000110*PR_010011000101);
ans_temp[ans_id*18+5]+=Pmtrx[4]*(Q_000001010*PR_010011000000+Q_000001110*PR_010011000001+Q_000101010*PR_010011000010+Q_000101110*PR_010011000011);
ans_temp[ans_id*18+5]+=Pmtrx[5]*(Q_000000011*PR_010011000000+Q_000000111*PR_010011000001+Q_000000211*PR_010011000002);
ans_temp[ans_id*18+3]+=Pmtrx[6]*(Q_011000000*PR_010010001000+Q_111000000*PR_010010001100+Q_211000000*PR_010010001200);
ans_temp[ans_id*18+3]+=Pmtrx[7]*(Q_010001000*PR_010010001000+Q_010101000*PR_010010001010+Q_110001000*PR_010010001100+Q_110101000*PR_010010001110);
ans_temp[ans_id*18+3]+=Pmtrx[8]*(Q_010000001*PR_010010001000+Q_010000101*PR_010010001001+Q_110000001*PR_010010001100+Q_110000101*PR_010010001101);
ans_temp[ans_id*18+4]+=Pmtrx[6]*(Q_001010000*PR_010010001000+Q_001110000*PR_010010001010+Q_101010000*PR_010010001100+Q_101110000*PR_010010001110);
ans_temp[ans_id*18+4]+=Pmtrx[7]*(Q_000011000*PR_010010001000+Q_000111000*PR_010010001010+Q_000211000*PR_010010001020);
ans_temp[ans_id*18+4]+=Pmtrx[8]*(Q_000010001*PR_010010001000+Q_000010101*PR_010010001001+Q_000110001*PR_010010001010+Q_000110101*PR_010010001011);
ans_temp[ans_id*18+5]+=Pmtrx[6]*(Q_001000010*PR_010010001000+Q_001000110*PR_010010001001+Q_101000010*PR_010010001100+Q_101000110*PR_010010001101);
ans_temp[ans_id*18+5]+=Pmtrx[7]*(Q_000001010*PR_010010001000+Q_000001110*PR_010010001001+Q_000101010*PR_010010001010+Q_000101110*PR_010010001011);
ans_temp[ans_id*18+5]+=Pmtrx[8]*(Q_000000011*PR_010010001000+Q_000000111*PR_010010001001+Q_000000211*PR_010010001002);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(Q_011000000*PR_001020000000+Q_111000000*PR_001020000100+Q_211000000*PR_001020000200);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(Q_010001000*PR_001020000000+Q_010101000*PR_001020000010+Q_110001000*PR_001020000100+Q_110101000*PR_001020000110);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(Q_010000001*PR_001020000000+Q_010000101*PR_001020000001+Q_110000001*PR_001020000100+Q_110000101*PR_001020000101);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(Q_001010000*PR_001020000000+Q_001110000*PR_001020000010+Q_101010000*PR_001020000100+Q_101110000*PR_001020000110);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(Q_000011000*PR_001020000000+Q_000111000*PR_001020000010+Q_000211000*PR_001020000020);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(Q_000010001*PR_001020000000+Q_000010101*PR_001020000001+Q_000110001*PR_001020000010+Q_000110101*PR_001020000011);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(Q_001000010*PR_001020000000+Q_001000110*PR_001020000001+Q_101000010*PR_001020000100+Q_101000110*PR_001020000101);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(Q_000001010*PR_001020000000+Q_000001110*PR_001020000001+Q_000101010*PR_001020000010+Q_000101110*PR_001020000011);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(Q_000000011*PR_001020000000+Q_000000111*PR_001020000001+Q_000000211*PR_001020000002);
ans_temp[ans_id*18+6]+=Pmtrx[3]*(Q_011000000*PR_000021000000+Q_111000000*PR_000021000100+Q_211000000*PR_000021000200);
ans_temp[ans_id*18+6]+=Pmtrx[4]*(Q_010001000*PR_000021000000+Q_010101000*PR_000021000010+Q_110001000*PR_000021000100+Q_110101000*PR_000021000110);
ans_temp[ans_id*18+6]+=Pmtrx[5]*(Q_010000001*PR_000021000000+Q_010000101*PR_000021000001+Q_110000001*PR_000021000100+Q_110000101*PR_000021000101);
ans_temp[ans_id*18+7]+=Pmtrx[3]*(Q_001010000*PR_000021000000+Q_001110000*PR_000021000010+Q_101010000*PR_000021000100+Q_101110000*PR_000021000110);
ans_temp[ans_id*18+7]+=Pmtrx[4]*(Q_000011000*PR_000021000000+Q_000111000*PR_000021000010+Q_000211000*PR_000021000020);
ans_temp[ans_id*18+7]+=Pmtrx[5]*(Q_000010001*PR_000021000000+Q_000010101*PR_000021000001+Q_000110001*PR_000021000010+Q_000110101*PR_000021000011);
ans_temp[ans_id*18+8]+=Pmtrx[3]*(Q_001000010*PR_000021000000+Q_001000110*PR_000021000001+Q_101000010*PR_000021000100+Q_101000110*PR_000021000101);
ans_temp[ans_id*18+8]+=Pmtrx[4]*(Q_000001010*PR_000021000000+Q_000001110*PR_000021000001+Q_000101010*PR_000021000010+Q_000101110*PR_000021000011);
ans_temp[ans_id*18+8]+=Pmtrx[5]*(Q_000000011*PR_000021000000+Q_000000111*PR_000021000001+Q_000000211*PR_000021000002);
ans_temp[ans_id*18+6]+=Pmtrx[6]*(Q_011000000*PR_000020001000+Q_111000000*PR_000020001100+Q_211000000*PR_000020001200);
ans_temp[ans_id*18+6]+=Pmtrx[7]*(Q_010001000*PR_000020001000+Q_010101000*PR_000020001010+Q_110001000*PR_000020001100+Q_110101000*PR_000020001110);
ans_temp[ans_id*18+6]+=Pmtrx[8]*(Q_010000001*PR_000020001000+Q_010000101*PR_000020001001+Q_110000001*PR_000020001100+Q_110000101*PR_000020001101);
ans_temp[ans_id*18+7]+=Pmtrx[6]*(Q_001010000*PR_000020001000+Q_001110000*PR_000020001010+Q_101010000*PR_000020001100+Q_101110000*PR_000020001110);
ans_temp[ans_id*18+7]+=Pmtrx[7]*(Q_000011000*PR_000020001000+Q_000111000*PR_000020001010+Q_000211000*PR_000020001020);
ans_temp[ans_id*18+7]+=Pmtrx[8]*(Q_000010001*PR_000020001000+Q_000010101*PR_000020001001+Q_000110001*PR_000020001010+Q_000110101*PR_000020001011);
ans_temp[ans_id*18+8]+=Pmtrx[6]*(Q_001000010*PR_000020001000+Q_001000110*PR_000020001001+Q_101000010*PR_000020001100+Q_101000110*PR_000020001101);
ans_temp[ans_id*18+8]+=Pmtrx[7]*(Q_000001010*PR_000020001000+Q_000001110*PR_000020001001+Q_000101010*PR_000020001010+Q_000101110*PR_000020001011);
ans_temp[ans_id*18+8]+=Pmtrx[8]*(Q_000000011*PR_000020001000+Q_000000111*PR_000020001001+Q_000000211*PR_000020001002);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(Q_011000000*PR_011000010000+Q_111000000*PR_011000010100+Q_211000000*PR_011000010200);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(Q_010001000*PR_011000010000+Q_010101000*PR_011000010010+Q_110001000*PR_011000010100+Q_110101000*PR_011000010110);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(Q_010000001*PR_011000010000+Q_010000101*PR_011000010001+Q_110000001*PR_011000010100+Q_110000101*PR_011000010101);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(Q_001010000*PR_011000010000+Q_001110000*PR_011000010010+Q_101010000*PR_011000010100+Q_101110000*PR_011000010110);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(Q_000011000*PR_011000010000+Q_000111000*PR_011000010010+Q_000211000*PR_011000010020);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(Q_000010001*PR_011000010000+Q_000010101*PR_011000010001+Q_000110001*PR_011000010010+Q_000110101*PR_011000010011);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(Q_001000010*PR_011000010000+Q_001000110*PR_011000010001+Q_101000010*PR_011000010100+Q_101000110*PR_011000010101);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(Q_000001010*PR_011000010000+Q_000001110*PR_011000010001+Q_000101010*PR_011000010010+Q_000101110*PR_011000010011);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(Q_000000011*PR_011000010000+Q_000000111*PR_011000010001+Q_000000211*PR_011000010002);
ans_temp[ans_id*18+9]+=Pmtrx[3]*(Q_011000000*PR_010001010000+Q_111000000*PR_010001010100+Q_211000000*PR_010001010200);
ans_temp[ans_id*18+9]+=Pmtrx[4]*(Q_010001000*PR_010001010000+Q_010101000*PR_010001010010+Q_110001000*PR_010001010100+Q_110101000*PR_010001010110);
ans_temp[ans_id*18+9]+=Pmtrx[5]*(Q_010000001*PR_010001010000+Q_010000101*PR_010001010001+Q_110000001*PR_010001010100+Q_110000101*PR_010001010101);
ans_temp[ans_id*18+10]+=Pmtrx[3]*(Q_001010000*PR_010001010000+Q_001110000*PR_010001010010+Q_101010000*PR_010001010100+Q_101110000*PR_010001010110);
ans_temp[ans_id*18+10]+=Pmtrx[4]*(Q_000011000*PR_010001010000+Q_000111000*PR_010001010010+Q_000211000*PR_010001010020);
ans_temp[ans_id*18+10]+=Pmtrx[5]*(Q_000010001*PR_010001010000+Q_000010101*PR_010001010001+Q_000110001*PR_010001010010+Q_000110101*PR_010001010011);
ans_temp[ans_id*18+11]+=Pmtrx[3]*(Q_001000010*PR_010001010000+Q_001000110*PR_010001010001+Q_101000010*PR_010001010100+Q_101000110*PR_010001010101);
ans_temp[ans_id*18+11]+=Pmtrx[4]*(Q_000001010*PR_010001010000+Q_000001110*PR_010001010001+Q_000101010*PR_010001010010+Q_000101110*PR_010001010011);
ans_temp[ans_id*18+11]+=Pmtrx[5]*(Q_000000011*PR_010001010000+Q_000000111*PR_010001010001+Q_000000211*PR_010001010002);
ans_temp[ans_id*18+9]+=Pmtrx[6]*(Q_011000000*PR_010000011000+Q_111000000*PR_010000011100+Q_211000000*PR_010000011200);
ans_temp[ans_id*18+9]+=Pmtrx[7]*(Q_010001000*PR_010000011000+Q_010101000*PR_010000011010+Q_110001000*PR_010000011100+Q_110101000*PR_010000011110);
ans_temp[ans_id*18+9]+=Pmtrx[8]*(Q_010000001*PR_010000011000+Q_010000101*PR_010000011001+Q_110000001*PR_010000011100+Q_110000101*PR_010000011101);
ans_temp[ans_id*18+10]+=Pmtrx[6]*(Q_001010000*PR_010000011000+Q_001110000*PR_010000011010+Q_101010000*PR_010000011100+Q_101110000*PR_010000011110);
ans_temp[ans_id*18+10]+=Pmtrx[7]*(Q_000011000*PR_010000011000+Q_000111000*PR_010000011010+Q_000211000*PR_010000011020);
ans_temp[ans_id*18+10]+=Pmtrx[8]*(Q_000010001*PR_010000011000+Q_000010101*PR_010000011001+Q_000110001*PR_010000011010+Q_000110101*PR_010000011011);
ans_temp[ans_id*18+11]+=Pmtrx[6]*(Q_001000010*PR_010000011000+Q_001000110*PR_010000011001+Q_101000010*PR_010000011100+Q_101000110*PR_010000011101);
ans_temp[ans_id*18+11]+=Pmtrx[7]*(Q_000001010*PR_010000011000+Q_000001110*PR_010000011001+Q_000101010*PR_010000011010+Q_000101110*PR_010000011011);
ans_temp[ans_id*18+11]+=Pmtrx[8]*(Q_000000011*PR_010000011000+Q_000000111*PR_010000011001+Q_000000211*PR_010000011002);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(Q_011000000*PR_001010010000+Q_111000000*PR_001010010100+Q_211000000*PR_001010010200);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(Q_010001000*PR_001010010000+Q_010101000*PR_001010010010+Q_110001000*PR_001010010100+Q_110101000*PR_001010010110);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(Q_010000001*PR_001010010000+Q_010000101*PR_001010010001+Q_110000001*PR_001010010100+Q_110000101*PR_001010010101);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(Q_001010000*PR_001010010000+Q_001110000*PR_001010010010+Q_101010000*PR_001010010100+Q_101110000*PR_001010010110);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(Q_000011000*PR_001010010000+Q_000111000*PR_001010010010+Q_000211000*PR_001010010020);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(Q_000010001*PR_001010010000+Q_000010101*PR_001010010001+Q_000110001*PR_001010010010+Q_000110101*PR_001010010011);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(Q_001000010*PR_001010010000+Q_001000110*PR_001010010001+Q_101000010*PR_001010010100+Q_101000110*PR_001010010101);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(Q_000001010*PR_001010010000+Q_000001110*PR_001010010001+Q_000101010*PR_001010010010+Q_000101110*PR_001010010011);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(Q_000000011*PR_001010010000+Q_000000111*PR_001010010001+Q_000000211*PR_001010010002);
ans_temp[ans_id*18+12]+=Pmtrx[3]*(Q_011000000*PR_000011010000+Q_111000000*PR_000011010100+Q_211000000*PR_000011010200);
ans_temp[ans_id*18+12]+=Pmtrx[4]*(Q_010001000*PR_000011010000+Q_010101000*PR_000011010010+Q_110001000*PR_000011010100+Q_110101000*PR_000011010110);
ans_temp[ans_id*18+12]+=Pmtrx[5]*(Q_010000001*PR_000011010000+Q_010000101*PR_000011010001+Q_110000001*PR_000011010100+Q_110000101*PR_000011010101);
ans_temp[ans_id*18+13]+=Pmtrx[3]*(Q_001010000*PR_000011010000+Q_001110000*PR_000011010010+Q_101010000*PR_000011010100+Q_101110000*PR_000011010110);
ans_temp[ans_id*18+13]+=Pmtrx[4]*(Q_000011000*PR_000011010000+Q_000111000*PR_000011010010+Q_000211000*PR_000011010020);
ans_temp[ans_id*18+13]+=Pmtrx[5]*(Q_000010001*PR_000011010000+Q_000010101*PR_000011010001+Q_000110001*PR_000011010010+Q_000110101*PR_000011010011);
ans_temp[ans_id*18+14]+=Pmtrx[3]*(Q_001000010*PR_000011010000+Q_001000110*PR_000011010001+Q_101000010*PR_000011010100+Q_101000110*PR_000011010101);
ans_temp[ans_id*18+14]+=Pmtrx[4]*(Q_000001010*PR_000011010000+Q_000001110*PR_000011010001+Q_000101010*PR_000011010010+Q_000101110*PR_000011010011);
ans_temp[ans_id*18+14]+=Pmtrx[5]*(Q_000000011*PR_000011010000+Q_000000111*PR_000011010001+Q_000000211*PR_000011010002);
ans_temp[ans_id*18+12]+=Pmtrx[6]*(Q_011000000*PR_000010011000+Q_111000000*PR_000010011100+Q_211000000*PR_000010011200);
ans_temp[ans_id*18+12]+=Pmtrx[7]*(Q_010001000*PR_000010011000+Q_010101000*PR_000010011010+Q_110001000*PR_000010011100+Q_110101000*PR_000010011110);
ans_temp[ans_id*18+12]+=Pmtrx[8]*(Q_010000001*PR_000010011000+Q_010000101*PR_000010011001+Q_110000001*PR_000010011100+Q_110000101*PR_000010011101);
ans_temp[ans_id*18+13]+=Pmtrx[6]*(Q_001010000*PR_000010011000+Q_001110000*PR_000010011010+Q_101010000*PR_000010011100+Q_101110000*PR_000010011110);
ans_temp[ans_id*18+13]+=Pmtrx[7]*(Q_000011000*PR_000010011000+Q_000111000*PR_000010011010+Q_000211000*PR_000010011020);
ans_temp[ans_id*18+13]+=Pmtrx[8]*(Q_000010001*PR_000010011000+Q_000010101*PR_000010011001+Q_000110001*PR_000010011010+Q_000110101*PR_000010011011);
ans_temp[ans_id*18+14]+=Pmtrx[6]*(Q_001000010*PR_000010011000+Q_001000110*PR_000010011001+Q_101000010*PR_000010011100+Q_101000110*PR_000010011101);
ans_temp[ans_id*18+14]+=Pmtrx[7]*(Q_000001010*PR_000010011000+Q_000001110*PR_000010011001+Q_000101010*PR_000010011010+Q_000101110*PR_000010011011);
ans_temp[ans_id*18+14]+=Pmtrx[8]*(Q_000000011*PR_000010011000+Q_000000111*PR_000010011001+Q_000000211*PR_000010011002);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(Q_011000000*PR_001000020000+Q_111000000*PR_001000020100+Q_211000000*PR_001000020200);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(Q_010001000*PR_001000020000+Q_010101000*PR_001000020010+Q_110001000*PR_001000020100+Q_110101000*PR_001000020110);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(Q_010000001*PR_001000020000+Q_010000101*PR_001000020001+Q_110000001*PR_001000020100+Q_110000101*PR_001000020101);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(Q_001010000*PR_001000020000+Q_001110000*PR_001000020010+Q_101010000*PR_001000020100+Q_101110000*PR_001000020110);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(Q_000011000*PR_001000020000+Q_000111000*PR_001000020010+Q_000211000*PR_001000020020);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(Q_000010001*PR_001000020000+Q_000010101*PR_001000020001+Q_000110001*PR_001000020010+Q_000110101*PR_001000020011);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(Q_001000010*PR_001000020000+Q_001000110*PR_001000020001+Q_101000010*PR_001000020100+Q_101000110*PR_001000020101);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(Q_000001010*PR_001000020000+Q_000001110*PR_001000020001+Q_000101010*PR_001000020010+Q_000101110*PR_001000020011);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(Q_000000011*PR_001000020000+Q_000000111*PR_001000020001+Q_000000211*PR_001000020002);
ans_temp[ans_id*18+15]+=Pmtrx[3]*(Q_011000000*PR_000001020000+Q_111000000*PR_000001020100+Q_211000000*PR_000001020200);
ans_temp[ans_id*18+15]+=Pmtrx[4]*(Q_010001000*PR_000001020000+Q_010101000*PR_000001020010+Q_110001000*PR_000001020100+Q_110101000*PR_000001020110);
ans_temp[ans_id*18+15]+=Pmtrx[5]*(Q_010000001*PR_000001020000+Q_010000101*PR_000001020001+Q_110000001*PR_000001020100+Q_110000101*PR_000001020101);
ans_temp[ans_id*18+16]+=Pmtrx[3]*(Q_001010000*PR_000001020000+Q_001110000*PR_000001020010+Q_101010000*PR_000001020100+Q_101110000*PR_000001020110);
ans_temp[ans_id*18+16]+=Pmtrx[4]*(Q_000011000*PR_000001020000+Q_000111000*PR_000001020010+Q_000211000*PR_000001020020);
ans_temp[ans_id*18+16]+=Pmtrx[5]*(Q_000010001*PR_000001020000+Q_000010101*PR_000001020001+Q_000110001*PR_000001020010+Q_000110101*PR_000001020011);
ans_temp[ans_id*18+17]+=Pmtrx[3]*(Q_001000010*PR_000001020000+Q_001000110*PR_000001020001+Q_101000010*PR_000001020100+Q_101000110*PR_000001020101);
ans_temp[ans_id*18+17]+=Pmtrx[4]*(Q_000001010*PR_000001020000+Q_000001110*PR_000001020001+Q_000101010*PR_000001020010+Q_000101110*PR_000001020011);
ans_temp[ans_id*18+17]+=Pmtrx[5]*(Q_000000011*PR_000001020000+Q_000000111*PR_000001020001+Q_000000211*PR_000001020002);
ans_temp[ans_id*18+15]+=Pmtrx[6]*(Q_011000000*PR_000000021000+Q_111000000*PR_000000021100+Q_211000000*PR_000000021200);
ans_temp[ans_id*18+15]+=Pmtrx[7]*(Q_010001000*PR_000000021000+Q_010101000*PR_000000021010+Q_110001000*PR_000000021100+Q_110101000*PR_000000021110);
ans_temp[ans_id*18+15]+=Pmtrx[8]*(Q_010000001*PR_000000021000+Q_010000101*PR_000000021001+Q_110000001*PR_000000021100+Q_110000101*PR_000000021101);
ans_temp[ans_id*18+16]+=Pmtrx[6]*(Q_001010000*PR_000000021000+Q_001110000*PR_000000021010+Q_101010000*PR_000000021100+Q_101110000*PR_000000021110);
ans_temp[ans_id*18+16]+=Pmtrx[7]*(Q_000011000*PR_000000021000+Q_000111000*PR_000000021010+Q_000211000*PR_000000021020);
ans_temp[ans_id*18+16]+=Pmtrx[8]*(Q_000010001*PR_000000021000+Q_000010101*PR_000000021001+Q_000110001*PR_000000021010+Q_000110101*PR_000000021011);
ans_temp[ans_id*18+17]+=Pmtrx[6]*(Q_001000010*PR_000000021000+Q_001000110*PR_000000021001+Q_101000010*PR_000000021100+Q_101000110*PR_000000021101);
ans_temp[ans_id*18+17]+=Pmtrx[7]*(Q_000001010*PR_000000021000+Q_000001110*PR_000000021001+Q_000101010*PR_000000021010+Q_000101110*PR_000000021011);
ans_temp[ans_id*18+17]+=Pmtrx[8]*(Q_000000011*PR_000000021000+Q_000000111*PR_000000021001+Q_000000211*PR_000000021002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kq_dppp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[9]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<3;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[6];
Ft_fs_5(5,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[5];
double R_200[4];
double R_300[3];
double R_400[2];
double R_500[1];
double R_010[5];
double R_110[4];
double R_210[3];
double R_310[2];
double R_410[1];
double R_020[4];
double R_120[3];
double R_220[2];
double R_320[1];
double R_030[3];
double R_130[2];
double R_230[1];
double R_040[2];
double R_140[1];
double R_050[1];
double R_001[5];
double R_101[4];
double R_201[3];
double R_301[2];
double R_401[1];
double R_011[4];
double R_111[3];
double R_211[2];
double R_311[1];
double R_021[3];
double R_121[2];
double R_221[1];
double R_031[2];
double R_131[1];
double R_041[1];
double R_002[4];
double R_102[3];
double R_202[2];
double R_302[1];
double R_012[3];
double R_112[2];
double R_212[1];
double R_022[2];
double R_122[1];
double R_032[1];
double R_003[3];
double R_103[2];
double R_203[1];
double R_013[2];
double R_113[1];
double R_023[1];
double R_004[2];
double R_104[1];
double R_014[1];
double R_005[1];
for(int i=0;i<5;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<5;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<5;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<4;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<4;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<4;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<4;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<3;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<3;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<3;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<3;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<3;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<3;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<3;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<3;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<3;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<3;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<2;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<2;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<2;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<2;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<2;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<2;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<2;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<2;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<2;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<2;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<2;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<2;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<2;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<2;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<1;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<1;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<1;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<1;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<1;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<1;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<1;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<1;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<1;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<1;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<1;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<1;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<1;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<1;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<1;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<1;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<1;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<1;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<1;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<1;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<1;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
	// Contract the ket coefficients Q_... with the auxiliary Hermite
	// integrals R_{t,u,v}.  In QR_<9 digits><3 digits>, the leading nine
	// digits name the Q coefficient combination and the trailing three give
	// the base Hermite index (t,u,v) the group is shifted by.  Signs follow
	// the alternating (-1)^order convention of the MD scheme.  The groups
	// below are ordered by increasing total base order t+u+v (0 through 3).
	// NOTE(review): all scalars here; register pressure in this kernel is
	// dominated by these temporaries -- generated code, do not hand-edit.
	double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
	double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
	double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
	double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
	double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
	double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
	double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
	double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
	double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
	double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
	double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
	double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
	double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
	double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
	double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
	double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
	double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
	double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
	double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
	double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
	double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
	double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
	double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
	double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
	double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
	double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
	double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
	double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
	double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
	double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
	double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
	double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
	double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
	double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
	double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
	double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
	double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
	double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
	double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
	double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
	double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
	double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
	double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
	double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
	double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
	double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
	double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
	double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
	double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
	double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
	double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
	double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
	double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
	double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
	double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
	double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
	double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
	double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
	double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
	double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
	double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
	double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
	double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
	double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
	double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
	double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
	double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
	double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
	double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
	double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
	double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
	double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
	double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
	double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
	double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
	double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
	double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
	double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
	double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
	double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
	double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
	double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
	double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
	double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
	double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
	double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
	double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
	double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
	double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
	double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
	double QR_011000000003=Q_011000000*R_003[0]+-1*Q_111000000*R_103[0]+Q_211000000*R_203[0];
	double QR_010001000003=Q_010001000*R_003[0]+-1*Q_010101000*R_013[0]+-1*Q_110001000*R_103[0]+Q_110101000*R_113[0];
	double QR_010000001003=Q_010000001*R_003[0]+-1*Q_010000101*R_004[0]+-1*Q_110000001*R_103[0]+Q_110000101*R_104[0];
	double QR_001010000003=Q_001010000*R_003[0]+-1*Q_001110000*R_013[0]+-1*Q_101010000*R_103[0]+Q_101110000*R_113[0];
	double QR_000011000003=Q_000011000*R_003[0]+-1*Q_000111000*R_013[0]+Q_000211000*R_023[0];
	double QR_000010001003=Q_000010001*R_003[0]+-1*Q_000010101*R_004[0]+-1*Q_000110001*R_013[0]+Q_000110101*R_014[0];
	double QR_001000010003=Q_001000010*R_003[0]+-1*Q_001000110*R_004[0]+-1*Q_101000010*R_103[0]+Q_101000110*R_104[0];
	double QR_000001010003=Q_000001010*R_003[0]+-1*Q_000001110*R_004[0]+-1*Q_000101010*R_013[0]+Q_000101110*R_014[0];
	double QR_000000011003=Q_000000011*R_003[0]+-1*Q_000000111*R_004[0]+Q_000000211*R_005[0];
	double QR_011000000012=Q_011000000*R_012[0]+-1*Q_111000000*R_112[0]+Q_211000000*R_212[0];
	double QR_010001000012=Q_010001000*R_012[0]+-1*Q_010101000*R_022[0]+-1*Q_110001000*R_112[0]+Q_110101000*R_122[0];
	double QR_010000001012=Q_010000001*R_012[0]+-1*Q_010000101*R_013[0]+-1*Q_110000001*R_112[0]+Q_110000101*R_113[0];
	double QR_001010000012=Q_001010000*R_012[0]+-1*Q_001110000*R_022[0]+-1*Q_101010000*R_112[0]+Q_101110000*R_122[0];
	double QR_000011000012=Q_000011000*R_012[0]+-1*Q_000111000*R_022[0]+Q_000211000*R_032[0];
	double QR_000010001012=Q_000010001*R_012[0]+-1*Q_000010101*R_013[0]+-1*Q_000110001*R_022[0]+Q_000110101*R_023[0];
	double QR_001000010012=Q_001000010*R_012[0]+-1*Q_001000110*R_013[0]+-1*Q_101000010*R_112[0]+Q_101000110*R_113[0];
	double QR_000001010012=Q_000001010*R_012[0]+-1*Q_000001110*R_013[0]+-1*Q_000101010*R_022[0]+Q_000101110*R_023[0];
	double QR_000000011012=Q_000000011*R_012[0]+-1*Q_000000111*R_013[0]+Q_000000211*R_014[0];
	double QR_011000000021=Q_011000000*R_021[0]+-1*Q_111000000*R_121[0]+Q_211000000*R_221[0];
	double QR_010001000021=Q_010001000*R_021[0]+-1*Q_010101000*R_031[0]+-1*Q_110001000*R_121[0]+Q_110101000*R_131[0];
	double QR_010000001021=Q_010000001*R_021[0]+-1*Q_010000101*R_022[0]+-1*Q_110000001*R_121[0]+Q_110000101*R_122[0];
	double QR_001010000021=Q_001010000*R_021[0]+-1*Q_001110000*R_031[0]+-1*Q_101010000*R_121[0]+Q_101110000*R_131[0];
	double QR_000011000021=Q_000011000*R_021[0]+-1*Q_000111000*R_031[0]+Q_000211000*R_041[0];
	double QR_000010001021=Q_000010001*R_021[0]+-1*Q_000010101*R_022[0]+-1*Q_000110001*R_031[0]+Q_000110101*R_032[0];
	double QR_001000010021=Q_001000010*R_021[0]+-1*Q_001000110*R_022[0]+-1*Q_101000010*R_121[0]+Q_101000110*R_122[0];
	double QR_000001010021=Q_000001010*R_021[0]+-1*Q_000001110*R_022[0]+-1*Q_000101010*R_031[0]+Q_000101110*R_032[0];
	double QR_000000011021=Q_000000011*R_021[0]+-1*Q_000000111*R_022[0]+Q_000000211*R_023[0];
	double QR_011000000030=Q_011000000*R_030[0]+-1*Q_111000000*R_130[0]+Q_211000000*R_230[0];
	double QR_010001000030=Q_010001000*R_030[0]+-1*Q_010101000*R_040[0]+-1*Q_110001000*R_130[0]+Q_110101000*R_140[0];
	double QR_010000001030=Q_010000001*R_030[0]+-1*Q_010000101*R_031[0]+-1*Q_110000001*R_130[0]+Q_110000101*R_131[0];
	double QR_001010000030=Q_001010000*R_030[0]+-1*Q_001110000*R_040[0]+-1*Q_101010000*R_130[0]+Q_101110000*R_140[0];
	double QR_000011000030=Q_000011000*R_030[0]+-1*Q_000111000*R_040[0]+Q_000211000*R_050[0];
	double QR_000010001030=Q_000010001*R_030[0]+-1*Q_000010101*R_031[0]+-1*Q_000110001*R_040[0]+Q_000110101*R_041[0];
	double QR_001000010030=Q_001000010*R_030[0]+-1*Q_001000110*R_031[0]+-1*Q_101000010*R_130[0]+Q_101000110*R_131[0];
	double QR_000001010030=Q_000001010*R_030[0]+-1*Q_000001110*R_031[0]+-1*Q_000101010*R_040[0]+Q_000101110*R_041[0];
	double QR_000000011030=Q_000000011*R_030[0]+-1*Q_000000111*R_031[0]+Q_000000211*R_032[0];
	double QR_011000000102=Q_011000000*R_102[0]+-1*Q_111000000*R_202[0]+Q_211000000*R_302[0];
	double QR_010001000102=Q_010001000*R_102[0]+-1*Q_010101000*R_112[0]+-1*Q_110001000*R_202[0]+Q_110101000*R_212[0];
	double QR_010000001102=Q_010000001*R_102[0]+-1*Q_010000101*R_103[0]+-1*Q_110000001*R_202[0]+Q_110000101*R_203[0];
	double QR_001010000102=Q_001010000*R_102[0]+-1*Q_001110000*R_112[0]+-1*Q_101010000*R_202[0]+Q_101110000*R_212[0];
	double QR_000011000102=Q_000011000*R_102[0]+-1*Q_000111000*R_112[0]+Q_000211000*R_122[0];
	double QR_000010001102=Q_000010001*R_102[0]+-1*Q_000010101*R_103[0]+-1*Q_000110001*R_112[0]+Q_000110101*R_113[0];
	double QR_001000010102=Q_001000010*R_102[0]+-1*Q_001000110*R_103[0]+-1*Q_101000010*R_202[0]+Q_101000110*R_203[0];
	double QR_000001010102=Q_000001010*R_102[0]+-1*Q_000001110*R_103[0]+-1*Q_000101010*R_112[0]+Q_000101110*R_113[0];
	double QR_000000011102=Q_000000011*R_102[0]+-1*Q_000000111*R_103[0]+Q_000000211*R_104[0];
	double QR_011000000111=Q_011000000*R_111[0]+-1*Q_111000000*R_211[0]+Q_211000000*R_311[0];
	double QR_010001000111=Q_010001000*R_111[0]+-1*Q_010101000*R_121[0]+-1*Q_110001000*R_211[0]+Q_110101000*R_221[0];
	double QR_010000001111=Q_010000001*R_111[0]+-1*Q_010000101*R_112[0]+-1*Q_110000001*R_211[0]+Q_110000101*R_212[0];
	double QR_001010000111=Q_001010000*R_111[0]+-1*Q_001110000*R_121[0]+-1*Q_101010000*R_211[0]+Q_101110000*R_221[0];
	double QR_000011000111=Q_000011000*R_111[0]+-1*Q_000111000*R_121[0]+Q_000211000*R_131[0];
	double QR_000010001111=Q_000010001*R_111[0]+-1*Q_000010101*R_112[0]+-1*Q_000110001*R_121[0]+Q_000110101*R_122[0];
	double QR_001000010111=Q_001000010*R_111[0]+-1*Q_001000110*R_112[0]+-1*Q_101000010*R_211[0]+Q_101000110*R_212[0];
	double QR_000001010111=Q_000001010*R_111[0]+-1*Q_000001110*R_112[0]+-1*Q_000101010*R_121[0]+Q_000101110*R_122[0];
	double QR_000000011111=Q_000000011*R_111[0]+-1*Q_000000111*R_112[0]+Q_000000211*R_113[0];
	double QR_011000000120=Q_011000000*R_120[0]+-1*Q_111000000*R_220[0]+Q_211000000*R_320[0];
	double QR_010001000120=Q_010001000*R_120[0]+-1*Q_010101000*R_130[0]+-1*Q_110001000*R_220[0]+Q_110101000*R_230[0];
	double QR_010000001120=Q_010000001*R_120[0]+-1*Q_010000101*R_121[0]+-1*Q_110000001*R_220[0]+Q_110000101*R_221[0];
	double QR_001010000120=Q_001010000*R_120[0]+-1*Q_001110000*R_130[0]+-1*Q_101010000*R_220[0]+Q_101110000*R_230[0];
	double QR_000011000120=Q_000011000*R_120[0]+-1*Q_000111000*R_130[0]+Q_000211000*R_140[0];
	double QR_000010001120=Q_000010001*R_120[0]+-1*Q_000010101*R_121[0]+-1*Q_000110001*R_130[0]+Q_000110101*R_131[0];
	double QR_001000010120=Q_001000010*R_120[0]+-1*Q_001000110*R_121[0]+-1*Q_101000010*R_220[0]+Q_101000110*R_221[0];
	double QR_000001010120=Q_000001010*R_120[0]+-1*Q_000001110*R_121[0]+-1*Q_000101010*R_130[0]+Q_000101110*R_131[0];
	double QR_000000011120=Q_000000011*R_120[0]+-1*Q_000000111*R_121[0]+Q_000000211*R_122[0];
	double QR_011000000201=Q_011000000*R_201[0]+-1*Q_111000000*R_301[0]+Q_211000000*R_401[0];
	double QR_010001000201=Q_010001000*R_201[0]+-1*Q_010101000*R_211[0]+-1*Q_110001000*R_301[0]+Q_110101000*R_311[0];
	double QR_010000001201=Q_010000001*R_201[0]+-1*Q_010000101*R_202[0]+-1*Q_110000001*R_301[0]+Q_110000101*R_302[0];
	double QR_001010000201=Q_001010000*R_201[0]+-1*Q_001110000*R_211[0]+-1*Q_101010000*R_301[0]+Q_101110000*R_311[0];
	double QR_000011000201=Q_000011000*R_201[0]+-1*Q_000111000*R_211[0]+Q_000211000*R_221[0];
	double QR_000010001201=Q_000010001*R_201[0]+-1*Q_000010101*R_202[0]+-1*Q_000110001*R_211[0]+Q_000110101*R_212[0];
	double QR_001000010201=Q_001000010*R_201[0]+-1*Q_001000110*R_202[0]+-1*Q_101000010*R_301[0]+Q_101000110*R_302[0];
	double QR_000001010201=Q_000001010*R_201[0]+-1*Q_000001110*R_202[0]+-1*Q_000101010*R_211[0]+Q_000101110*R_212[0];
	double QR_000000011201=Q_000000011*R_201[0]+-1*Q_000000111*R_202[0]+Q_000000211*R_203[0];
	double QR_011000000210=Q_011000000*R_210[0]+-1*Q_111000000*R_310[0]+Q_211000000*R_410[0];
	double QR_010001000210=Q_010001000*R_210[0]+-1*Q_010101000*R_220[0]+-1*Q_110001000*R_310[0]+Q_110101000*R_320[0];
	double QR_010000001210=Q_010000001*R_210[0]+-1*Q_010000101*R_211[0]+-1*Q_110000001*R_310[0]+Q_110000101*R_311[0];
	double QR_001010000210=Q_001010000*R_210[0]+-1*Q_001110000*R_220[0]+-1*Q_101010000*R_310[0]+Q_101110000*R_320[0];
	double QR_000011000210=Q_000011000*R_210[0]+-1*Q_000111000*R_220[0]+Q_000211000*R_230[0];
	double QR_000010001210=Q_000010001*R_210[0]+-1*Q_000010101*R_211[0]+-1*Q_000110001*R_220[0]+Q_000110101*R_221[0];
	double QR_001000010210=Q_001000010*R_210[0]+-1*Q_001000110*R_211[0]+-1*Q_101000010*R_310[0]+Q_101000110*R_311[0];
	double QR_000001010210=Q_000001010*R_210[0]+-1*Q_000001110*R_211[0]+-1*Q_000101010*R_220[0]+Q_000101110*R_221[0];
	double QR_000000011210=Q_000000011*R_210[0]+-1*Q_000000111*R_211[0]+Q_000000211*R_212[0];
	double QR_011000000300=Q_011000000*R_300[0]+-1*Q_111000000*R_400[0]+Q_211000000*R_500[0];
	double QR_010001000300=Q_010001000*R_300[0]+-1*Q_010101000*R_310[0]+-1*Q_110001000*R_400[0]+Q_110101000*R_410[0];
	double QR_010000001300=Q_010000001*R_300[0]+-1*Q_010000101*R_301[0]+-1*Q_110000001*R_400[0]+Q_110000101*R_401[0];
	double QR_001010000300=Q_001010000*R_300[0]+-1*Q_001110000*R_310[0]+-1*Q_101010000*R_400[0]+Q_101110000*R_410[0];
	double QR_000011000300=Q_000011000*R_300[0]+-1*Q_000111000*R_310[0]+Q_000211000*R_320[0];
	double QR_000010001300=Q_000010001*R_300[0]+-1*Q_000010101*R_301[0]+-1*Q_000110001*R_310[0]+Q_000110101*R_311[0];
	double QR_001000010300=Q_001000010*R_300[0]+-1*Q_001000110*R_301[0]+-1*Q_101000010*R_400[0]+Q_101000110*R_401[0];
	double QR_000001010300=Q_000001010*R_300[0]+-1*Q_000001110*R_301[0]+-1*Q_000101010*R_310[0]+Q_000101110*R_311[0];
	double QR_000000011300=Q_000000011*R_300[0]+-1*Q_000000111*R_301[0]+Q_000000211*R_302[0];
	// Bra-side (P) Hermite expansion coefficients, one entry per Cartesian
	// axis, built by the same MD recurrences as the Qd_ set above but up to
	// order (2,1).  aPin1 is a precomputed scalar (presumably 1/(2*zeta) for
	// the bra pair -- TODO confirm); Pd_001/Pd_010 come from earlier in the
	// kernel, outside this view.
	double Pd_101[3];
	double Pd_110[3];
	double Pd_011[3];
	double Pd_111[3];
	double Pd_211[3];
	double Pd_020[3];
	double Pd_120[3];
	double Pd_220[3];
	double Pd_021[3];
	double Pd_121[3];
	double Pd_221[3];
	double Pd_321[3];
	// Base cases: pure first-order coefficients are the constant aPin1.
	for(int i=0;i<3;i++){
	Pd_101[i]=aPin1;
	}
	for(int i=0;i<3;i++){
	Pd_110[i]=aPin1;
	}
	// Order (1,1) coefficients.
	for(int i=0;i<3;i++){
	Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
	}
	for(int i=0;i<3;i++){
	Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
	}
	for(int i=0;i<3;i++){
	Pd_211[i]=aPin1*Pd_101[i];
	}
	// Order (2,0) coefficients.
	for(int i=0;i<3;i++){
	Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
	}
	for(int i=0;i<3;i++){
	Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
	}
	for(int i=0;i<3;i++){
	Pd_220[i]=aPin1*Pd_110[i];
	}
	// Order (2,1) coefficients, combining the (1,1) and (2,0) sets.
	for(int i=0;i<3;i++){
	Pd_021[i]=Pd_111[i]+Pd_010[i]*Pd_011[i];
	}
	for(int i=0;i<3;i++){
	Pd_121[i]=2*Pd_211[i]+Pd_010[i]*Pd_111[i]+aPin1*Pd_011[i];
	}
	for(int i=0;i<3;i++){
	Pd_221[i]=Pd_010[i]*Pd_211[i]+aPin1*Pd_111[i];
	}
	for(int i=0;i<3;i++){
	Pd_321[i]=aPin1*Pd_211[i];
	}
	// Per-axis coefficient products for the bra, mirroring the Q_ set above:
	// P_xxxyyyzzz is the product of the Pd_ab[axis] factors whose orders are
	// packed, three digits per axis, into the name.
	double P_021000000=Pd_021[0];
	double P_121000000=Pd_121[0];
	double P_221000000=Pd_221[0];
	double P_321000000=Pd_321[0];
	double P_020001000=Pd_020[0]*Pd_001[1];
	double P_020101000=Pd_020[0]*Pd_101[1];
	double P_120001000=Pd_120[0]*Pd_001[1];
	double P_120101000=Pd_120[0]*Pd_101[1];
	double P_220001000=Pd_220[0]*Pd_001[1];
	double P_220101000=Pd_220[0]*Pd_101[1];
	double P_020000001=Pd_020[0]*Pd_001[2];
	double P_020000101=Pd_020[0]*Pd_101[2];
	double P_120000001=Pd_120[0]*Pd_001[2];
	double P_120000101=Pd_120[0]*Pd_101[2];
	double P_220000001=Pd_220[0]*Pd_001[2];
	double P_220000101=Pd_220[0]*Pd_101[2];
	double P_011010000=Pd_011[0]*Pd_010[1];
	double P_011110000=Pd_011[0]*Pd_110[1];
	double P_111010000=Pd_111[0]*Pd_010[1];
	double P_111110000=Pd_111[0]*Pd_110[1];
	double P_211010000=Pd_211[0]*Pd_010[1];
	double P_211110000=Pd_211[0]*Pd_110[1];
	double P_010011000=Pd_010[0]*Pd_011[1];
	double P_010111000=Pd_010[0]*Pd_111[1];
	double P_010211000=Pd_010[0]*Pd_211[1];
	double P_110011000=Pd_110[0]*Pd_011[1];
	double P_110111000=Pd_110[0]*Pd_111[1];
	double P_110211000=Pd_110[0]*Pd_211[1];
	double P_010010001=Pd_010[0]*Pd_010[1]*Pd_001[2];
	double P_010010101=Pd_010[0]*Pd_010[1]*Pd_101[2];
	double P_010110001=Pd_010[0]*Pd_110[1]*Pd_001[2];
	double P_010110101=Pd_010[0]*Pd_110[1]*Pd_101[2];
	double P_110010001=Pd_110[0]*Pd_010[1]*Pd_001[2];
	double P_110010101=Pd_110[0]*Pd_010[1]*Pd_101[2];
	double P_110110001=Pd_110[0]*Pd_110[1]*Pd_001[2];
	double P_110110101=Pd_110[0]*Pd_110[1]*Pd_101[2];
	double P_001020000=Pd_001[0]*Pd_020[1];
	double P_001120000=Pd_001[0]*Pd_120[1];
	double P_001220000=Pd_001[0]*Pd_220[1];
	double P_101020000=Pd_101[0]*Pd_020[1];
	double P_101120000=Pd_101[0]*Pd_120[1];
	double P_101220000=Pd_101[0]*Pd_220[1];
	double P_000021000=Pd_021[1];
	double P_000121000=Pd_121[1];
	double P_000221000=Pd_221[1];
	double P_000321000=Pd_321[1];
	double P_000020001=Pd_020[1]*Pd_001[2];
	double P_000020101=Pd_020[1]*Pd_101[2];
	double P_000120001=Pd_120[1]*Pd_001[2];
	double P_000120101=Pd_120[1]*Pd_101[2];
	double P_000220001=Pd_220[1]*Pd_001[2];
	double P_000220101=Pd_220[1]*Pd_101[2];
	double P_011000010=Pd_011[0]*Pd_010[2];
	double P_011000110=Pd_011[0]*Pd_110[2];
	double P_111000010=Pd_111[0]*Pd_010[2];
	double P_111000110=Pd_111[0]*Pd_110[2];
	double P_211000010=Pd_211[0]*Pd_010[2];
	double P_211000110=Pd_211[0]*Pd_110[2];
	double P_010001010=Pd_010[0]*Pd_001[1]*Pd_010[2];
	double P_010001110=Pd_010[0]*Pd_001[1]*Pd_110[2];
	double P_010101010=Pd_010[0]*Pd_101[1]*Pd_010[2];
	double P_010101110=Pd_010[0]*Pd_101[1]*Pd_110[2];
	double P_110001010=Pd_110[0]*Pd_001[1]*Pd_010[2];
	double P_110001110=Pd_110[0]*Pd_001[1]*Pd_110[2];
	double P_110101010=Pd_110[0]*Pd_101[1]*Pd_010[2];
	double P_110101110=Pd_110[0]*Pd_101[1]*Pd_110[2];
	double P_010000011=Pd_010[0]*Pd_011[2];
	double P_010000111=Pd_010[0]*Pd_111[2];
	double P_010000211=Pd_010[0]*Pd_211[2];
	double P_110000011=Pd_110[0]*Pd_011[2];
	double P_110000111=Pd_110[0]*Pd_111[2];
	double P_110000211=Pd_110[0]*Pd_211[2];
	double P_001010010=Pd_001[0]*Pd_010[1]*Pd_010[2];
	double P_001010110=Pd_001[0]*Pd_010[1]*Pd_110[2];
	double P_001110010=Pd_001[0]*Pd_110[1]*Pd_010[2];
	double P_001110110=Pd_001[0]*Pd_110[1]*Pd_110[2];
	double P_101010010=Pd_101[0]*Pd_010[1]*Pd_010[2];
	double P_101010110=Pd_101[0]*Pd_010[1]*Pd_110[2];
	double P_101110010=Pd_101[0]*Pd_110[1]*Pd_010[2];
	double P_101110110=Pd_101[0]*Pd_110[1]*Pd_110[2];
	double P_000011010=Pd_011[1]*Pd_010[2];
	double P_000011110=Pd_011[1]*Pd_110[2];
	double P_000111010=Pd_111[1]*Pd_010[2];
	double P_000111110=Pd_111[1]*Pd_110[2];
	double P_000211010=Pd_211[1]*Pd_010[2];
	double P_000211110=Pd_211[1]*Pd_110[2];
	double P_000010011=Pd_010[1]*Pd_011[2];
	double P_000010111=Pd_010[1]*Pd_111[2];
	double P_000010211=Pd_010[1]*Pd_211[2];
	double P_000110011=Pd_110[1]*Pd_011[2];
	double P_000110111=Pd_110[1]*Pd_111[2];
	double P_000110211=Pd_110[1]*Pd_211[2];
	double P_001000020=Pd_001[0]*Pd_020[2];
	double P_001000120=Pd_001[0]*Pd_120[2];
	double P_001000220=Pd_001[0]*Pd_220[2];
	double P_101000020=Pd_101[0]*Pd_020[2];
	double P_101000120=Pd_101[0]*Pd_120[2];
	double P_101000220=Pd_101[0]*Pd_220[2];
	double P_000001020=Pd_001[1]*Pd_020[2];
	double P_000001120=Pd_001[1]*Pd_120[2];
	double P_000001220=Pd_001[1]*Pd_220[2];
	double P_000101020=Pd_101[1]*Pd_020[2];
	double P_000101120=Pd_101[1]*Pd_120[2];
	double P_000101220=Pd_101[1]*Pd_220[2];
	double P_000000021=Pd_021[2];
	double P_000000121=Pd_121[2];
	double P_000000221=Pd_221[2];
	double P_000000321=Pd_321[2];
ans_temp[ans_id*18+0]+=Pmtrx[0]*(P_021000000*QR_011000000000+P_121000000*QR_011000000100+P_221000000*QR_011000000200+P_321000000*QR_011000000300);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(P_021000000*QR_010001000000+P_121000000*QR_010001000100+P_221000000*QR_010001000200+P_321000000*QR_010001000300);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(P_021000000*QR_010000001000+P_121000000*QR_010000001100+P_221000000*QR_010000001200+P_321000000*QR_010000001300);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(P_021000000*QR_001010000000+P_121000000*QR_001010000100+P_221000000*QR_001010000200+P_321000000*QR_001010000300);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(P_021000000*QR_000011000000+P_121000000*QR_000011000100+P_221000000*QR_000011000200+P_321000000*QR_000011000300);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(P_021000000*QR_000010001000+P_121000000*QR_000010001100+P_221000000*QR_000010001200+P_321000000*QR_000010001300);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(P_021000000*QR_001000010000+P_121000000*QR_001000010100+P_221000000*QR_001000010200+P_321000000*QR_001000010300);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(P_021000000*QR_000001010000+P_121000000*QR_000001010100+P_221000000*QR_000001010200+P_321000000*QR_000001010300);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(P_021000000*QR_000000011000+P_121000000*QR_000000011100+P_221000000*QR_000000011200+P_321000000*QR_000000011300);
ans_temp[ans_id*18+0]+=Pmtrx[3]*(P_020001000*QR_011000000000+P_020101000*QR_011000000010+P_120001000*QR_011000000100+P_120101000*QR_011000000110+P_220001000*QR_011000000200+P_220101000*QR_011000000210);
ans_temp[ans_id*18+0]+=Pmtrx[4]*(P_020001000*QR_010001000000+P_020101000*QR_010001000010+P_120001000*QR_010001000100+P_120101000*QR_010001000110+P_220001000*QR_010001000200+P_220101000*QR_010001000210);
ans_temp[ans_id*18+0]+=Pmtrx[5]*(P_020001000*QR_010000001000+P_020101000*QR_010000001010+P_120001000*QR_010000001100+P_120101000*QR_010000001110+P_220001000*QR_010000001200+P_220101000*QR_010000001210);
ans_temp[ans_id*18+1]+=Pmtrx[3]*(P_020001000*QR_001010000000+P_020101000*QR_001010000010+P_120001000*QR_001010000100+P_120101000*QR_001010000110+P_220001000*QR_001010000200+P_220101000*QR_001010000210);
ans_temp[ans_id*18+1]+=Pmtrx[4]*(P_020001000*QR_000011000000+P_020101000*QR_000011000010+P_120001000*QR_000011000100+P_120101000*QR_000011000110+P_220001000*QR_000011000200+P_220101000*QR_000011000210);
ans_temp[ans_id*18+1]+=Pmtrx[5]*(P_020001000*QR_000010001000+P_020101000*QR_000010001010+P_120001000*QR_000010001100+P_120101000*QR_000010001110+P_220001000*QR_000010001200+P_220101000*QR_000010001210);
ans_temp[ans_id*18+2]+=Pmtrx[3]*(P_020001000*QR_001000010000+P_020101000*QR_001000010010+P_120001000*QR_001000010100+P_120101000*QR_001000010110+P_220001000*QR_001000010200+P_220101000*QR_001000010210);
ans_temp[ans_id*18+2]+=Pmtrx[4]*(P_020001000*QR_000001010000+P_020101000*QR_000001010010+P_120001000*QR_000001010100+P_120101000*QR_000001010110+P_220001000*QR_000001010200+P_220101000*QR_000001010210);
ans_temp[ans_id*18+2]+=Pmtrx[5]*(P_020001000*QR_000000011000+P_020101000*QR_000000011010+P_120001000*QR_000000011100+P_120101000*QR_000000011110+P_220001000*QR_000000011200+P_220101000*QR_000000011210);
ans_temp[ans_id*18+0]+=Pmtrx[6]*(P_020000001*QR_011000000000+P_020000101*QR_011000000001+P_120000001*QR_011000000100+P_120000101*QR_011000000101+P_220000001*QR_011000000200+P_220000101*QR_011000000201);
ans_temp[ans_id*18+0]+=Pmtrx[7]*(P_020000001*QR_010001000000+P_020000101*QR_010001000001+P_120000001*QR_010001000100+P_120000101*QR_010001000101+P_220000001*QR_010001000200+P_220000101*QR_010001000201);
ans_temp[ans_id*18+0]+=Pmtrx[8]*(P_020000001*QR_010000001000+P_020000101*QR_010000001001+P_120000001*QR_010000001100+P_120000101*QR_010000001101+P_220000001*QR_010000001200+P_220000101*QR_010000001201);
ans_temp[ans_id*18+1]+=Pmtrx[6]*(P_020000001*QR_001010000000+P_020000101*QR_001010000001+P_120000001*QR_001010000100+P_120000101*QR_001010000101+P_220000001*QR_001010000200+P_220000101*QR_001010000201);
ans_temp[ans_id*18+1]+=Pmtrx[7]*(P_020000001*QR_000011000000+P_020000101*QR_000011000001+P_120000001*QR_000011000100+P_120000101*QR_000011000101+P_220000001*QR_000011000200+P_220000101*QR_000011000201);
ans_temp[ans_id*18+1]+=Pmtrx[8]*(P_020000001*QR_000010001000+P_020000101*QR_000010001001+P_120000001*QR_000010001100+P_120000101*QR_000010001101+P_220000001*QR_000010001200+P_220000101*QR_000010001201);
ans_temp[ans_id*18+2]+=Pmtrx[6]*(P_020000001*QR_001000010000+P_020000101*QR_001000010001+P_120000001*QR_001000010100+P_120000101*QR_001000010101+P_220000001*QR_001000010200+P_220000101*QR_001000010201);
ans_temp[ans_id*18+2]+=Pmtrx[7]*(P_020000001*QR_000001010000+P_020000101*QR_000001010001+P_120000001*QR_000001010100+P_120000101*QR_000001010101+P_220000001*QR_000001010200+P_220000101*QR_000001010201);
ans_temp[ans_id*18+2]+=Pmtrx[8]*(P_020000001*QR_000000011000+P_020000101*QR_000000011001+P_120000001*QR_000000011100+P_120000101*QR_000000011101+P_220000001*QR_000000011200+P_220000101*QR_000000011201);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(P_011010000*QR_011000000000+P_011110000*QR_011000000010+P_111010000*QR_011000000100+P_111110000*QR_011000000110+P_211010000*QR_011000000200+P_211110000*QR_011000000210);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(P_011010000*QR_010001000000+P_011110000*QR_010001000010+P_111010000*QR_010001000100+P_111110000*QR_010001000110+P_211010000*QR_010001000200+P_211110000*QR_010001000210);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(P_011010000*QR_010000001000+P_011110000*QR_010000001010+P_111010000*QR_010000001100+P_111110000*QR_010000001110+P_211010000*QR_010000001200+P_211110000*QR_010000001210);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(P_011010000*QR_001010000000+P_011110000*QR_001010000010+P_111010000*QR_001010000100+P_111110000*QR_001010000110+P_211010000*QR_001010000200+P_211110000*QR_001010000210);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(P_011010000*QR_000011000000+P_011110000*QR_000011000010+P_111010000*QR_000011000100+P_111110000*QR_000011000110+P_211010000*QR_000011000200+P_211110000*QR_000011000210);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(P_011010000*QR_000010001000+P_011110000*QR_000010001010+P_111010000*QR_000010001100+P_111110000*QR_000010001110+P_211010000*QR_000010001200+P_211110000*QR_000010001210);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(P_011010000*QR_001000010000+P_011110000*QR_001000010010+P_111010000*QR_001000010100+P_111110000*QR_001000010110+P_211010000*QR_001000010200+P_211110000*QR_001000010210);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(P_011010000*QR_000001010000+P_011110000*QR_000001010010+P_111010000*QR_000001010100+P_111110000*QR_000001010110+P_211010000*QR_000001010200+P_211110000*QR_000001010210);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(P_011010000*QR_000000011000+P_011110000*QR_000000011010+P_111010000*QR_000000011100+P_111110000*QR_000000011110+P_211010000*QR_000000011200+P_211110000*QR_000000011210);
ans_temp[ans_id*18+3]+=Pmtrx[3]*(P_010011000*QR_011000000000+P_010111000*QR_011000000010+P_010211000*QR_011000000020+P_110011000*QR_011000000100+P_110111000*QR_011000000110+P_110211000*QR_011000000120);
ans_temp[ans_id*18+3]+=Pmtrx[4]*(P_010011000*QR_010001000000+P_010111000*QR_010001000010+P_010211000*QR_010001000020+P_110011000*QR_010001000100+P_110111000*QR_010001000110+P_110211000*QR_010001000120);
ans_temp[ans_id*18+3]+=Pmtrx[5]*(P_010011000*QR_010000001000+P_010111000*QR_010000001010+P_010211000*QR_010000001020+P_110011000*QR_010000001100+P_110111000*QR_010000001110+P_110211000*QR_010000001120);
ans_temp[ans_id*18+4]+=Pmtrx[3]*(P_010011000*QR_001010000000+P_010111000*QR_001010000010+P_010211000*QR_001010000020+P_110011000*QR_001010000100+P_110111000*QR_001010000110+P_110211000*QR_001010000120);
ans_temp[ans_id*18+4]+=Pmtrx[4]*(P_010011000*QR_000011000000+P_010111000*QR_000011000010+P_010211000*QR_000011000020+P_110011000*QR_000011000100+P_110111000*QR_000011000110+P_110211000*QR_000011000120);
ans_temp[ans_id*18+4]+=Pmtrx[5]*(P_010011000*QR_000010001000+P_010111000*QR_000010001010+P_010211000*QR_000010001020+P_110011000*QR_000010001100+P_110111000*QR_000010001110+P_110211000*QR_000010001120);
ans_temp[ans_id*18+5]+=Pmtrx[3]*(P_010011000*QR_001000010000+P_010111000*QR_001000010010+P_010211000*QR_001000010020+P_110011000*QR_001000010100+P_110111000*QR_001000010110+P_110211000*QR_001000010120);
ans_temp[ans_id*18+5]+=Pmtrx[4]*(P_010011000*QR_000001010000+P_010111000*QR_000001010010+P_010211000*QR_000001010020+P_110011000*QR_000001010100+P_110111000*QR_000001010110+P_110211000*QR_000001010120);
ans_temp[ans_id*18+5]+=Pmtrx[5]*(P_010011000*QR_000000011000+P_010111000*QR_000000011010+P_010211000*QR_000000011020+P_110011000*QR_000000011100+P_110111000*QR_000000011110+P_110211000*QR_000000011120);
ans_temp[ans_id*18+3]+=Pmtrx[6]*(P_010010001*QR_011000000000+P_010010101*QR_011000000001+P_010110001*QR_011000000010+P_010110101*QR_011000000011+P_110010001*QR_011000000100+P_110010101*QR_011000000101+P_110110001*QR_011000000110+P_110110101*QR_011000000111);
ans_temp[ans_id*18+3]+=Pmtrx[7]*(P_010010001*QR_010001000000+P_010010101*QR_010001000001+P_010110001*QR_010001000010+P_010110101*QR_010001000011+P_110010001*QR_010001000100+P_110010101*QR_010001000101+P_110110001*QR_010001000110+P_110110101*QR_010001000111);
ans_temp[ans_id*18+3]+=Pmtrx[8]*(P_010010001*QR_010000001000+P_010010101*QR_010000001001+P_010110001*QR_010000001010+P_010110101*QR_010000001011+P_110010001*QR_010000001100+P_110010101*QR_010000001101+P_110110001*QR_010000001110+P_110110101*QR_010000001111);
ans_temp[ans_id*18+4]+=Pmtrx[6]*(P_010010001*QR_001010000000+P_010010101*QR_001010000001+P_010110001*QR_001010000010+P_010110101*QR_001010000011+P_110010001*QR_001010000100+P_110010101*QR_001010000101+P_110110001*QR_001010000110+P_110110101*QR_001010000111);
ans_temp[ans_id*18+4]+=Pmtrx[7]*(P_010010001*QR_000011000000+P_010010101*QR_000011000001+P_010110001*QR_000011000010+P_010110101*QR_000011000011+P_110010001*QR_000011000100+P_110010101*QR_000011000101+P_110110001*QR_000011000110+P_110110101*QR_000011000111);
ans_temp[ans_id*18+4]+=Pmtrx[8]*(P_010010001*QR_000010001000+P_010010101*QR_000010001001+P_010110001*QR_000010001010+P_010110101*QR_000010001011+P_110010001*QR_000010001100+P_110010101*QR_000010001101+P_110110001*QR_000010001110+P_110110101*QR_000010001111);
ans_temp[ans_id*18+5]+=Pmtrx[6]*(P_010010001*QR_001000010000+P_010010101*QR_001000010001+P_010110001*QR_001000010010+P_010110101*QR_001000010011+P_110010001*QR_001000010100+P_110010101*QR_001000010101+P_110110001*QR_001000010110+P_110110101*QR_001000010111);
ans_temp[ans_id*18+5]+=Pmtrx[7]*(P_010010001*QR_000001010000+P_010010101*QR_000001010001+P_010110001*QR_000001010010+P_010110101*QR_000001010011+P_110010001*QR_000001010100+P_110010101*QR_000001010101+P_110110001*QR_000001010110+P_110110101*QR_000001010111);
ans_temp[ans_id*18+5]+=Pmtrx[8]*(P_010010001*QR_000000011000+P_010010101*QR_000000011001+P_010110001*QR_000000011010+P_010110101*QR_000000011011+P_110010001*QR_000000011100+P_110010101*QR_000000011101+P_110110001*QR_000000011110+P_110110101*QR_000000011111);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(P_001020000*QR_011000000000+P_001120000*QR_011000000010+P_001220000*QR_011000000020+P_101020000*QR_011000000100+P_101120000*QR_011000000110+P_101220000*QR_011000000120);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(P_001020000*QR_010001000000+P_001120000*QR_010001000010+P_001220000*QR_010001000020+P_101020000*QR_010001000100+P_101120000*QR_010001000110+P_101220000*QR_010001000120);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(P_001020000*QR_010000001000+P_001120000*QR_010000001010+P_001220000*QR_010000001020+P_101020000*QR_010000001100+P_101120000*QR_010000001110+P_101220000*QR_010000001120);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(P_001020000*QR_001010000000+P_001120000*QR_001010000010+P_001220000*QR_001010000020+P_101020000*QR_001010000100+P_101120000*QR_001010000110+P_101220000*QR_001010000120);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(P_001020000*QR_000011000000+P_001120000*QR_000011000010+P_001220000*QR_000011000020+P_101020000*QR_000011000100+P_101120000*QR_000011000110+P_101220000*QR_000011000120);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(P_001020000*QR_000010001000+P_001120000*QR_000010001010+P_001220000*QR_000010001020+P_101020000*QR_000010001100+P_101120000*QR_000010001110+P_101220000*QR_000010001120);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(P_001020000*QR_001000010000+P_001120000*QR_001000010010+P_001220000*QR_001000010020+P_101020000*QR_001000010100+P_101120000*QR_001000010110+P_101220000*QR_001000010120);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(P_001020000*QR_000001010000+P_001120000*QR_000001010010+P_001220000*QR_000001010020+P_101020000*QR_000001010100+P_101120000*QR_000001010110+P_101220000*QR_000001010120);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(P_001020000*QR_000000011000+P_001120000*QR_000000011010+P_001220000*QR_000000011020+P_101020000*QR_000000011100+P_101120000*QR_000000011110+P_101220000*QR_000000011120);
ans_temp[ans_id*18+6]+=Pmtrx[3]*(P_000021000*QR_011000000000+P_000121000*QR_011000000010+P_000221000*QR_011000000020+P_000321000*QR_011000000030);
ans_temp[ans_id*18+6]+=Pmtrx[4]*(P_000021000*QR_010001000000+P_000121000*QR_010001000010+P_000221000*QR_010001000020+P_000321000*QR_010001000030);
ans_temp[ans_id*18+6]+=Pmtrx[5]*(P_000021000*QR_010000001000+P_000121000*QR_010000001010+P_000221000*QR_010000001020+P_000321000*QR_010000001030);
ans_temp[ans_id*18+7]+=Pmtrx[3]*(P_000021000*QR_001010000000+P_000121000*QR_001010000010+P_000221000*QR_001010000020+P_000321000*QR_001010000030);
ans_temp[ans_id*18+7]+=Pmtrx[4]*(P_000021000*QR_000011000000+P_000121000*QR_000011000010+P_000221000*QR_000011000020+P_000321000*QR_000011000030);
ans_temp[ans_id*18+7]+=Pmtrx[5]*(P_000021000*QR_000010001000+P_000121000*QR_000010001010+P_000221000*QR_000010001020+P_000321000*QR_000010001030);
ans_temp[ans_id*18+8]+=Pmtrx[3]*(P_000021000*QR_001000010000+P_000121000*QR_001000010010+P_000221000*QR_001000010020+P_000321000*QR_001000010030);
ans_temp[ans_id*18+8]+=Pmtrx[4]*(P_000021000*QR_000001010000+P_000121000*QR_000001010010+P_000221000*QR_000001010020+P_000321000*QR_000001010030);
ans_temp[ans_id*18+8]+=Pmtrx[5]*(P_000021000*QR_000000011000+P_000121000*QR_000000011010+P_000221000*QR_000000011020+P_000321000*QR_000000011030);
ans_temp[ans_id*18+6]+=Pmtrx[6]*(P_000020001*QR_011000000000+P_000020101*QR_011000000001+P_000120001*QR_011000000010+P_000120101*QR_011000000011+P_000220001*QR_011000000020+P_000220101*QR_011000000021);
ans_temp[ans_id*18+6]+=Pmtrx[7]*(P_000020001*QR_010001000000+P_000020101*QR_010001000001+P_000120001*QR_010001000010+P_000120101*QR_010001000011+P_000220001*QR_010001000020+P_000220101*QR_010001000021);
ans_temp[ans_id*18+6]+=Pmtrx[8]*(P_000020001*QR_010000001000+P_000020101*QR_010000001001+P_000120001*QR_010000001010+P_000120101*QR_010000001011+P_000220001*QR_010000001020+P_000220101*QR_010000001021);
ans_temp[ans_id*18+7]+=Pmtrx[6]*(P_000020001*QR_001010000000+P_000020101*QR_001010000001+P_000120001*QR_001010000010+P_000120101*QR_001010000011+P_000220001*QR_001010000020+P_000220101*QR_001010000021);
ans_temp[ans_id*18+7]+=Pmtrx[7]*(P_000020001*QR_000011000000+P_000020101*QR_000011000001+P_000120001*QR_000011000010+P_000120101*QR_000011000011+P_000220001*QR_000011000020+P_000220101*QR_000011000021);
ans_temp[ans_id*18+7]+=Pmtrx[8]*(P_000020001*QR_000010001000+P_000020101*QR_000010001001+P_000120001*QR_000010001010+P_000120101*QR_000010001011+P_000220001*QR_000010001020+P_000220101*QR_000010001021);
ans_temp[ans_id*18+8]+=Pmtrx[6]*(P_000020001*QR_001000010000+P_000020101*QR_001000010001+P_000120001*QR_001000010010+P_000120101*QR_001000010011+P_000220001*QR_001000010020+P_000220101*QR_001000010021);
ans_temp[ans_id*18+8]+=Pmtrx[7]*(P_000020001*QR_000001010000+P_000020101*QR_000001010001+P_000120001*QR_000001010010+P_000120101*QR_000001010011+P_000220001*QR_000001010020+P_000220101*QR_000001010021);
ans_temp[ans_id*18+8]+=Pmtrx[8]*(P_000020001*QR_000000011000+P_000020101*QR_000000011001+P_000120001*QR_000000011010+P_000120101*QR_000000011011+P_000220001*QR_000000011020+P_000220101*QR_000000011021);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(P_011000010*QR_011000000000+P_011000110*QR_011000000001+P_111000010*QR_011000000100+P_111000110*QR_011000000101+P_211000010*QR_011000000200+P_211000110*QR_011000000201);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(P_011000010*QR_010001000000+P_011000110*QR_010001000001+P_111000010*QR_010001000100+P_111000110*QR_010001000101+P_211000010*QR_010001000200+P_211000110*QR_010001000201);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(P_011000010*QR_010000001000+P_011000110*QR_010000001001+P_111000010*QR_010000001100+P_111000110*QR_010000001101+P_211000010*QR_010000001200+P_211000110*QR_010000001201);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(P_011000010*QR_001010000000+P_011000110*QR_001010000001+P_111000010*QR_001010000100+P_111000110*QR_001010000101+P_211000010*QR_001010000200+P_211000110*QR_001010000201);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(P_011000010*QR_000011000000+P_011000110*QR_000011000001+P_111000010*QR_000011000100+P_111000110*QR_000011000101+P_211000010*QR_000011000200+P_211000110*QR_000011000201);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(P_011000010*QR_000010001000+P_011000110*QR_000010001001+P_111000010*QR_000010001100+P_111000110*QR_000010001101+P_211000010*QR_000010001200+P_211000110*QR_000010001201);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(P_011000010*QR_001000010000+P_011000110*QR_001000010001+P_111000010*QR_001000010100+P_111000110*QR_001000010101+P_211000010*QR_001000010200+P_211000110*QR_001000010201);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(P_011000010*QR_000001010000+P_011000110*QR_000001010001+P_111000010*QR_000001010100+P_111000110*QR_000001010101+P_211000010*QR_000001010200+P_211000110*QR_000001010201);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(P_011000010*QR_000000011000+P_011000110*QR_000000011001+P_111000010*QR_000000011100+P_111000110*QR_000000011101+P_211000010*QR_000000011200+P_211000110*QR_000000011201);
ans_temp[ans_id*18+9]+=Pmtrx[3]*(P_010001010*QR_011000000000+P_010001110*QR_011000000001+P_010101010*QR_011000000010+P_010101110*QR_011000000011+P_110001010*QR_011000000100+P_110001110*QR_011000000101+P_110101010*QR_011000000110+P_110101110*QR_011000000111);
ans_temp[ans_id*18+9]+=Pmtrx[4]*(P_010001010*QR_010001000000+P_010001110*QR_010001000001+P_010101010*QR_010001000010+P_010101110*QR_010001000011+P_110001010*QR_010001000100+P_110001110*QR_010001000101+P_110101010*QR_010001000110+P_110101110*QR_010001000111);
ans_temp[ans_id*18+9]+=Pmtrx[5]*(P_010001010*QR_010000001000+P_010001110*QR_010000001001+P_010101010*QR_010000001010+P_010101110*QR_010000001011+P_110001010*QR_010000001100+P_110001110*QR_010000001101+P_110101010*QR_010000001110+P_110101110*QR_010000001111);
ans_temp[ans_id*18+10]+=Pmtrx[3]*(P_010001010*QR_001010000000+P_010001110*QR_001010000001+P_010101010*QR_001010000010+P_010101110*QR_001010000011+P_110001010*QR_001010000100+P_110001110*QR_001010000101+P_110101010*QR_001010000110+P_110101110*QR_001010000111);
ans_temp[ans_id*18+10]+=Pmtrx[4]*(P_010001010*QR_000011000000+P_010001110*QR_000011000001+P_010101010*QR_000011000010+P_010101110*QR_000011000011+P_110001010*QR_000011000100+P_110001110*QR_000011000101+P_110101010*QR_000011000110+P_110101110*QR_000011000111);
ans_temp[ans_id*18+10]+=Pmtrx[5]*(P_010001010*QR_000010001000+P_010001110*QR_000010001001+P_010101010*QR_000010001010+P_010101110*QR_000010001011+P_110001010*QR_000010001100+P_110001110*QR_000010001101+P_110101010*QR_000010001110+P_110101110*QR_000010001111);
ans_temp[ans_id*18+11]+=Pmtrx[3]*(P_010001010*QR_001000010000+P_010001110*QR_001000010001+P_010101010*QR_001000010010+P_010101110*QR_001000010011+P_110001010*QR_001000010100+P_110001110*QR_001000010101+P_110101010*QR_001000010110+P_110101110*QR_001000010111);
ans_temp[ans_id*18+11]+=Pmtrx[4]*(P_010001010*QR_000001010000+P_010001110*QR_000001010001+P_010101010*QR_000001010010+P_010101110*QR_000001010011+P_110001010*QR_000001010100+P_110001110*QR_000001010101+P_110101010*QR_000001010110+P_110101110*QR_000001010111);
ans_temp[ans_id*18+11]+=Pmtrx[5]*(P_010001010*QR_000000011000+P_010001110*QR_000000011001+P_010101010*QR_000000011010+P_010101110*QR_000000011011+P_110001010*QR_000000011100+P_110001110*QR_000000011101+P_110101010*QR_000000011110+P_110101110*QR_000000011111);
ans_temp[ans_id*18+9]+=Pmtrx[6]*(P_010000011*QR_011000000000+P_010000111*QR_011000000001+P_010000211*QR_011000000002+P_110000011*QR_011000000100+P_110000111*QR_011000000101+P_110000211*QR_011000000102);
ans_temp[ans_id*18+9]+=Pmtrx[7]*(P_010000011*QR_010001000000+P_010000111*QR_010001000001+P_010000211*QR_010001000002+P_110000011*QR_010001000100+P_110000111*QR_010001000101+P_110000211*QR_010001000102);
ans_temp[ans_id*18+9]+=Pmtrx[8]*(P_010000011*QR_010000001000+P_010000111*QR_010000001001+P_010000211*QR_010000001002+P_110000011*QR_010000001100+P_110000111*QR_010000001101+P_110000211*QR_010000001102);
ans_temp[ans_id*18+10]+=Pmtrx[6]*(P_010000011*QR_001010000000+P_010000111*QR_001010000001+P_010000211*QR_001010000002+P_110000011*QR_001010000100+P_110000111*QR_001010000101+P_110000211*QR_001010000102);
ans_temp[ans_id*18+10]+=Pmtrx[7]*(P_010000011*QR_000011000000+P_010000111*QR_000011000001+P_010000211*QR_000011000002+P_110000011*QR_000011000100+P_110000111*QR_000011000101+P_110000211*QR_000011000102);
ans_temp[ans_id*18+10]+=Pmtrx[8]*(P_010000011*QR_000010001000+P_010000111*QR_000010001001+P_010000211*QR_000010001002+P_110000011*QR_000010001100+P_110000111*QR_000010001101+P_110000211*QR_000010001102);
ans_temp[ans_id*18+11]+=Pmtrx[6]*(P_010000011*QR_001000010000+P_010000111*QR_001000010001+P_010000211*QR_001000010002+P_110000011*QR_001000010100+P_110000111*QR_001000010101+P_110000211*QR_001000010102);
ans_temp[ans_id*18+11]+=Pmtrx[7]*(P_010000011*QR_000001010000+P_010000111*QR_000001010001+P_010000211*QR_000001010002+P_110000011*QR_000001010100+P_110000111*QR_000001010101+P_110000211*QR_000001010102);
ans_temp[ans_id*18+11]+=Pmtrx[8]*(P_010000011*QR_000000011000+P_010000111*QR_000000011001+P_010000211*QR_000000011002+P_110000011*QR_000000011100+P_110000111*QR_000000011101+P_110000211*QR_000000011102);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(P_001010010*QR_011000000000+P_001010110*QR_011000000001+P_001110010*QR_011000000010+P_001110110*QR_011000000011+P_101010010*QR_011000000100+P_101010110*QR_011000000101+P_101110010*QR_011000000110+P_101110110*QR_011000000111);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(P_001010010*QR_010001000000+P_001010110*QR_010001000001+P_001110010*QR_010001000010+P_001110110*QR_010001000011+P_101010010*QR_010001000100+P_101010110*QR_010001000101+P_101110010*QR_010001000110+P_101110110*QR_010001000111);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(P_001010010*QR_010000001000+P_001010110*QR_010000001001+P_001110010*QR_010000001010+P_001110110*QR_010000001011+P_101010010*QR_010000001100+P_101010110*QR_010000001101+P_101110010*QR_010000001110+P_101110110*QR_010000001111);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(P_001010010*QR_001010000000+P_001010110*QR_001010000001+P_001110010*QR_001010000010+P_001110110*QR_001010000011+P_101010010*QR_001010000100+P_101010110*QR_001010000101+P_101110010*QR_001010000110+P_101110110*QR_001010000111);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(P_001010010*QR_000011000000+P_001010110*QR_000011000001+P_001110010*QR_000011000010+P_001110110*QR_000011000011+P_101010010*QR_000011000100+P_101010110*QR_000011000101+P_101110010*QR_000011000110+P_101110110*QR_000011000111);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(P_001010010*QR_000010001000+P_001010110*QR_000010001001+P_001110010*QR_000010001010+P_001110110*QR_000010001011+P_101010010*QR_000010001100+P_101010110*QR_000010001101+P_101110010*QR_000010001110+P_101110110*QR_000010001111);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(P_001010010*QR_001000010000+P_001010110*QR_001000010001+P_001110010*QR_001000010010+P_001110110*QR_001000010011+P_101010010*QR_001000010100+P_101010110*QR_001000010101+P_101110010*QR_001000010110+P_101110110*QR_001000010111);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(P_001010010*QR_000001010000+P_001010110*QR_000001010001+P_001110010*QR_000001010010+P_001110110*QR_000001010011+P_101010010*QR_000001010100+P_101010110*QR_000001010101+P_101110010*QR_000001010110+P_101110110*QR_000001010111);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(P_001010010*QR_000000011000+P_001010110*QR_000000011001+P_001110010*QR_000000011010+P_001110110*QR_000000011011+P_101010010*QR_000000011100+P_101010110*QR_000000011101+P_101110010*QR_000000011110+P_101110110*QR_000000011111);
ans_temp[ans_id*18+12]+=Pmtrx[3]*(P_000011010*QR_011000000000+P_000011110*QR_011000000001+P_000111010*QR_011000000010+P_000111110*QR_011000000011+P_000211010*QR_011000000020+P_000211110*QR_011000000021);
ans_temp[ans_id*18+12]+=Pmtrx[4]*(P_000011010*QR_010001000000+P_000011110*QR_010001000001+P_000111010*QR_010001000010+P_000111110*QR_010001000011+P_000211010*QR_010001000020+P_000211110*QR_010001000021);
ans_temp[ans_id*18+12]+=Pmtrx[5]*(P_000011010*QR_010000001000+P_000011110*QR_010000001001+P_000111010*QR_010000001010+P_000111110*QR_010000001011+P_000211010*QR_010000001020+P_000211110*QR_010000001021);
ans_temp[ans_id*18+13]+=Pmtrx[3]*(P_000011010*QR_001010000000+P_000011110*QR_001010000001+P_000111010*QR_001010000010+P_000111110*QR_001010000011+P_000211010*QR_001010000020+P_000211110*QR_001010000021);
ans_temp[ans_id*18+13]+=Pmtrx[4]*(P_000011010*QR_000011000000+P_000011110*QR_000011000001+P_000111010*QR_000011000010+P_000111110*QR_000011000011+P_000211010*QR_000011000020+P_000211110*QR_000011000021);
ans_temp[ans_id*18+13]+=Pmtrx[5]*(P_000011010*QR_000010001000+P_000011110*QR_000010001001+P_000111010*QR_000010001010+P_000111110*QR_000010001011+P_000211010*QR_000010001020+P_000211110*QR_000010001021);
ans_temp[ans_id*18+14]+=Pmtrx[3]*(P_000011010*QR_001000010000+P_000011110*QR_001000010001+P_000111010*QR_001000010010+P_000111110*QR_001000010011+P_000211010*QR_001000010020+P_000211110*QR_001000010021);
ans_temp[ans_id*18+14]+=Pmtrx[4]*(P_000011010*QR_000001010000+P_000011110*QR_000001010001+P_000111010*QR_000001010010+P_000111110*QR_000001010011+P_000211010*QR_000001010020+P_000211110*QR_000001010021);
ans_temp[ans_id*18+14]+=Pmtrx[5]*(P_000011010*QR_000000011000+P_000011110*QR_000000011001+P_000111010*QR_000000011010+P_000111110*QR_000000011011+P_000211010*QR_000000011020+P_000211110*QR_000000011021);
ans_temp[ans_id*18+12]+=Pmtrx[6]*(P_000010011*QR_011000000000+P_000010111*QR_011000000001+P_000010211*QR_011000000002+P_000110011*QR_011000000010+P_000110111*QR_011000000011+P_000110211*QR_011000000012);
ans_temp[ans_id*18+12]+=Pmtrx[7]*(P_000010011*QR_010001000000+P_000010111*QR_010001000001+P_000010211*QR_010001000002+P_000110011*QR_010001000010+P_000110111*QR_010001000011+P_000110211*QR_010001000012);
ans_temp[ans_id*18+12]+=Pmtrx[8]*(P_000010011*QR_010000001000+P_000010111*QR_010000001001+P_000010211*QR_010000001002+P_000110011*QR_010000001010+P_000110111*QR_010000001011+P_000110211*QR_010000001012);
ans_temp[ans_id*18+13]+=Pmtrx[6]*(P_000010011*QR_001010000000+P_000010111*QR_001010000001+P_000010211*QR_001010000002+P_000110011*QR_001010000010+P_000110111*QR_001010000011+P_000110211*QR_001010000012);
ans_temp[ans_id*18+13]+=Pmtrx[7]*(P_000010011*QR_000011000000+P_000010111*QR_000011000001+P_000010211*QR_000011000002+P_000110011*QR_000011000010+P_000110111*QR_000011000011+P_000110211*QR_000011000012);
ans_temp[ans_id*18+13]+=Pmtrx[8]*(P_000010011*QR_000010001000+P_000010111*QR_000010001001+P_000010211*QR_000010001002+P_000110011*QR_000010001010+P_000110111*QR_000010001011+P_000110211*QR_000010001012);
ans_temp[ans_id*18+14]+=Pmtrx[6]*(P_000010011*QR_001000010000+P_000010111*QR_001000010001+P_000010211*QR_001000010002+P_000110011*QR_001000010010+P_000110111*QR_001000010011+P_000110211*QR_001000010012);
ans_temp[ans_id*18+14]+=Pmtrx[7]*(P_000010011*QR_000001010000+P_000010111*QR_000001010001+P_000010211*QR_000001010002+P_000110011*QR_000001010010+P_000110111*QR_000001010011+P_000110211*QR_000001010012);
ans_temp[ans_id*18+14]+=Pmtrx[8]*(P_000010011*QR_000000011000+P_000010111*QR_000000011001+P_000010211*QR_000000011002+P_000110011*QR_000000011010+P_000110111*QR_000000011011+P_000110211*QR_000000011012);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(P_001000020*QR_011000000000+P_001000120*QR_011000000001+P_001000220*QR_011000000002+P_101000020*QR_011000000100+P_101000120*QR_011000000101+P_101000220*QR_011000000102);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(P_001000020*QR_010001000000+P_001000120*QR_010001000001+P_001000220*QR_010001000002+P_101000020*QR_010001000100+P_101000120*QR_010001000101+P_101000220*QR_010001000102);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(P_001000020*QR_010000001000+P_001000120*QR_010000001001+P_001000220*QR_010000001002+P_101000020*QR_010000001100+P_101000120*QR_010000001101+P_101000220*QR_010000001102);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(P_001000020*QR_001010000000+P_001000120*QR_001010000001+P_001000220*QR_001010000002+P_101000020*QR_001010000100+P_101000120*QR_001010000101+P_101000220*QR_001010000102);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(P_001000020*QR_000011000000+P_001000120*QR_000011000001+P_001000220*QR_000011000002+P_101000020*QR_000011000100+P_101000120*QR_000011000101+P_101000220*QR_000011000102);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(P_001000020*QR_000010001000+P_001000120*QR_000010001001+P_001000220*QR_000010001002+P_101000020*QR_000010001100+P_101000120*QR_000010001101+P_101000220*QR_000010001102);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(P_001000020*QR_001000010000+P_001000120*QR_001000010001+P_001000220*QR_001000010002+P_101000020*QR_001000010100+P_101000120*QR_001000010101+P_101000220*QR_001000010102);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(P_001000020*QR_000001010000+P_001000120*QR_000001010001+P_001000220*QR_000001010002+P_101000020*QR_000001010100+P_101000120*QR_000001010101+P_101000220*QR_000001010102);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(P_001000020*QR_000000011000+P_001000120*QR_000000011001+P_001000220*QR_000000011002+P_101000020*QR_000000011100+P_101000120*QR_000000011101+P_101000220*QR_000000011102);
ans_temp[ans_id*18+15]+=Pmtrx[3]*(P_000001020*QR_011000000000+P_000001120*QR_011000000001+P_000001220*QR_011000000002+P_000101020*QR_011000000010+P_000101120*QR_011000000011+P_000101220*QR_011000000012);
ans_temp[ans_id*18+15]+=Pmtrx[4]*(P_000001020*QR_010001000000+P_000001120*QR_010001000001+P_000001220*QR_010001000002+P_000101020*QR_010001000010+P_000101120*QR_010001000011+P_000101220*QR_010001000012);
ans_temp[ans_id*18+15]+=Pmtrx[5]*(P_000001020*QR_010000001000+P_000001120*QR_010000001001+P_000001220*QR_010000001002+P_000101020*QR_010000001010+P_000101120*QR_010000001011+P_000101220*QR_010000001012);
ans_temp[ans_id*18+16]+=Pmtrx[3]*(P_000001020*QR_001010000000+P_000001120*QR_001010000001+P_000001220*QR_001010000002+P_000101020*QR_001010000010+P_000101120*QR_001010000011+P_000101220*QR_001010000012);
ans_temp[ans_id*18+16]+=Pmtrx[4]*(P_000001020*QR_000011000000+P_000001120*QR_000011000001+P_000001220*QR_000011000002+P_000101020*QR_000011000010+P_000101120*QR_000011000011+P_000101220*QR_000011000012);
ans_temp[ans_id*18+16]+=Pmtrx[5]*(P_000001020*QR_000010001000+P_000001120*QR_000010001001+P_000001220*QR_000010001002+P_000101020*QR_000010001010+P_000101120*QR_000010001011+P_000101220*QR_000010001012);
ans_temp[ans_id*18+17]+=Pmtrx[3]*(P_000001020*QR_001000010000+P_000001120*QR_001000010001+P_000001220*QR_001000010002+P_000101020*QR_001000010010+P_000101120*QR_001000010011+P_000101220*QR_001000010012);
ans_temp[ans_id*18+17]+=Pmtrx[4]*(P_000001020*QR_000001010000+P_000001120*QR_000001010001+P_000001220*QR_000001010002+P_000101020*QR_000001010010+P_000101120*QR_000001010011+P_000101220*QR_000001010012);
ans_temp[ans_id*18+17]+=Pmtrx[5]*(P_000001020*QR_000000011000+P_000001120*QR_000000011001+P_000001220*QR_000000011002+P_000101020*QR_000000011010+P_000101120*QR_000000011011+P_000101220*QR_000000011012);
ans_temp[ans_id*18+15]+=Pmtrx[6]*(P_000000021*QR_011000000000+P_000000121*QR_011000000001+P_000000221*QR_011000000002+P_000000321*QR_011000000003);
ans_temp[ans_id*18+15]+=Pmtrx[7]*(P_000000021*QR_010001000000+P_000000121*QR_010001000001+P_000000221*QR_010001000002+P_000000321*QR_010001000003);
ans_temp[ans_id*18+15]+=Pmtrx[8]*(P_000000021*QR_010000001000+P_000000121*QR_010000001001+P_000000221*QR_010000001002+P_000000321*QR_010000001003);
ans_temp[ans_id*18+16]+=Pmtrx[6]*(P_000000021*QR_001010000000+P_000000121*QR_001010000001+P_000000221*QR_001010000002+P_000000321*QR_001010000003);
ans_temp[ans_id*18+16]+=Pmtrx[7]*(P_000000021*QR_000011000000+P_000000121*QR_000011000001+P_000000221*QR_000011000002+P_000000321*QR_000011000003);
ans_temp[ans_id*18+16]+=Pmtrx[8]*(P_000000021*QR_000010001000+P_000000121*QR_000010001001+P_000000221*QR_000010001002+P_000000321*QR_000010001003);
ans_temp[ans_id*18+17]+=Pmtrx[6]*(P_000000021*QR_001000010000+P_000000121*QR_001000010001+P_000000221*QR_001000010002+P_000000321*QR_001000010003);
ans_temp[ans_id*18+17]+=Pmtrx[7]*(P_000000021*QR_000001010000+P_000000121*QR_000001010001+P_000000221*QR_000001010002+P_000000321*QR_000001010003);
ans_temp[ans_id*18+17]+=Pmtrx[8]*(P_000000021*QR_000000011000+P_000000121*QR_000000011001+P_000000221*QR_000000011002+P_000000321*QR_000000011003);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kp_ddpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_ket_start;ii<primit_ket_end;ii++){
unsigned int id_ket=id_ket_in[ii];
double QX=Q[ii*3+0];
double QY=Q[ii*3+1];
double QZ=Q[ii*3+2];
double Qd_010[3];
Qd_010[0]=QC[ii*3+0];
Qd_010[1]=QC[ii*3+1];
Qd_010[2]=QC[ii*3+2];
double Qd_001[3];
Qd_001[0]=QD[ii*3+0];
Qd_001[1]=QD[ii*3+1];
Qd_001[2]=QD[ii*3+2];
double Eta=Eta_in[ii];
double pq=pq_in[ii];
float K2_q=K2_q_in[ii];
double aQin1=1/(2*Eta);
for(unsigned int j=tId_x;j<primit_bra_end-primit_bra_start;j+=tdis){
unsigned int jj=primit_bra_start+j;
unsigned int id_bra=tex1Dfetch(tex_id_bra,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_p=tex1Dfetch(tex_K2_p,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Zta,jj);
double Zta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pp,jj);
double pp=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+0);
double PX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+1);
double PY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_P,jj*3+2);
double PZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_010[3];
temp_int2=tex1Dfetch(tex_PA,jj*3+0);
Pd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+1);
Pd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PA,jj*3+2);
Pd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Pd_001[3];
temp_int2=tex1Dfetch(tex_PB,jj*3+0);
Pd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+1);
Pd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_PB,jj*3+2);
Pd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[7];
Ft_fs_6(6,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[6]*=64*alphaT*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aPin1=1/(2*Zta);
double R_100[6];
double R_200[5];
double R_300[4];
double R_400[3];
double R_500[2];
double R_600[1];
double R_010[6];
double R_110[5];
double R_210[4];
double R_310[3];
double R_410[2];
double R_510[1];
double R_020[5];
double R_120[4];
double R_220[3];
double R_320[2];
double R_420[1];
double R_030[4];
double R_130[3];
double R_230[2];
double R_330[1];
double R_040[3];
double R_140[2];
double R_240[1];
double R_050[2];
double R_150[1];
double R_060[1];
double R_001[6];
double R_101[5];
double R_201[4];
double R_301[3];
double R_401[2];
double R_501[1];
double R_011[5];
double R_111[4];
double R_211[3];
double R_311[2];
double R_411[1];
double R_021[4];
double R_121[3];
double R_221[2];
double R_321[1];
double R_031[3];
double R_131[2];
double R_231[1];
double R_041[2];
double R_141[1];
double R_051[1];
double R_002[5];
double R_102[4];
double R_202[3];
double R_302[2];
double R_402[1];
double R_012[4];
double R_112[3];
double R_212[2];
double R_312[1];
double R_022[3];
double R_122[2];
double R_222[1];
double R_032[2];
double R_132[1];
double R_042[1];
double R_003[4];
double R_103[3];
double R_203[2];
double R_303[1];
double R_013[3];
double R_113[2];
double R_213[1];
double R_023[2];
double R_123[1];
double R_033[1];
double R_004[3];
double R_104[2];
double R_204[1];
double R_014[2];
double R_114[1];
double R_024[1];
double R_005[2];
double R_105[1];
double R_015[1];
double R_006[1];
for(int i=0;i<6;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<6;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<6;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<5;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<5;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<5;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<5;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<5;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<5;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<4;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<4;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<4;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<4;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<4;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<4;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<4;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<4;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<4;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<3;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<3;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<3;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<3;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<3;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<3;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<3;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<3;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<3;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<3;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<3;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<3;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<3;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<3;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<3;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<2;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<2;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<2;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<2;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<2;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<2;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<2;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<2;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<2;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<2;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<2;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<2;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<2;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<2;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<2;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<2;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<2;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<2;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<2;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<2;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<2;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
for(int i=0;i<1;i++){
R_600[i]=TX*R_500[i+1]+5*R_400[i+1];
}
for(int i=0;i<1;i++){
R_510[i]=TY*R_500[i+1];
}
for(int i=0;i<1;i++){
R_420[i]=TX*R_320[i+1]+3*R_220[i+1];
}
for(int i=0;i<1;i++){
R_330[i]=TX*R_230[i+1]+2*R_130[i+1];
}
for(int i=0;i<1;i++){
R_240[i]=TY*R_230[i+1]+3*R_220[i+1];
}
for(int i=0;i<1;i++){
R_150[i]=TX*R_050[i+1];
}
for(int i=0;i<1;i++){
R_060[i]=TY*R_050[i+1]+5*R_040[i+1];
}
for(int i=0;i<1;i++){
R_501[i]=TZ*R_500[i+1];
}
for(int i=0;i<1;i++){
R_411[i]=TY*R_401[i+1];
}
for(int i=0;i<1;i++){
R_321[i]=TZ*R_320[i+1];
}
for(int i=0;i<1;i++){
R_231[i]=TZ*R_230[i+1];
}
for(int i=0;i<1;i++){
R_141[i]=TX*R_041[i+1];
}
for(int i=0;i<1;i++){
R_051[i]=TZ*R_050[i+1];
}
for(int i=0;i<1;i++){
R_402[i]=TX*R_302[i+1]+3*R_202[i+1];
}
for(int i=0;i<1;i++){
R_312[i]=TY*R_302[i+1];
}
for(int i=0;i<1;i++){
R_222[i]=TX*R_122[i+1]+R_022[i+1];
}
for(int i=0;i<1;i++){
R_132[i]=TX*R_032[i+1];
}
for(int i=0;i<1;i++){
R_042[i]=TY*R_032[i+1]+3*R_022[i+1];
}
for(int i=0;i<1;i++){
R_303[i]=TX*R_203[i+1]+2*R_103[i+1];
}
for(int i=0;i<1;i++){
R_213[i]=TY*R_203[i+1];
}
for(int i=0;i<1;i++){
R_123[i]=TX*R_023[i+1];
}
for(int i=0;i<1;i++){
R_033[i]=TY*R_023[i+1]+2*R_013[i+1];
}
for(int i=0;i<1;i++){
R_204[i]=TZ*R_203[i+1]+3*R_202[i+1];
}
for(int i=0;i<1;i++){
R_114[i]=TX*R_014[i+1];
}
for(int i=0;i<1;i++){
R_024[i]=TZ*R_023[i+1]+3*R_022[i+1];
}
for(int i=0;i<1;i++){
R_105[i]=TX*R_005[i+1];
}
for(int i=0;i<1;i++){
R_015[i]=TY*R_005[i+1];
}
for(int i=0;i<1;i++){
R_006[i]=TZ*R_005[i+1]+5*R_004[i+1];
}
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_012[3];
double Pd_112[3];
double Pd_212[3];
double Pd_312[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
double Pd_021[3];
double Pd_121[3];
double Pd_221[3];
double Pd_321[3];
double Pd_022[3];
double Pd_122[3];
double Pd_222[3];
double Pd_322[3];
double Pd_422[3];
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
for(int i=0;i<3;i++){
Pd_012[i]=Pd_111[i]+Pd_001[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_112[i]=2*Pd_211[i]+Pd_001[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_212[i]=Pd_001[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_312[i]=aPin1*Pd_211[i];
}
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
for(int i=0;i<3;i++){
Pd_021[i]=Pd_111[i]+Pd_010[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_121[i]=2*Pd_211[i]+Pd_010[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_221[i]=Pd_010[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_321[i]=aPin1*Pd_211[i];
}
for(int i=0;i<3;i++){
Pd_022[i]=Pd_112[i]+Pd_010[i]*Pd_012[i];
}
for(int i=0;i<3;i++){
Pd_122[i]=2*Pd_212[i]+Pd_010[i]*Pd_112[i]+aPin1*Pd_012[i];
}
for(int i=0;i<3;i++){
Pd_222[i]=3*Pd_312[i]+Pd_010[i]*Pd_212[i]+aPin1*Pd_112[i];
}
for(int i=0;i<3;i++){
Pd_322[i]=Pd_010[i]*Pd_312[i]+aPin1*Pd_212[i];
}
for(int i=0;i<3;i++){
Pd_422[i]=aPin1*Pd_312[i];
}
double P_022000000=Pd_022[0];
double P_122000000=Pd_122[0];
double P_222000000=Pd_222[0];
double P_322000000=Pd_322[0];
double P_422000000=Pd_422[0];
double P_021001000=Pd_021[0]*Pd_001[1];
double P_021101000=Pd_021[0]*Pd_101[1];
double P_121001000=Pd_121[0]*Pd_001[1];
double P_121101000=Pd_121[0]*Pd_101[1];
double P_221001000=Pd_221[0]*Pd_001[1];
double P_221101000=Pd_221[0]*Pd_101[1];
double P_321001000=Pd_321[0]*Pd_001[1];
double P_321101000=Pd_321[0]*Pd_101[1];
double P_020002000=Pd_020[0]*Pd_002[1];
double P_020102000=Pd_020[0]*Pd_102[1];
double P_020202000=Pd_020[0]*Pd_202[1];
double P_120002000=Pd_120[0]*Pd_002[1];
double P_120102000=Pd_120[0]*Pd_102[1];
double P_120202000=Pd_120[0]*Pd_202[1];
double P_220002000=Pd_220[0]*Pd_002[1];
double P_220102000=Pd_220[0]*Pd_102[1];
double P_220202000=Pd_220[0]*Pd_202[1];
double P_021000001=Pd_021[0]*Pd_001[2];
double P_021000101=Pd_021[0]*Pd_101[2];
double P_121000001=Pd_121[0]*Pd_001[2];
double P_121000101=Pd_121[0]*Pd_101[2];
double P_221000001=Pd_221[0]*Pd_001[2];
double P_221000101=Pd_221[0]*Pd_101[2];
double P_321000001=Pd_321[0]*Pd_001[2];
double P_321000101=Pd_321[0]*Pd_101[2];
double P_020001001=Pd_020[0]*Pd_001[1]*Pd_001[2];
double P_020001101=Pd_020[0]*Pd_001[1]*Pd_101[2];
double P_020101001=Pd_020[0]*Pd_101[1]*Pd_001[2];
double P_020101101=Pd_020[0]*Pd_101[1]*Pd_101[2];
double P_120001001=Pd_120[0]*Pd_001[1]*Pd_001[2];
double P_120001101=Pd_120[0]*Pd_001[1]*Pd_101[2];
double P_120101001=Pd_120[0]*Pd_101[1]*Pd_001[2];
double P_120101101=Pd_120[0]*Pd_101[1]*Pd_101[2];
double P_220001001=Pd_220[0]*Pd_001[1]*Pd_001[2];
double P_220001101=Pd_220[0]*Pd_001[1]*Pd_101[2];
double P_220101001=Pd_220[0]*Pd_101[1]*Pd_001[2];
double P_220101101=Pd_220[0]*Pd_101[1]*Pd_101[2];
double P_020000002=Pd_020[0]*Pd_002[2];
double P_020000102=Pd_020[0]*Pd_102[2];
double P_020000202=Pd_020[0]*Pd_202[2];
double P_120000002=Pd_120[0]*Pd_002[2];
double P_120000102=Pd_120[0]*Pd_102[2];
double P_120000202=Pd_120[0]*Pd_202[2];
double P_220000002=Pd_220[0]*Pd_002[2];
double P_220000102=Pd_220[0]*Pd_102[2];
double P_220000202=Pd_220[0]*Pd_202[2];
double P_012010000=Pd_012[0]*Pd_010[1];
double P_012110000=Pd_012[0]*Pd_110[1];
double P_112010000=Pd_112[0]*Pd_010[1];
double P_112110000=Pd_112[0]*Pd_110[1];
double P_212010000=Pd_212[0]*Pd_010[1];
double P_212110000=Pd_212[0]*Pd_110[1];
double P_312010000=Pd_312[0]*Pd_010[1];
double P_312110000=Pd_312[0]*Pd_110[1];
double P_011011000=Pd_011[0]*Pd_011[1];
double P_011111000=Pd_011[0]*Pd_111[1];
double P_011211000=Pd_011[0]*Pd_211[1];
double P_111011000=Pd_111[0]*Pd_011[1];
double P_111111000=Pd_111[0]*Pd_111[1];
double P_111211000=Pd_111[0]*Pd_211[1];
double P_211011000=Pd_211[0]*Pd_011[1];
double P_211111000=Pd_211[0]*Pd_111[1];
double P_211211000=Pd_211[0]*Pd_211[1];
double P_010012000=Pd_010[0]*Pd_012[1];
double P_010112000=Pd_010[0]*Pd_112[1];
double P_010212000=Pd_010[0]*Pd_212[1];
double P_010312000=Pd_010[0]*Pd_312[1];
double P_110012000=Pd_110[0]*Pd_012[1];
double P_110112000=Pd_110[0]*Pd_112[1];
double P_110212000=Pd_110[0]*Pd_212[1];
double P_110312000=Pd_110[0]*Pd_312[1];
double P_011010001=Pd_011[0]*Pd_010[1]*Pd_001[2];
double P_011010101=Pd_011[0]*Pd_010[1]*Pd_101[2];
double P_011110001=Pd_011[0]*Pd_110[1]*Pd_001[2];
double P_011110101=Pd_011[0]*Pd_110[1]*Pd_101[2];
double P_111010001=Pd_111[0]*Pd_010[1]*Pd_001[2];
double P_111010101=Pd_111[0]*Pd_010[1]*Pd_101[2];
double P_111110001=Pd_111[0]*Pd_110[1]*Pd_001[2];
double P_111110101=Pd_111[0]*Pd_110[1]*Pd_101[2];
double P_211010001=Pd_211[0]*Pd_010[1]*Pd_001[2];
double P_211010101=Pd_211[0]*Pd_010[1]*Pd_101[2];
double P_211110001=Pd_211[0]*Pd_110[1]*Pd_001[2];
double P_211110101=Pd_211[0]*Pd_110[1]*Pd_101[2];
double P_010011001=Pd_010[0]*Pd_011[1]*Pd_001[2];
double P_010011101=Pd_010[0]*Pd_011[1]*Pd_101[2];
double P_010111001=Pd_010[0]*Pd_111[1]*Pd_001[2];
double P_010111101=Pd_010[0]*Pd_111[1]*Pd_101[2];
double P_010211001=Pd_010[0]*Pd_211[1]*Pd_001[2];
double P_010211101=Pd_010[0]*Pd_211[1]*Pd_101[2];
double P_110011001=Pd_110[0]*Pd_011[1]*Pd_001[2];
double P_110011101=Pd_110[0]*Pd_011[1]*Pd_101[2];
double P_110111001=Pd_110[0]*Pd_111[1]*Pd_001[2];
double P_110111101=Pd_110[0]*Pd_111[1]*Pd_101[2];
double P_110211001=Pd_110[0]*Pd_211[1]*Pd_001[2];
double P_110211101=Pd_110[0]*Pd_211[1]*Pd_101[2];
double P_010010002=Pd_010[0]*Pd_010[1]*Pd_002[2];
double P_010010102=Pd_010[0]*Pd_010[1]*Pd_102[2];
double P_010010202=Pd_010[0]*Pd_010[1]*Pd_202[2];
double P_010110002=Pd_010[0]*Pd_110[1]*Pd_002[2];
double P_010110102=Pd_010[0]*Pd_110[1]*Pd_102[2];
double P_010110202=Pd_010[0]*Pd_110[1]*Pd_202[2];
double P_110010002=Pd_110[0]*Pd_010[1]*Pd_002[2];
double P_110010102=Pd_110[0]*Pd_010[1]*Pd_102[2];
double P_110010202=Pd_110[0]*Pd_010[1]*Pd_202[2];
double P_110110002=Pd_110[0]*Pd_110[1]*Pd_002[2];
double P_110110102=Pd_110[0]*Pd_110[1]*Pd_102[2];
double P_110110202=Pd_110[0]*Pd_110[1]*Pd_202[2];
double P_002020000=Pd_002[0]*Pd_020[1];
double P_002120000=Pd_002[0]*Pd_120[1];
double P_002220000=Pd_002[0]*Pd_220[1];
double P_102020000=Pd_102[0]*Pd_020[1];
double P_102120000=Pd_102[0]*Pd_120[1];
double P_102220000=Pd_102[0]*Pd_220[1];
double P_202020000=Pd_202[0]*Pd_020[1];
double P_202120000=Pd_202[0]*Pd_120[1];
double P_202220000=Pd_202[0]*Pd_220[1];
double P_001021000=Pd_001[0]*Pd_021[1];
double P_001121000=Pd_001[0]*Pd_121[1];
double P_001221000=Pd_001[0]*Pd_221[1];
double P_001321000=Pd_001[0]*Pd_321[1];
double P_101021000=Pd_101[0]*Pd_021[1];
double P_101121000=Pd_101[0]*Pd_121[1];
double P_101221000=Pd_101[0]*Pd_221[1];
double P_101321000=Pd_101[0]*Pd_321[1];
double P_000022000=Pd_022[1];
double P_000122000=Pd_122[1];
double P_000222000=Pd_222[1];
double P_000322000=Pd_322[1];
double P_000422000=Pd_422[1];
double P_001020001=Pd_001[0]*Pd_020[1]*Pd_001[2];
double P_001020101=Pd_001[0]*Pd_020[1]*Pd_101[2];
double P_001120001=Pd_001[0]*Pd_120[1]*Pd_001[2];
double P_001120101=Pd_001[0]*Pd_120[1]*Pd_101[2];
double P_001220001=Pd_001[0]*Pd_220[1]*Pd_001[2];
double P_001220101=Pd_001[0]*Pd_220[1]*Pd_101[2];
double P_101020001=Pd_101[0]*Pd_020[1]*Pd_001[2];
double P_101020101=Pd_101[0]*Pd_020[1]*Pd_101[2];
double P_101120001=Pd_101[0]*Pd_120[1]*Pd_001[2];
double P_101120101=Pd_101[0]*Pd_120[1]*Pd_101[2];
double P_101220001=Pd_101[0]*Pd_220[1]*Pd_001[2];
double P_101220101=Pd_101[0]*Pd_220[1]*Pd_101[2];
double P_000021001=Pd_021[1]*Pd_001[2];
double P_000021101=Pd_021[1]*Pd_101[2];
double P_000121001=Pd_121[1]*Pd_001[2];
double P_000121101=Pd_121[1]*Pd_101[2];
double P_000221001=Pd_221[1]*Pd_001[2];
double P_000221101=Pd_221[1]*Pd_101[2];
double P_000321001=Pd_321[1]*Pd_001[2];
double P_000321101=Pd_321[1]*Pd_101[2];
double P_000020002=Pd_020[1]*Pd_002[2];
double P_000020102=Pd_020[1]*Pd_102[2];
double P_000020202=Pd_020[1]*Pd_202[2];
double P_000120002=Pd_120[1]*Pd_002[2];
double P_000120102=Pd_120[1]*Pd_102[2];
double P_000120202=Pd_120[1]*Pd_202[2];
double P_000220002=Pd_220[1]*Pd_002[2];
double P_000220102=Pd_220[1]*Pd_102[2];
double P_000220202=Pd_220[1]*Pd_202[2];
double P_012000010=Pd_012[0]*Pd_010[2];
double P_012000110=Pd_012[0]*Pd_110[2];
double P_112000010=Pd_112[0]*Pd_010[2];
double P_112000110=Pd_112[0]*Pd_110[2];
double P_212000010=Pd_212[0]*Pd_010[2];
double P_212000110=Pd_212[0]*Pd_110[2];
double P_312000010=Pd_312[0]*Pd_010[2];
double P_312000110=Pd_312[0]*Pd_110[2];
double P_011001010=Pd_011[0]*Pd_001[1]*Pd_010[2];
double P_011001110=Pd_011[0]*Pd_001[1]*Pd_110[2];
double P_011101010=Pd_011[0]*Pd_101[1]*Pd_010[2];
double P_011101110=Pd_011[0]*Pd_101[1]*Pd_110[2];
double P_111001010=Pd_111[0]*Pd_001[1]*Pd_010[2];
double P_111001110=Pd_111[0]*Pd_001[1]*Pd_110[2];
double P_111101010=Pd_111[0]*Pd_101[1]*Pd_010[2];
double P_111101110=Pd_111[0]*Pd_101[1]*Pd_110[2];
double P_211001010=Pd_211[0]*Pd_001[1]*Pd_010[2];
double P_211001110=Pd_211[0]*Pd_001[1]*Pd_110[2];
double P_211101010=Pd_211[0]*Pd_101[1]*Pd_010[2];
double P_211101110=Pd_211[0]*Pd_101[1]*Pd_110[2];
double P_010002010=Pd_010[0]*Pd_002[1]*Pd_010[2];
double P_010002110=Pd_010[0]*Pd_002[1]*Pd_110[2];
double P_010102010=Pd_010[0]*Pd_102[1]*Pd_010[2];
double P_010102110=Pd_010[0]*Pd_102[1]*Pd_110[2];
double P_010202010=Pd_010[0]*Pd_202[1]*Pd_010[2];
double P_010202110=Pd_010[0]*Pd_202[1]*Pd_110[2];
double P_110002010=Pd_110[0]*Pd_002[1]*Pd_010[2];
double P_110002110=Pd_110[0]*Pd_002[1]*Pd_110[2];
double P_110102010=Pd_110[0]*Pd_102[1]*Pd_010[2];
double P_110102110=Pd_110[0]*Pd_102[1]*Pd_110[2];
double P_110202010=Pd_110[0]*Pd_202[1]*Pd_010[2];
double P_110202110=Pd_110[0]*Pd_202[1]*Pd_110[2];
double P_011000011=Pd_011[0]*Pd_011[2];
double P_011000111=Pd_011[0]*Pd_111[2];
double P_011000211=Pd_011[0]*Pd_211[2];
double P_111000011=Pd_111[0]*Pd_011[2];
double P_111000111=Pd_111[0]*Pd_111[2];
double P_111000211=Pd_111[0]*Pd_211[2];
double P_211000011=Pd_211[0]*Pd_011[2];
double P_211000111=Pd_211[0]*Pd_111[2];
double P_211000211=Pd_211[0]*Pd_211[2];
double P_010001011=Pd_010[0]*Pd_001[1]*Pd_011[2];
double P_010001111=Pd_010[0]*Pd_001[1]*Pd_111[2];
double P_010001211=Pd_010[0]*Pd_001[1]*Pd_211[2];
double P_010101011=Pd_010[0]*Pd_101[1]*Pd_011[2];
double P_010101111=Pd_010[0]*Pd_101[1]*Pd_111[2];
double P_010101211=Pd_010[0]*Pd_101[1]*Pd_211[2];
double P_110001011=Pd_110[0]*Pd_001[1]*Pd_011[2];
double P_110001111=Pd_110[0]*Pd_001[1]*Pd_111[2];
double P_110001211=Pd_110[0]*Pd_001[1]*Pd_211[2];
double P_110101011=Pd_110[0]*Pd_101[1]*Pd_011[2];
double P_110101111=Pd_110[0]*Pd_101[1]*Pd_111[2];
double P_110101211=Pd_110[0]*Pd_101[1]*Pd_211[2];
double P_010000012=Pd_010[0]*Pd_012[2];
double P_010000112=Pd_010[0]*Pd_112[2];
double P_010000212=Pd_010[0]*Pd_212[2];
double P_010000312=Pd_010[0]*Pd_312[2];
double P_110000012=Pd_110[0]*Pd_012[2];
double P_110000112=Pd_110[0]*Pd_112[2];
double P_110000212=Pd_110[0]*Pd_212[2];
double P_110000312=Pd_110[0]*Pd_312[2];
double P_002010010=Pd_002[0]*Pd_010[1]*Pd_010[2];
double P_002010110=Pd_002[0]*Pd_010[1]*Pd_110[2];
double P_002110010=Pd_002[0]*Pd_110[1]*Pd_010[2];
double P_002110110=Pd_002[0]*Pd_110[1]*Pd_110[2];
double P_102010010=Pd_102[0]*Pd_010[1]*Pd_010[2];
double P_102010110=Pd_102[0]*Pd_010[1]*Pd_110[2];
double P_102110010=Pd_102[0]*Pd_110[1]*Pd_010[2];
double P_102110110=Pd_102[0]*Pd_110[1]*Pd_110[2];
double P_202010010=Pd_202[0]*Pd_010[1]*Pd_010[2];
double P_202010110=Pd_202[0]*Pd_010[1]*Pd_110[2];
double P_202110010=Pd_202[0]*Pd_110[1]*Pd_010[2];
double P_202110110=Pd_202[0]*Pd_110[1]*Pd_110[2];
double P_001011010=Pd_001[0]*Pd_011[1]*Pd_010[2];
double P_001011110=Pd_001[0]*Pd_011[1]*Pd_110[2];
double P_001111010=Pd_001[0]*Pd_111[1]*Pd_010[2];
double P_001111110=Pd_001[0]*Pd_111[1]*Pd_110[2];
double P_001211010=Pd_001[0]*Pd_211[1]*Pd_010[2];
double P_001211110=Pd_001[0]*Pd_211[1]*Pd_110[2];
double P_101011010=Pd_101[0]*Pd_011[1]*Pd_010[2];
double P_101011110=Pd_101[0]*Pd_011[1]*Pd_110[2];
double P_101111010=Pd_101[0]*Pd_111[1]*Pd_010[2];
double P_101111110=Pd_101[0]*Pd_111[1]*Pd_110[2];
double P_101211010=Pd_101[0]*Pd_211[1]*Pd_010[2];
double P_101211110=Pd_101[0]*Pd_211[1]*Pd_110[2];
double P_000012010=Pd_012[1]*Pd_010[2];
double P_000012110=Pd_012[1]*Pd_110[2];
double P_000112010=Pd_112[1]*Pd_010[2];
double P_000112110=Pd_112[1]*Pd_110[2];
double P_000212010=Pd_212[1]*Pd_010[2];
double P_000212110=Pd_212[1]*Pd_110[2];
double P_000312010=Pd_312[1]*Pd_010[2];
double P_000312110=Pd_312[1]*Pd_110[2];
double P_001010011=Pd_001[0]*Pd_010[1]*Pd_011[2];
double P_001010111=Pd_001[0]*Pd_010[1]*Pd_111[2];
double P_001010211=Pd_001[0]*Pd_010[1]*Pd_211[2];
double P_001110011=Pd_001[0]*Pd_110[1]*Pd_011[2];
double P_001110111=Pd_001[0]*Pd_110[1]*Pd_111[2];
double P_001110211=Pd_001[0]*Pd_110[1]*Pd_211[2];
double P_101010011=Pd_101[0]*Pd_010[1]*Pd_011[2];
double P_101010111=Pd_101[0]*Pd_010[1]*Pd_111[2];
double P_101010211=Pd_101[0]*Pd_010[1]*Pd_211[2];
double P_101110011=Pd_101[0]*Pd_110[1]*Pd_011[2];
double P_101110111=Pd_101[0]*Pd_110[1]*Pd_111[2];
double P_101110211=Pd_101[0]*Pd_110[1]*Pd_211[2];
double P_000011011=Pd_011[1]*Pd_011[2];
double P_000011111=Pd_011[1]*Pd_111[2];
double P_000011211=Pd_011[1]*Pd_211[2];
double P_000111011=Pd_111[1]*Pd_011[2];
double P_000111111=Pd_111[1]*Pd_111[2];
double P_000111211=Pd_111[1]*Pd_211[2];
double P_000211011=Pd_211[1]*Pd_011[2];
double P_000211111=Pd_211[1]*Pd_111[2];
double P_000211211=Pd_211[1]*Pd_211[2];
double P_000010012=Pd_010[1]*Pd_012[2];
double P_000010112=Pd_010[1]*Pd_112[2];
double P_000010212=Pd_010[1]*Pd_212[2];
double P_000010312=Pd_010[1]*Pd_312[2];
double P_000110012=Pd_110[1]*Pd_012[2];
double P_000110112=Pd_110[1]*Pd_112[2];
double P_000110212=Pd_110[1]*Pd_212[2];
double P_000110312=Pd_110[1]*Pd_312[2];
double P_002000020=Pd_002[0]*Pd_020[2];
double P_002000120=Pd_002[0]*Pd_120[2];
double P_002000220=Pd_002[0]*Pd_220[2];
double P_102000020=Pd_102[0]*Pd_020[2];
double P_102000120=Pd_102[0]*Pd_120[2];
double P_102000220=Pd_102[0]*Pd_220[2];
double P_202000020=Pd_202[0]*Pd_020[2];
double P_202000120=Pd_202[0]*Pd_120[2];
double P_202000220=Pd_202[0]*Pd_220[2];
double P_001001020=Pd_001[0]*Pd_001[1]*Pd_020[2];
double P_001001120=Pd_001[0]*Pd_001[1]*Pd_120[2];
double P_001001220=Pd_001[0]*Pd_001[1]*Pd_220[2];
double P_001101020=Pd_001[0]*Pd_101[1]*Pd_020[2];
double P_001101120=Pd_001[0]*Pd_101[1]*Pd_120[2];
double P_001101220=Pd_001[0]*Pd_101[1]*Pd_220[2];
double P_101001020=Pd_101[0]*Pd_001[1]*Pd_020[2];
double P_101001120=Pd_101[0]*Pd_001[1]*Pd_120[2];
double P_101001220=Pd_101[0]*Pd_001[1]*Pd_220[2];
double P_101101020=Pd_101[0]*Pd_101[1]*Pd_020[2];
double P_101101120=Pd_101[0]*Pd_101[1]*Pd_120[2];
double P_101101220=Pd_101[0]*Pd_101[1]*Pd_220[2];
double P_000002020=Pd_002[1]*Pd_020[2];
double P_000002120=Pd_002[1]*Pd_120[2];
double P_000002220=Pd_002[1]*Pd_220[2];
double P_000102020=Pd_102[1]*Pd_020[2];
double P_000102120=Pd_102[1]*Pd_120[2];
double P_000102220=Pd_102[1]*Pd_220[2];
double P_000202020=Pd_202[1]*Pd_020[2];
double P_000202120=Pd_202[1]*Pd_120[2];
double P_000202220=Pd_202[1]*Pd_220[2];
double P_001000021=Pd_001[0]*Pd_021[2];
double P_001000121=Pd_001[0]*Pd_121[2];
double P_001000221=Pd_001[0]*Pd_221[2];
double P_001000321=Pd_001[0]*Pd_321[2];
double P_101000021=Pd_101[0]*Pd_021[2];
double P_101000121=Pd_101[0]*Pd_121[2];
double P_101000221=Pd_101[0]*Pd_221[2];
double P_101000321=Pd_101[0]*Pd_321[2];
double P_000001021=Pd_001[1]*Pd_021[2];
double P_000001121=Pd_001[1]*Pd_121[2];
double P_000001221=Pd_001[1]*Pd_221[2];
double P_000001321=Pd_001[1]*Pd_321[2];
double P_000101021=Pd_101[1]*Pd_021[2];
double P_000101121=Pd_101[1]*Pd_121[2];
double P_000101221=Pd_101[1]*Pd_221[2];
double P_000101321=Pd_101[1]*Pd_321[2];
double P_000000022=Pd_022[2];
double P_000000122=Pd_122[2];
double P_000000222=Pd_222[2];
double P_000000322=Pd_322[2];
double P_000000422=Pd_422[2];
double PR_022000000000=P_022000000*R_000[0]+-1*P_122000000*R_100[0]+P_222000000*R_200[0]+-1*P_322000000*R_300[0]+P_422000000*R_400[0];
double PR_021001000000=P_021001000*R_000[0]+-1*P_021101000*R_010[0]+-1*P_121001000*R_100[0]+P_121101000*R_110[0]+P_221001000*R_200[0]+-1*P_221101000*R_210[0]+-1*P_321001000*R_300[0]+P_321101000*R_310[0];
double PR_020002000000=P_020002000*R_000[0]+-1*P_020102000*R_010[0]+P_020202000*R_020[0]+-1*P_120002000*R_100[0]+P_120102000*R_110[0]+-1*P_120202000*R_120[0]+P_220002000*R_200[0]+-1*P_220102000*R_210[0]+P_220202000*R_220[0];
double PR_021000001000=P_021000001*R_000[0]+-1*P_021000101*R_001[0]+-1*P_121000001*R_100[0]+P_121000101*R_101[0]+P_221000001*R_200[0]+-1*P_221000101*R_201[0]+-1*P_321000001*R_300[0]+P_321000101*R_301[0];
double PR_020001001000=P_020001001*R_000[0]+-1*P_020001101*R_001[0]+-1*P_020101001*R_010[0]+P_020101101*R_011[0]+-1*P_120001001*R_100[0]+P_120001101*R_101[0]+P_120101001*R_110[0]+-1*P_120101101*R_111[0]+P_220001001*R_200[0]+-1*P_220001101*R_201[0]+-1*P_220101001*R_210[0]+P_220101101*R_211[0];
double PR_020000002000=P_020000002*R_000[0]+-1*P_020000102*R_001[0]+P_020000202*R_002[0]+-1*P_120000002*R_100[0]+P_120000102*R_101[0]+-1*P_120000202*R_102[0]+P_220000002*R_200[0]+-1*P_220000102*R_201[0]+P_220000202*R_202[0];
double PR_012010000000=P_012010000*R_000[0]+-1*P_012110000*R_010[0]+-1*P_112010000*R_100[0]+P_112110000*R_110[0]+P_212010000*R_200[0]+-1*P_212110000*R_210[0]+-1*P_312010000*R_300[0]+P_312110000*R_310[0];
double PR_011011000000=P_011011000*R_000[0]+-1*P_011111000*R_010[0]+P_011211000*R_020[0]+-1*P_111011000*R_100[0]+P_111111000*R_110[0]+-1*P_111211000*R_120[0]+P_211011000*R_200[0]+-1*P_211111000*R_210[0]+P_211211000*R_220[0];
double PR_010012000000=P_010012000*R_000[0]+-1*P_010112000*R_010[0]+P_010212000*R_020[0]+-1*P_010312000*R_030[0]+-1*P_110012000*R_100[0]+P_110112000*R_110[0]+-1*P_110212000*R_120[0]+P_110312000*R_130[0];
double PR_011010001000=P_011010001*R_000[0]+-1*P_011010101*R_001[0]+-1*P_011110001*R_010[0]+P_011110101*R_011[0]+-1*P_111010001*R_100[0]+P_111010101*R_101[0]+P_111110001*R_110[0]+-1*P_111110101*R_111[0]+P_211010001*R_200[0]+-1*P_211010101*R_201[0]+-1*P_211110001*R_210[0]+P_211110101*R_211[0];
double PR_010011001000=P_010011001*R_000[0]+-1*P_010011101*R_001[0]+-1*P_010111001*R_010[0]+P_010111101*R_011[0]+P_010211001*R_020[0]+-1*P_010211101*R_021[0]+-1*P_110011001*R_100[0]+P_110011101*R_101[0]+P_110111001*R_110[0]+-1*P_110111101*R_111[0]+-1*P_110211001*R_120[0]+P_110211101*R_121[0];
double PR_010010002000=P_010010002*R_000[0]+-1*P_010010102*R_001[0]+P_010010202*R_002[0]+-1*P_010110002*R_010[0]+P_010110102*R_011[0]+-1*P_010110202*R_012[0]+-1*P_110010002*R_100[0]+P_110010102*R_101[0]+-1*P_110010202*R_102[0]+P_110110002*R_110[0]+-1*P_110110102*R_111[0]+P_110110202*R_112[0];
double PR_002020000000=P_002020000*R_000[0]+-1*P_002120000*R_010[0]+P_002220000*R_020[0]+-1*P_102020000*R_100[0]+P_102120000*R_110[0]+-1*P_102220000*R_120[0]+P_202020000*R_200[0]+-1*P_202120000*R_210[0]+P_202220000*R_220[0];
double PR_001021000000=P_001021000*R_000[0]+-1*P_001121000*R_010[0]+P_001221000*R_020[0]+-1*P_001321000*R_030[0]+-1*P_101021000*R_100[0]+P_101121000*R_110[0]+-1*P_101221000*R_120[0]+P_101321000*R_130[0];
double PR_000022000000=P_000022000*R_000[0]+-1*P_000122000*R_010[0]+P_000222000*R_020[0]+-1*P_000322000*R_030[0]+P_000422000*R_040[0];
double PR_001020001000=P_001020001*R_000[0]+-1*P_001020101*R_001[0]+-1*P_001120001*R_010[0]+P_001120101*R_011[0]+P_001220001*R_020[0]+-1*P_001220101*R_021[0]+-1*P_101020001*R_100[0]+P_101020101*R_101[0]+P_101120001*R_110[0]+-1*P_101120101*R_111[0]+-1*P_101220001*R_120[0]+P_101220101*R_121[0];
double PR_000021001000=P_000021001*R_000[0]+-1*P_000021101*R_001[0]+-1*P_000121001*R_010[0]+P_000121101*R_011[0]+P_000221001*R_020[0]+-1*P_000221101*R_021[0]+-1*P_000321001*R_030[0]+P_000321101*R_031[0];
double PR_000020002000=P_000020002*R_000[0]+-1*P_000020102*R_001[0]+P_000020202*R_002[0]+-1*P_000120002*R_010[0]+P_000120102*R_011[0]+-1*P_000120202*R_012[0]+P_000220002*R_020[0]+-1*P_000220102*R_021[0]+P_000220202*R_022[0];
double PR_012000010000=P_012000010*R_000[0]+-1*P_012000110*R_001[0]+-1*P_112000010*R_100[0]+P_112000110*R_101[0]+P_212000010*R_200[0]+-1*P_212000110*R_201[0]+-1*P_312000010*R_300[0]+P_312000110*R_301[0];
double PR_011001010000=P_011001010*R_000[0]+-1*P_011001110*R_001[0]+-1*P_011101010*R_010[0]+P_011101110*R_011[0]+-1*P_111001010*R_100[0]+P_111001110*R_101[0]+P_111101010*R_110[0]+-1*P_111101110*R_111[0]+P_211001010*R_200[0]+-1*P_211001110*R_201[0]+-1*P_211101010*R_210[0]+P_211101110*R_211[0];
double PR_010002010000=P_010002010*R_000[0]+-1*P_010002110*R_001[0]+-1*P_010102010*R_010[0]+P_010102110*R_011[0]+P_010202010*R_020[0]+-1*P_010202110*R_021[0]+-1*P_110002010*R_100[0]+P_110002110*R_101[0]+P_110102010*R_110[0]+-1*P_110102110*R_111[0]+-1*P_110202010*R_120[0]+P_110202110*R_121[0];
double PR_011000011000=P_011000011*R_000[0]+-1*P_011000111*R_001[0]+P_011000211*R_002[0]+-1*P_111000011*R_100[0]+P_111000111*R_101[0]+-1*P_111000211*R_102[0]+P_211000011*R_200[0]+-1*P_211000111*R_201[0]+P_211000211*R_202[0];
double PR_010001011000=P_010001011*R_000[0]+-1*P_010001111*R_001[0]+P_010001211*R_002[0]+-1*P_010101011*R_010[0]+P_010101111*R_011[0]+-1*P_010101211*R_012[0]+-1*P_110001011*R_100[0]+P_110001111*R_101[0]+-1*P_110001211*R_102[0]+P_110101011*R_110[0]+-1*P_110101111*R_111[0]+P_110101211*R_112[0];
double PR_010000012000=P_010000012*R_000[0]+-1*P_010000112*R_001[0]+P_010000212*R_002[0]+-1*P_010000312*R_003[0]+-1*P_110000012*R_100[0]+P_110000112*R_101[0]+-1*P_110000212*R_102[0]+P_110000312*R_103[0];
double PR_002010010000=P_002010010*R_000[0]+-1*P_002010110*R_001[0]+-1*P_002110010*R_010[0]+P_002110110*R_011[0]+-1*P_102010010*R_100[0]+P_102010110*R_101[0]+P_102110010*R_110[0]+-1*P_102110110*R_111[0]+P_202010010*R_200[0]+-1*P_202010110*R_201[0]+-1*P_202110010*R_210[0]+P_202110110*R_211[0];
double PR_001011010000=P_001011010*R_000[0]+-1*P_001011110*R_001[0]+-1*P_001111010*R_010[0]+P_001111110*R_011[0]+P_001211010*R_020[0]+-1*P_001211110*R_021[0]+-1*P_101011010*R_100[0]+P_101011110*R_101[0]+P_101111010*R_110[0]+-1*P_101111110*R_111[0]+-1*P_101211010*R_120[0]+P_101211110*R_121[0];
double PR_000012010000=P_000012010*R_000[0]+-1*P_000012110*R_001[0]+-1*P_000112010*R_010[0]+P_000112110*R_011[0]+P_000212010*R_020[0]+-1*P_000212110*R_021[0]+-1*P_000312010*R_030[0]+P_000312110*R_031[0];
double PR_001010011000=P_001010011*R_000[0]+-1*P_001010111*R_001[0]+P_001010211*R_002[0]+-1*P_001110011*R_010[0]+P_001110111*R_011[0]+-1*P_001110211*R_012[0]+-1*P_101010011*R_100[0]+P_101010111*R_101[0]+-1*P_101010211*R_102[0]+P_101110011*R_110[0]+-1*P_101110111*R_111[0]+P_101110211*R_112[0];
double PR_000011011000=P_000011011*R_000[0]+-1*P_000011111*R_001[0]+P_000011211*R_002[0]+-1*P_000111011*R_010[0]+P_000111111*R_011[0]+-1*P_000111211*R_012[0]+P_000211011*R_020[0]+-1*P_000211111*R_021[0]+P_000211211*R_022[0];
double PR_000010012000=P_000010012*R_000[0]+-1*P_000010112*R_001[0]+P_000010212*R_002[0]+-1*P_000010312*R_003[0]+-1*P_000110012*R_010[0]+P_000110112*R_011[0]+-1*P_000110212*R_012[0]+P_000110312*R_013[0];
double PR_002000020000=P_002000020*R_000[0]+-1*P_002000120*R_001[0]+P_002000220*R_002[0]+-1*P_102000020*R_100[0]+P_102000120*R_101[0]+-1*P_102000220*R_102[0]+P_202000020*R_200[0]+-1*P_202000120*R_201[0]+P_202000220*R_202[0];
double PR_001001020000=P_001001020*R_000[0]+-1*P_001001120*R_001[0]+P_001001220*R_002[0]+-1*P_001101020*R_010[0]+P_001101120*R_011[0]+-1*P_001101220*R_012[0]+-1*P_101001020*R_100[0]+P_101001120*R_101[0]+-1*P_101001220*R_102[0]+P_101101020*R_110[0]+-1*P_101101120*R_111[0]+P_101101220*R_112[0];
double PR_000002020000=P_000002020*R_000[0]+-1*P_000002120*R_001[0]+P_000002220*R_002[0]+-1*P_000102020*R_010[0]+P_000102120*R_011[0]+-1*P_000102220*R_012[0]+P_000202020*R_020[0]+-1*P_000202120*R_021[0]+P_000202220*R_022[0];
double PR_001000021000=P_001000021*R_000[0]+-1*P_001000121*R_001[0]+P_001000221*R_002[0]+-1*P_001000321*R_003[0]+-1*P_101000021*R_100[0]+P_101000121*R_101[0]+-1*P_101000221*R_102[0]+P_101000321*R_103[0];
double PR_000001021000=P_000001021*R_000[0]+-1*P_000001121*R_001[0]+P_000001221*R_002[0]+-1*P_000001321*R_003[0]+-1*P_000101021*R_010[0]+P_000101121*R_011[0]+-1*P_000101221*R_012[0]+P_000101321*R_013[0];
double PR_000000022000=P_000000022*R_000[0]+-1*P_000000122*R_001[0]+P_000000222*R_002[0]+-1*P_000000322*R_003[0]+P_000000422*R_004[0];
double PR_022000000001=P_022000000*R_001[0]+-1*P_122000000*R_101[0]+P_222000000*R_201[0]+-1*P_322000000*R_301[0]+P_422000000*R_401[0];
double PR_021001000001=P_021001000*R_001[0]+-1*P_021101000*R_011[0]+-1*P_121001000*R_101[0]+P_121101000*R_111[0]+P_221001000*R_201[0]+-1*P_221101000*R_211[0]+-1*P_321001000*R_301[0]+P_321101000*R_311[0];
double PR_020002000001=P_020002000*R_001[0]+-1*P_020102000*R_011[0]+P_020202000*R_021[0]+-1*P_120002000*R_101[0]+P_120102000*R_111[0]+-1*P_120202000*R_121[0]+P_220002000*R_201[0]+-1*P_220102000*R_211[0]+P_220202000*R_221[0];
double PR_021000001001=P_021000001*R_001[0]+-1*P_021000101*R_002[0]+-1*P_121000001*R_101[0]+P_121000101*R_102[0]+P_221000001*R_201[0]+-1*P_221000101*R_202[0]+-1*P_321000001*R_301[0]+P_321000101*R_302[0];
double PR_020001001001=P_020001001*R_001[0]+-1*P_020001101*R_002[0]+-1*P_020101001*R_011[0]+P_020101101*R_012[0]+-1*P_120001001*R_101[0]+P_120001101*R_102[0]+P_120101001*R_111[0]+-1*P_120101101*R_112[0]+P_220001001*R_201[0]+-1*P_220001101*R_202[0]+-1*P_220101001*R_211[0]+P_220101101*R_212[0];
double PR_020000002001=P_020000002*R_001[0]+-1*P_020000102*R_002[0]+P_020000202*R_003[0]+-1*P_120000002*R_101[0]+P_120000102*R_102[0]+-1*P_120000202*R_103[0]+P_220000002*R_201[0]+-1*P_220000102*R_202[0]+P_220000202*R_203[0];
double PR_012010000001=P_012010000*R_001[0]+-1*P_012110000*R_011[0]+-1*P_112010000*R_101[0]+P_112110000*R_111[0]+P_212010000*R_201[0]+-1*P_212110000*R_211[0]+-1*P_312010000*R_301[0]+P_312110000*R_311[0];
double PR_011011000001=P_011011000*R_001[0]+-1*P_011111000*R_011[0]+P_011211000*R_021[0]+-1*P_111011000*R_101[0]+P_111111000*R_111[0]+-1*P_111211000*R_121[0]+P_211011000*R_201[0]+-1*P_211111000*R_211[0]+P_211211000*R_221[0];
double PR_010012000001=P_010012000*R_001[0]+-1*P_010112000*R_011[0]+P_010212000*R_021[0]+-1*P_010312000*R_031[0]+-1*P_110012000*R_101[0]+P_110112000*R_111[0]+-1*P_110212000*R_121[0]+P_110312000*R_131[0];
double PR_011010001001=P_011010001*R_001[0]+-1*P_011010101*R_002[0]+-1*P_011110001*R_011[0]+P_011110101*R_012[0]+-1*P_111010001*R_101[0]+P_111010101*R_102[0]+P_111110001*R_111[0]+-1*P_111110101*R_112[0]+P_211010001*R_201[0]+-1*P_211010101*R_202[0]+-1*P_211110001*R_211[0]+P_211110101*R_212[0];
double PR_010011001001=P_010011001*R_001[0]+-1*P_010011101*R_002[0]+-1*P_010111001*R_011[0]+P_010111101*R_012[0]+P_010211001*R_021[0]+-1*P_010211101*R_022[0]+-1*P_110011001*R_101[0]+P_110011101*R_102[0]+P_110111001*R_111[0]+-1*P_110111101*R_112[0]+-1*P_110211001*R_121[0]+P_110211101*R_122[0];
double PR_010010002001=P_010010002*R_001[0]+-1*P_010010102*R_002[0]+P_010010202*R_003[0]+-1*P_010110002*R_011[0]+P_010110102*R_012[0]+-1*P_010110202*R_013[0]+-1*P_110010002*R_101[0]+P_110010102*R_102[0]+-1*P_110010202*R_103[0]+P_110110002*R_111[0]+-1*P_110110102*R_112[0]+P_110110202*R_113[0];
double PR_002020000001=P_002020000*R_001[0]+-1*P_002120000*R_011[0]+P_002220000*R_021[0]+-1*P_102020000*R_101[0]+P_102120000*R_111[0]+-1*P_102220000*R_121[0]+P_202020000*R_201[0]+-1*P_202120000*R_211[0]+P_202220000*R_221[0];
double PR_001021000001=P_001021000*R_001[0]+-1*P_001121000*R_011[0]+P_001221000*R_021[0]+-1*P_001321000*R_031[0]+-1*P_101021000*R_101[0]+P_101121000*R_111[0]+-1*P_101221000*R_121[0]+P_101321000*R_131[0];
double PR_000022000001=P_000022000*R_001[0]+-1*P_000122000*R_011[0]+P_000222000*R_021[0]+-1*P_000322000*R_031[0]+P_000422000*R_041[0];
double PR_001020001001=P_001020001*R_001[0]+-1*P_001020101*R_002[0]+-1*P_001120001*R_011[0]+P_001120101*R_012[0]+P_001220001*R_021[0]+-1*P_001220101*R_022[0]+-1*P_101020001*R_101[0]+P_101020101*R_102[0]+P_101120001*R_111[0]+-1*P_101120101*R_112[0]+-1*P_101220001*R_121[0]+P_101220101*R_122[0];
double PR_000021001001=P_000021001*R_001[0]+-1*P_000021101*R_002[0]+-1*P_000121001*R_011[0]+P_000121101*R_012[0]+P_000221001*R_021[0]+-1*P_000221101*R_022[0]+-1*P_000321001*R_031[0]+P_000321101*R_032[0];
double PR_000020002001=P_000020002*R_001[0]+-1*P_000020102*R_002[0]+P_000020202*R_003[0]+-1*P_000120002*R_011[0]+P_000120102*R_012[0]+-1*P_000120202*R_013[0]+P_000220002*R_021[0]+-1*P_000220102*R_022[0]+P_000220202*R_023[0];
double PR_012000010001=P_012000010*R_001[0]+-1*P_012000110*R_002[0]+-1*P_112000010*R_101[0]+P_112000110*R_102[0]+P_212000010*R_201[0]+-1*P_212000110*R_202[0]+-1*P_312000010*R_301[0]+P_312000110*R_302[0];
double PR_011001010001=P_011001010*R_001[0]+-1*P_011001110*R_002[0]+-1*P_011101010*R_011[0]+P_011101110*R_012[0]+-1*P_111001010*R_101[0]+P_111001110*R_102[0]+P_111101010*R_111[0]+-1*P_111101110*R_112[0]+P_211001010*R_201[0]+-1*P_211001110*R_202[0]+-1*P_211101010*R_211[0]+P_211101110*R_212[0];
double PR_010002010001=P_010002010*R_001[0]+-1*P_010002110*R_002[0]+-1*P_010102010*R_011[0]+P_010102110*R_012[0]+P_010202010*R_021[0]+-1*P_010202110*R_022[0]+-1*P_110002010*R_101[0]+P_110002110*R_102[0]+P_110102010*R_111[0]+-1*P_110102110*R_112[0]+-1*P_110202010*R_121[0]+P_110202110*R_122[0];
double PR_011000011001=P_011000011*R_001[0]+-1*P_011000111*R_002[0]+P_011000211*R_003[0]+-1*P_111000011*R_101[0]+P_111000111*R_102[0]+-1*P_111000211*R_103[0]+P_211000011*R_201[0]+-1*P_211000111*R_202[0]+P_211000211*R_203[0];
double PR_010001011001=P_010001011*R_001[0]+-1*P_010001111*R_002[0]+P_010001211*R_003[0]+-1*P_010101011*R_011[0]+P_010101111*R_012[0]+-1*P_010101211*R_013[0]+-1*P_110001011*R_101[0]+P_110001111*R_102[0]+-1*P_110001211*R_103[0]+P_110101011*R_111[0]+-1*P_110101111*R_112[0]+P_110101211*R_113[0];
double PR_010000012001=P_010000012*R_001[0]+-1*P_010000112*R_002[0]+P_010000212*R_003[0]+-1*P_010000312*R_004[0]+-1*P_110000012*R_101[0]+P_110000112*R_102[0]+-1*P_110000212*R_103[0]+P_110000312*R_104[0];
double PR_002010010001=P_002010010*R_001[0]+-1*P_002010110*R_002[0]+-1*P_002110010*R_011[0]+P_002110110*R_012[0]+-1*P_102010010*R_101[0]+P_102010110*R_102[0]+P_102110010*R_111[0]+-1*P_102110110*R_112[0]+P_202010010*R_201[0]+-1*P_202010110*R_202[0]+-1*P_202110010*R_211[0]+P_202110110*R_212[0];
double PR_001011010001=P_001011010*R_001[0]+-1*P_001011110*R_002[0]+-1*P_001111010*R_011[0]+P_001111110*R_012[0]+P_001211010*R_021[0]+-1*P_001211110*R_022[0]+-1*P_101011010*R_101[0]+P_101011110*R_102[0]+P_101111010*R_111[0]+-1*P_101111110*R_112[0]+-1*P_101211010*R_121[0]+P_101211110*R_122[0];
double PR_000012010001=P_000012010*R_001[0]+-1*P_000012110*R_002[0]+-1*P_000112010*R_011[0]+P_000112110*R_012[0]+P_000212010*R_021[0]+-1*P_000212110*R_022[0]+-1*P_000312010*R_031[0]+P_000312110*R_032[0];
double PR_001010011001=P_001010011*R_001[0]+-1*P_001010111*R_002[0]+P_001010211*R_003[0]+-1*P_001110011*R_011[0]+P_001110111*R_012[0]+-1*P_001110211*R_013[0]+-1*P_101010011*R_101[0]+P_101010111*R_102[0]+-1*P_101010211*R_103[0]+P_101110011*R_111[0]+-1*P_101110111*R_112[0]+P_101110211*R_113[0];
double PR_000011011001=P_000011011*R_001[0]+-1*P_000011111*R_002[0]+P_000011211*R_003[0]+-1*P_000111011*R_011[0]+P_000111111*R_012[0]+-1*P_000111211*R_013[0]+P_000211011*R_021[0]+-1*P_000211111*R_022[0]+P_000211211*R_023[0];
double PR_000010012001=P_000010012*R_001[0]+-1*P_000010112*R_002[0]+P_000010212*R_003[0]+-1*P_000010312*R_004[0]+-1*P_000110012*R_011[0]+P_000110112*R_012[0]+-1*P_000110212*R_013[0]+P_000110312*R_014[0];
double PR_002000020001=P_002000020*R_001[0]+-1*P_002000120*R_002[0]+P_002000220*R_003[0]+-1*P_102000020*R_101[0]+P_102000120*R_102[0]+-1*P_102000220*R_103[0]+P_202000020*R_201[0]+-1*P_202000120*R_202[0]+P_202000220*R_203[0];
double PR_001001020001=P_001001020*R_001[0]+-1*P_001001120*R_002[0]+P_001001220*R_003[0]+-1*P_001101020*R_011[0]+P_001101120*R_012[0]+-1*P_001101220*R_013[0]+-1*P_101001020*R_101[0]+P_101001120*R_102[0]+-1*P_101001220*R_103[0]+P_101101020*R_111[0]+-1*P_101101120*R_112[0]+P_101101220*R_113[0];
double PR_000002020001=P_000002020*R_001[0]+-1*P_000002120*R_002[0]+P_000002220*R_003[0]+-1*P_000102020*R_011[0]+P_000102120*R_012[0]+-1*P_000102220*R_013[0]+P_000202020*R_021[0]+-1*P_000202120*R_022[0]+P_000202220*R_023[0];
double PR_001000021001=P_001000021*R_001[0]+-1*P_001000121*R_002[0]+P_001000221*R_003[0]+-1*P_001000321*R_004[0]+-1*P_101000021*R_101[0]+P_101000121*R_102[0]+-1*P_101000221*R_103[0]+P_101000321*R_104[0];
double PR_000001021001=P_000001021*R_001[0]+-1*P_000001121*R_002[0]+P_000001221*R_003[0]+-1*P_000001321*R_004[0]+-1*P_000101021*R_011[0]+P_000101121*R_012[0]+-1*P_000101221*R_013[0]+P_000101321*R_014[0];
double PR_000000022001=P_000000022*R_001[0]+-1*P_000000122*R_002[0]+P_000000222*R_003[0]+-1*P_000000322*R_004[0]+P_000000422*R_005[0];
double PR_022000000010=P_022000000*R_010[0]+-1*P_122000000*R_110[0]+P_222000000*R_210[0]+-1*P_322000000*R_310[0]+P_422000000*R_410[0];
double PR_021001000010=P_021001000*R_010[0]+-1*P_021101000*R_020[0]+-1*P_121001000*R_110[0]+P_121101000*R_120[0]+P_221001000*R_210[0]+-1*P_221101000*R_220[0]+-1*P_321001000*R_310[0]+P_321101000*R_320[0];
double PR_020002000010=P_020002000*R_010[0]+-1*P_020102000*R_020[0]+P_020202000*R_030[0]+-1*P_120002000*R_110[0]+P_120102000*R_120[0]+-1*P_120202000*R_130[0]+P_220002000*R_210[0]+-1*P_220102000*R_220[0]+P_220202000*R_230[0];
double PR_021000001010=P_021000001*R_010[0]+-1*P_021000101*R_011[0]+-1*P_121000001*R_110[0]+P_121000101*R_111[0]+P_221000001*R_210[0]+-1*P_221000101*R_211[0]+-1*P_321000001*R_310[0]+P_321000101*R_311[0];
double PR_020001001010=P_020001001*R_010[0]+-1*P_020001101*R_011[0]+-1*P_020101001*R_020[0]+P_020101101*R_021[0]+-1*P_120001001*R_110[0]+P_120001101*R_111[0]+P_120101001*R_120[0]+-1*P_120101101*R_121[0]+P_220001001*R_210[0]+-1*P_220001101*R_211[0]+-1*P_220101001*R_220[0]+P_220101101*R_221[0];
double PR_020000002010=P_020000002*R_010[0]+-1*P_020000102*R_011[0]+P_020000202*R_012[0]+-1*P_120000002*R_110[0]+P_120000102*R_111[0]+-1*P_120000202*R_112[0]+P_220000002*R_210[0]+-1*P_220000102*R_211[0]+P_220000202*R_212[0];
double PR_012010000010=P_012010000*R_010[0]+-1*P_012110000*R_020[0]+-1*P_112010000*R_110[0]+P_112110000*R_120[0]+P_212010000*R_210[0]+-1*P_212110000*R_220[0]+-1*P_312010000*R_310[0]+P_312110000*R_320[0];
double PR_011011000010=P_011011000*R_010[0]+-1*P_011111000*R_020[0]+P_011211000*R_030[0]+-1*P_111011000*R_110[0]+P_111111000*R_120[0]+-1*P_111211000*R_130[0]+P_211011000*R_210[0]+-1*P_211111000*R_220[0]+P_211211000*R_230[0];
double PR_010012000010=P_010012000*R_010[0]+-1*P_010112000*R_020[0]+P_010212000*R_030[0]+-1*P_010312000*R_040[0]+-1*P_110012000*R_110[0]+P_110112000*R_120[0]+-1*P_110212000*R_130[0]+P_110312000*R_140[0];
double PR_011010001010=P_011010001*R_010[0]+-1*P_011010101*R_011[0]+-1*P_011110001*R_020[0]+P_011110101*R_021[0]+-1*P_111010001*R_110[0]+P_111010101*R_111[0]+P_111110001*R_120[0]+-1*P_111110101*R_121[0]+P_211010001*R_210[0]+-1*P_211010101*R_211[0]+-1*P_211110001*R_220[0]+P_211110101*R_221[0];
double PR_010011001010=P_010011001*R_010[0]+-1*P_010011101*R_011[0]+-1*P_010111001*R_020[0]+P_010111101*R_021[0]+P_010211001*R_030[0]+-1*P_010211101*R_031[0]+-1*P_110011001*R_110[0]+P_110011101*R_111[0]+P_110111001*R_120[0]+-1*P_110111101*R_121[0]+-1*P_110211001*R_130[0]+P_110211101*R_131[0];
double PR_010010002010=P_010010002*R_010[0]+-1*P_010010102*R_011[0]+P_010010202*R_012[0]+-1*P_010110002*R_020[0]+P_010110102*R_021[0]+-1*P_010110202*R_022[0]+-1*P_110010002*R_110[0]+P_110010102*R_111[0]+-1*P_110010202*R_112[0]+P_110110002*R_120[0]+-1*P_110110102*R_121[0]+P_110110202*R_122[0];
double PR_002020000010=P_002020000*R_010[0]+-1*P_002120000*R_020[0]+P_002220000*R_030[0]+-1*P_102020000*R_110[0]+P_102120000*R_120[0]+-1*P_102220000*R_130[0]+P_202020000*R_210[0]+-1*P_202120000*R_220[0]+P_202220000*R_230[0];
double PR_001021000010=P_001021000*R_010[0]+-1*P_001121000*R_020[0]+P_001221000*R_030[0]+-1*P_001321000*R_040[0]+-1*P_101021000*R_110[0]+P_101121000*R_120[0]+-1*P_101221000*R_130[0]+P_101321000*R_140[0];
double PR_000022000010=P_000022000*R_010[0]+-1*P_000122000*R_020[0]+P_000222000*R_030[0]+-1*P_000322000*R_040[0]+P_000422000*R_050[0];
double PR_001020001010=P_001020001*R_010[0]+-1*P_001020101*R_011[0]+-1*P_001120001*R_020[0]+P_001120101*R_021[0]+P_001220001*R_030[0]+-1*P_001220101*R_031[0]+-1*P_101020001*R_110[0]+P_101020101*R_111[0]+P_101120001*R_120[0]+-1*P_101120101*R_121[0]+-1*P_101220001*R_130[0]+P_101220101*R_131[0];
double PR_000021001010=P_000021001*R_010[0]+-1*P_000021101*R_011[0]+-1*P_000121001*R_020[0]+P_000121101*R_021[0]+P_000221001*R_030[0]+-1*P_000221101*R_031[0]+-1*P_000321001*R_040[0]+P_000321101*R_041[0];
double PR_000020002010=P_000020002*R_010[0]+-1*P_000020102*R_011[0]+P_000020202*R_012[0]+-1*P_000120002*R_020[0]+P_000120102*R_021[0]+-1*P_000120202*R_022[0]+P_000220002*R_030[0]+-1*P_000220102*R_031[0]+P_000220202*R_032[0];
double PR_012000010010=P_012000010*R_010[0]+-1*P_012000110*R_011[0]+-1*P_112000010*R_110[0]+P_112000110*R_111[0]+P_212000010*R_210[0]+-1*P_212000110*R_211[0]+-1*P_312000010*R_310[0]+P_312000110*R_311[0];
double PR_011001010010=P_011001010*R_010[0]+-1*P_011001110*R_011[0]+-1*P_011101010*R_020[0]+P_011101110*R_021[0]+-1*P_111001010*R_110[0]+P_111001110*R_111[0]+P_111101010*R_120[0]+-1*P_111101110*R_121[0]+P_211001010*R_210[0]+-1*P_211001110*R_211[0]+-1*P_211101010*R_220[0]+P_211101110*R_221[0];
double PR_010002010010=P_010002010*R_010[0]+-1*P_010002110*R_011[0]+-1*P_010102010*R_020[0]+P_010102110*R_021[0]+P_010202010*R_030[0]+-1*P_010202110*R_031[0]+-1*P_110002010*R_110[0]+P_110002110*R_111[0]+P_110102010*R_120[0]+-1*P_110102110*R_121[0]+-1*P_110202010*R_130[0]+P_110202110*R_131[0];
double PR_011000011010=P_011000011*R_010[0]+-1*P_011000111*R_011[0]+P_011000211*R_012[0]+-1*P_111000011*R_110[0]+P_111000111*R_111[0]+-1*P_111000211*R_112[0]+P_211000011*R_210[0]+-1*P_211000111*R_211[0]+P_211000211*R_212[0];
double PR_010001011010=P_010001011*R_010[0]+-1*P_010001111*R_011[0]+P_010001211*R_012[0]+-1*P_010101011*R_020[0]+P_010101111*R_021[0]+-1*P_010101211*R_022[0]+-1*P_110001011*R_110[0]+P_110001111*R_111[0]+-1*P_110001211*R_112[0]+P_110101011*R_120[0]+-1*P_110101111*R_121[0]+P_110101211*R_122[0];
double PR_010000012010=P_010000012*R_010[0]+-1*P_010000112*R_011[0]+P_010000212*R_012[0]+-1*P_010000312*R_013[0]+-1*P_110000012*R_110[0]+P_110000112*R_111[0]+-1*P_110000212*R_112[0]+P_110000312*R_113[0];
double PR_002010010010=P_002010010*R_010[0]+-1*P_002010110*R_011[0]+-1*P_002110010*R_020[0]+P_002110110*R_021[0]+-1*P_102010010*R_110[0]+P_102010110*R_111[0]+P_102110010*R_120[0]+-1*P_102110110*R_121[0]+P_202010010*R_210[0]+-1*P_202010110*R_211[0]+-1*P_202110010*R_220[0]+P_202110110*R_221[0];
double PR_001011010010=P_001011010*R_010[0]+-1*P_001011110*R_011[0]+-1*P_001111010*R_020[0]+P_001111110*R_021[0]+P_001211010*R_030[0]+-1*P_001211110*R_031[0]+-1*P_101011010*R_110[0]+P_101011110*R_111[0]+P_101111010*R_120[0]+-1*P_101111110*R_121[0]+-1*P_101211010*R_130[0]+P_101211110*R_131[0];
double PR_000012010010=P_000012010*R_010[0]+-1*P_000012110*R_011[0]+-1*P_000112010*R_020[0]+P_000112110*R_021[0]+P_000212010*R_030[0]+-1*P_000212110*R_031[0]+-1*P_000312010*R_040[0]+P_000312110*R_041[0];
double PR_001010011010=P_001010011*R_010[0]+-1*P_001010111*R_011[0]+P_001010211*R_012[0]+-1*P_001110011*R_020[0]+P_001110111*R_021[0]+-1*P_001110211*R_022[0]+-1*P_101010011*R_110[0]+P_101010111*R_111[0]+-1*P_101010211*R_112[0]+P_101110011*R_120[0]+-1*P_101110111*R_121[0]+P_101110211*R_122[0];
double PR_000011011010=P_000011011*R_010[0]+-1*P_000011111*R_011[0]+P_000011211*R_012[0]+-1*P_000111011*R_020[0]+P_000111111*R_021[0]+-1*P_000111211*R_022[0]+P_000211011*R_030[0]+-1*P_000211111*R_031[0]+P_000211211*R_032[0];
double PR_000010012010=P_000010012*R_010[0]+-1*P_000010112*R_011[0]+P_000010212*R_012[0]+-1*P_000010312*R_013[0]+-1*P_000110012*R_020[0]+P_000110112*R_021[0]+-1*P_000110212*R_022[0]+P_000110312*R_023[0];
double PR_002000020010=P_002000020*R_010[0]+-1*P_002000120*R_011[0]+P_002000220*R_012[0]+-1*P_102000020*R_110[0]+P_102000120*R_111[0]+-1*P_102000220*R_112[0]+P_202000020*R_210[0]+-1*P_202000120*R_211[0]+P_202000220*R_212[0];
double PR_001001020010=P_001001020*R_010[0]+-1*P_001001120*R_011[0]+P_001001220*R_012[0]+-1*P_001101020*R_020[0]+P_001101120*R_021[0]+-1*P_001101220*R_022[0]+-1*P_101001020*R_110[0]+P_101001120*R_111[0]+-1*P_101001220*R_112[0]+P_101101020*R_120[0]+-1*P_101101120*R_121[0]+P_101101220*R_122[0];
double PR_000002020010=P_000002020*R_010[0]+-1*P_000002120*R_011[0]+P_000002220*R_012[0]+-1*P_000102020*R_020[0]+P_000102120*R_021[0]+-1*P_000102220*R_022[0]+P_000202020*R_030[0]+-1*P_000202120*R_031[0]+P_000202220*R_032[0];
double PR_001000021010=P_001000021*R_010[0]+-1*P_001000121*R_011[0]+P_001000221*R_012[0]+-1*P_001000321*R_013[0]+-1*P_101000021*R_110[0]+P_101000121*R_111[0]+-1*P_101000221*R_112[0]+P_101000321*R_113[0];
double PR_000001021010=P_000001021*R_010[0]+-1*P_000001121*R_011[0]+P_000001221*R_012[0]+-1*P_000001321*R_013[0]+-1*P_000101021*R_020[0]+P_000101121*R_021[0]+-1*P_000101221*R_022[0]+P_000101321*R_023[0];
double PR_000000022010=P_000000022*R_010[0]+-1*P_000000122*R_011[0]+P_000000222*R_012[0]+-1*P_000000322*R_013[0]+P_000000422*R_014[0];
double PR_022000000100=P_022000000*R_100[0]+-1*P_122000000*R_200[0]+P_222000000*R_300[0]+-1*P_322000000*R_400[0]+P_422000000*R_500[0];
double PR_021001000100=P_021001000*R_100[0]+-1*P_021101000*R_110[0]+-1*P_121001000*R_200[0]+P_121101000*R_210[0]+P_221001000*R_300[0]+-1*P_221101000*R_310[0]+-1*P_321001000*R_400[0]+P_321101000*R_410[0];
double PR_020002000100=P_020002000*R_100[0]+-1*P_020102000*R_110[0]+P_020202000*R_120[0]+-1*P_120002000*R_200[0]+P_120102000*R_210[0]+-1*P_120202000*R_220[0]+P_220002000*R_300[0]+-1*P_220102000*R_310[0]+P_220202000*R_320[0];
double PR_021000001100=P_021000001*R_100[0]+-1*P_021000101*R_101[0]+-1*P_121000001*R_200[0]+P_121000101*R_201[0]+P_221000001*R_300[0]+-1*P_221000101*R_301[0]+-1*P_321000001*R_400[0]+P_321000101*R_401[0];
double PR_020001001100=P_020001001*R_100[0]+-1*P_020001101*R_101[0]+-1*P_020101001*R_110[0]+P_020101101*R_111[0]+-1*P_120001001*R_200[0]+P_120001101*R_201[0]+P_120101001*R_210[0]+-1*P_120101101*R_211[0]+P_220001001*R_300[0]+-1*P_220001101*R_301[0]+-1*P_220101001*R_310[0]+P_220101101*R_311[0];
double PR_020000002100=P_020000002*R_100[0]+-1*P_020000102*R_101[0]+P_020000202*R_102[0]+-1*P_120000002*R_200[0]+P_120000102*R_201[0]+-1*P_120000202*R_202[0]+P_220000002*R_300[0]+-1*P_220000102*R_301[0]+P_220000202*R_302[0];
double PR_012010000100=P_012010000*R_100[0]+-1*P_012110000*R_110[0]+-1*P_112010000*R_200[0]+P_112110000*R_210[0]+P_212010000*R_300[0]+-1*P_212110000*R_310[0]+-1*P_312010000*R_400[0]+P_312110000*R_410[0];
double PR_011011000100=P_011011000*R_100[0]+-1*P_011111000*R_110[0]+P_011211000*R_120[0]+-1*P_111011000*R_200[0]+P_111111000*R_210[0]+-1*P_111211000*R_220[0]+P_211011000*R_300[0]+-1*P_211111000*R_310[0]+P_211211000*R_320[0];
double PR_010012000100=P_010012000*R_100[0]+-1*P_010112000*R_110[0]+P_010212000*R_120[0]+-1*P_010312000*R_130[0]+-1*P_110012000*R_200[0]+P_110112000*R_210[0]+-1*P_110212000*R_220[0]+P_110312000*R_230[0];
double PR_011010001100=P_011010001*R_100[0]+-1*P_011010101*R_101[0]+-1*P_011110001*R_110[0]+P_011110101*R_111[0]+-1*P_111010001*R_200[0]+P_111010101*R_201[0]+P_111110001*R_210[0]+-1*P_111110101*R_211[0]+P_211010001*R_300[0]+-1*P_211010101*R_301[0]+-1*P_211110001*R_310[0]+P_211110101*R_311[0];
double PR_010011001100=P_010011001*R_100[0]+-1*P_010011101*R_101[0]+-1*P_010111001*R_110[0]+P_010111101*R_111[0]+P_010211001*R_120[0]+-1*P_010211101*R_121[0]+-1*P_110011001*R_200[0]+P_110011101*R_201[0]+P_110111001*R_210[0]+-1*P_110111101*R_211[0]+-1*P_110211001*R_220[0]+P_110211101*R_221[0];
double PR_010010002100=P_010010002*R_100[0]+-1*P_010010102*R_101[0]+P_010010202*R_102[0]+-1*P_010110002*R_110[0]+P_010110102*R_111[0]+-1*P_010110202*R_112[0]+-1*P_110010002*R_200[0]+P_110010102*R_201[0]+-1*P_110010202*R_202[0]+P_110110002*R_210[0]+-1*P_110110102*R_211[0]+P_110110202*R_212[0];
double PR_002020000100=P_002020000*R_100[0]+-1*P_002120000*R_110[0]+P_002220000*R_120[0]+-1*P_102020000*R_200[0]+P_102120000*R_210[0]+-1*P_102220000*R_220[0]+P_202020000*R_300[0]+-1*P_202120000*R_310[0]+P_202220000*R_320[0];
double PR_001021000100=P_001021000*R_100[0]+-1*P_001121000*R_110[0]+P_001221000*R_120[0]+-1*P_001321000*R_130[0]+-1*P_101021000*R_200[0]+P_101121000*R_210[0]+-1*P_101221000*R_220[0]+P_101321000*R_230[0];
double PR_000022000100=P_000022000*R_100[0]+-1*P_000122000*R_110[0]+P_000222000*R_120[0]+-1*P_000322000*R_130[0]+P_000422000*R_140[0];
double PR_001020001100=P_001020001*R_100[0]+-1*P_001020101*R_101[0]+-1*P_001120001*R_110[0]+P_001120101*R_111[0]+P_001220001*R_120[0]+-1*P_001220101*R_121[0]+-1*P_101020001*R_200[0]+P_101020101*R_201[0]+P_101120001*R_210[0]+-1*P_101120101*R_211[0]+-1*P_101220001*R_220[0]+P_101220101*R_221[0];
double PR_000021001100=P_000021001*R_100[0]+-1*P_000021101*R_101[0]+-1*P_000121001*R_110[0]+P_000121101*R_111[0]+P_000221001*R_120[0]+-1*P_000221101*R_121[0]+-1*P_000321001*R_130[0]+P_000321101*R_131[0];
double PR_000020002100=P_000020002*R_100[0]+-1*P_000020102*R_101[0]+P_000020202*R_102[0]+-1*P_000120002*R_110[0]+P_000120102*R_111[0]+-1*P_000120202*R_112[0]+P_000220002*R_120[0]+-1*P_000220102*R_121[0]+P_000220202*R_122[0];
double PR_012000010100=P_012000010*R_100[0]+-1*P_012000110*R_101[0]+-1*P_112000010*R_200[0]+P_112000110*R_201[0]+P_212000010*R_300[0]+-1*P_212000110*R_301[0]+-1*P_312000010*R_400[0]+P_312000110*R_401[0];
double PR_011001010100=P_011001010*R_100[0]+-1*P_011001110*R_101[0]+-1*P_011101010*R_110[0]+P_011101110*R_111[0]+-1*P_111001010*R_200[0]+P_111001110*R_201[0]+P_111101010*R_210[0]+-1*P_111101110*R_211[0]+P_211001010*R_300[0]+-1*P_211001110*R_301[0]+-1*P_211101010*R_310[0]+P_211101110*R_311[0];
double PR_010002010100=P_010002010*R_100[0]+-1*P_010002110*R_101[0]+-1*P_010102010*R_110[0]+P_010102110*R_111[0]+P_010202010*R_120[0]+-1*P_010202110*R_121[0]+-1*P_110002010*R_200[0]+P_110002110*R_201[0]+P_110102010*R_210[0]+-1*P_110102110*R_211[0]+-1*P_110202010*R_220[0]+P_110202110*R_221[0];
double PR_011000011100=P_011000011*R_100[0]+-1*P_011000111*R_101[0]+P_011000211*R_102[0]+-1*P_111000011*R_200[0]+P_111000111*R_201[0]+-1*P_111000211*R_202[0]+P_211000011*R_300[0]+-1*P_211000111*R_301[0]+P_211000211*R_302[0];
double PR_010001011100=P_010001011*R_100[0]+-1*P_010001111*R_101[0]+P_010001211*R_102[0]+-1*P_010101011*R_110[0]+P_010101111*R_111[0]+-1*P_010101211*R_112[0]+-1*P_110001011*R_200[0]+P_110001111*R_201[0]+-1*P_110001211*R_202[0]+P_110101011*R_210[0]+-1*P_110101111*R_211[0]+P_110101211*R_212[0];
double PR_010000012100=P_010000012*R_100[0]+-1*P_010000112*R_101[0]+P_010000212*R_102[0]+-1*P_010000312*R_103[0]+-1*P_110000012*R_200[0]+P_110000112*R_201[0]+-1*P_110000212*R_202[0]+P_110000312*R_203[0];
double PR_002010010100=P_002010010*R_100[0]+-1*P_002010110*R_101[0]+-1*P_002110010*R_110[0]+P_002110110*R_111[0]+-1*P_102010010*R_200[0]+P_102010110*R_201[0]+P_102110010*R_210[0]+-1*P_102110110*R_211[0]+P_202010010*R_300[0]+-1*P_202010110*R_301[0]+-1*P_202110010*R_310[0]+P_202110110*R_311[0];
double PR_001011010100=P_001011010*R_100[0]+-1*P_001011110*R_101[0]+-1*P_001111010*R_110[0]+P_001111110*R_111[0]+P_001211010*R_120[0]+-1*P_001211110*R_121[0]+-1*P_101011010*R_200[0]+P_101011110*R_201[0]+P_101111010*R_210[0]+-1*P_101111110*R_211[0]+-1*P_101211010*R_220[0]+P_101211110*R_221[0];
double PR_000012010100=P_000012010*R_100[0]+-1*P_000012110*R_101[0]+-1*P_000112010*R_110[0]+P_000112110*R_111[0]+P_000212010*R_120[0]+-1*P_000212110*R_121[0]+-1*P_000312010*R_130[0]+P_000312110*R_131[0];
double PR_001010011100=P_001010011*R_100[0]+-1*P_001010111*R_101[0]+P_001010211*R_102[0]+-1*P_001110011*R_110[0]+P_001110111*R_111[0]+-1*P_001110211*R_112[0]+-1*P_101010011*R_200[0]+P_101010111*R_201[0]+-1*P_101010211*R_202[0]+P_101110011*R_210[0]+-1*P_101110111*R_211[0]+P_101110211*R_212[0];
double PR_000011011100=P_000011011*R_100[0]+-1*P_000011111*R_101[0]+P_000011211*R_102[0]+-1*P_000111011*R_110[0]+P_000111111*R_111[0]+-1*P_000111211*R_112[0]+P_000211011*R_120[0]+-1*P_000211111*R_121[0]+P_000211211*R_122[0];
double PR_000010012100=P_000010012*R_100[0]+-1*P_000010112*R_101[0]+P_000010212*R_102[0]+-1*P_000010312*R_103[0]+-1*P_000110012*R_110[0]+P_000110112*R_111[0]+-1*P_000110212*R_112[0]+P_000110312*R_113[0];
double PR_002000020100=P_002000020*R_100[0]+-1*P_002000120*R_101[0]+P_002000220*R_102[0]+-1*P_102000020*R_200[0]+P_102000120*R_201[0]+-1*P_102000220*R_202[0]+P_202000020*R_300[0]+-1*P_202000120*R_301[0]+P_202000220*R_302[0];
double PR_001001020100=P_001001020*R_100[0]+-1*P_001001120*R_101[0]+P_001001220*R_102[0]+-1*P_001101020*R_110[0]+P_001101120*R_111[0]+-1*P_001101220*R_112[0]+-1*P_101001020*R_200[0]+P_101001120*R_201[0]+-1*P_101001220*R_202[0]+P_101101020*R_210[0]+-1*P_101101120*R_211[0]+P_101101220*R_212[0];
double PR_000002020100=P_000002020*R_100[0]+-1*P_000002120*R_101[0]+P_000002220*R_102[0]+-1*P_000102020*R_110[0]+P_000102120*R_111[0]+-1*P_000102220*R_112[0]+P_000202020*R_120[0]+-1*P_000202120*R_121[0]+P_000202220*R_122[0];
double PR_001000021100=P_001000021*R_100[0]+-1*P_001000121*R_101[0]+P_001000221*R_102[0]+-1*P_001000321*R_103[0]+-1*P_101000021*R_200[0]+P_101000121*R_201[0]+-1*P_101000221*R_202[0]+P_101000321*R_203[0];
double PR_000001021100=P_000001021*R_100[0]+-1*P_000001121*R_101[0]+P_000001221*R_102[0]+-1*P_000001321*R_103[0]+-1*P_000101021*R_110[0]+P_000101121*R_111[0]+-1*P_000101221*R_112[0]+P_000101321*R_113[0];
double PR_000000022100=P_000000022*R_100[0]+-1*P_000000122*R_101[0]+P_000000222*R_102[0]+-1*P_000000322*R_103[0]+P_000000422*R_104[0];
double PR_022000000002=P_022000000*R_002[0]+-1*P_122000000*R_102[0]+P_222000000*R_202[0]+-1*P_322000000*R_302[0]+P_422000000*R_402[0];
double PR_021001000002=P_021001000*R_002[0]+-1*P_021101000*R_012[0]+-1*P_121001000*R_102[0]+P_121101000*R_112[0]+P_221001000*R_202[0]+-1*P_221101000*R_212[0]+-1*P_321001000*R_302[0]+P_321101000*R_312[0];
double PR_020002000002=P_020002000*R_002[0]+-1*P_020102000*R_012[0]+P_020202000*R_022[0]+-1*P_120002000*R_102[0]+P_120102000*R_112[0]+-1*P_120202000*R_122[0]+P_220002000*R_202[0]+-1*P_220102000*R_212[0]+P_220202000*R_222[0];
double PR_021000001002=P_021000001*R_002[0]+-1*P_021000101*R_003[0]+-1*P_121000001*R_102[0]+P_121000101*R_103[0]+P_221000001*R_202[0]+-1*P_221000101*R_203[0]+-1*P_321000001*R_302[0]+P_321000101*R_303[0];
double PR_020001001002=P_020001001*R_002[0]+-1*P_020001101*R_003[0]+-1*P_020101001*R_012[0]+P_020101101*R_013[0]+-1*P_120001001*R_102[0]+P_120001101*R_103[0]+P_120101001*R_112[0]+-1*P_120101101*R_113[0]+P_220001001*R_202[0]+-1*P_220001101*R_203[0]+-1*P_220101001*R_212[0]+P_220101101*R_213[0];
double PR_020000002002=P_020000002*R_002[0]+-1*P_020000102*R_003[0]+P_020000202*R_004[0]+-1*P_120000002*R_102[0]+P_120000102*R_103[0]+-1*P_120000202*R_104[0]+P_220000002*R_202[0]+-1*P_220000102*R_203[0]+P_220000202*R_204[0];
double PR_012010000002=P_012010000*R_002[0]+-1*P_012110000*R_012[0]+-1*P_112010000*R_102[0]+P_112110000*R_112[0]+P_212010000*R_202[0]+-1*P_212110000*R_212[0]+-1*P_312010000*R_302[0]+P_312110000*R_312[0];
double PR_011011000002=P_011011000*R_002[0]+-1*P_011111000*R_012[0]+P_011211000*R_022[0]+-1*P_111011000*R_102[0]+P_111111000*R_112[0]+-1*P_111211000*R_122[0]+P_211011000*R_202[0]+-1*P_211111000*R_212[0]+P_211211000*R_222[0];
double PR_010012000002=P_010012000*R_002[0]+-1*P_010112000*R_012[0]+P_010212000*R_022[0]+-1*P_010312000*R_032[0]+-1*P_110012000*R_102[0]+P_110112000*R_112[0]+-1*P_110212000*R_122[0]+P_110312000*R_132[0];
double PR_011010001002=P_011010001*R_002[0]+-1*P_011010101*R_003[0]+-1*P_011110001*R_012[0]+P_011110101*R_013[0]+-1*P_111010001*R_102[0]+P_111010101*R_103[0]+P_111110001*R_112[0]+-1*P_111110101*R_113[0]+P_211010001*R_202[0]+-1*P_211010101*R_203[0]+-1*P_211110001*R_212[0]+P_211110101*R_213[0];
double PR_010011001002=P_010011001*R_002[0]+-1*P_010011101*R_003[0]+-1*P_010111001*R_012[0]+P_010111101*R_013[0]+P_010211001*R_022[0]+-1*P_010211101*R_023[0]+-1*P_110011001*R_102[0]+P_110011101*R_103[0]+P_110111001*R_112[0]+-1*P_110111101*R_113[0]+-1*P_110211001*R_122[0]+P_110211101*R_123[0];
double PR_010010002002=P_010010002*R_002[0]+-1*P_010010102*R_003[0]+P_010010202*R_004[0]+-1*P_010110002*R_012[0]+P_010110102*R_013[0]+-1*P_010110202*R_014[0]+-1*P_110010002*R_102[0]+P_110010102*R_103[0]+-1*P_110010202*R_104[0]+P_110110002*R_112[0]+-1*P_110110102*R_113[0]+P_110110202*R_114[0];
double PR_002020000002=P_002020000*R_002[0]+-1*P_002120000*R_012[0]+P_002220000*R_022[0]+-1*P_102020000*R_102[0]+P_102120000*R_112[0]+-1*P_102220000*R_122[0]+P_202020000*R_202[0]+-1*P_202120000*R_212[0]+P_202220000*R_222[0];
double PR_001021000002=P_001021000*R_002[0]+-1*P_001121000*R_012[0]+P_001221000*R_022[0]+-1*P_001321000*R_032[0]+-1*P_101021000*R_102[0]+P_101121000*R_112[0]+-1*P_101221000*R_122[0]+P_101321000*R_132[0];
double PR_000022000002=P_000022000*R_002[0]+-1*P_000122000*R_012[0]+P_000222000*R_022[0]+-1*P_000322000*R_032[0]+P_000422000*R_042[0];
double PR_001020001002=P_001020001*R_002[0]+-1*P_001020101*R_003[0]+-1*P_001120001*R_012[0]+P_001120101*R_013[0]+P_001220001*R_022[0]+-1*P_001220101*R_023[0]+-1*P_101020001*R_102[0]+P_101020101*R_103[0]+P_101120001*R_112[0]+-1*P_101120101*R_113[0]+-1*P_101220001*R_122[0]+P_101220101*R_123[0];
double PR_000021001002=P_000021001*R_002[0]+-1*P_000021101*R_003[0]+-1*P_000121001*R_012[0]+P_000121101*R_013[0]+P_000221001*R_022[0]+-1*P_000221101*R_023[0]+-1*P_000321001*R_032[0]+P_000321101*R_033[0];
double PR_000020002002=P_000020002*R_002[0]+-1*P_000020102*R_003[0]+P_000020202*R_004[0]+-1*P_000120002*R_012[0]+P_000120102*R_013[0]+-1*P_000120202*R_014[0]+P_000220002*R_022[0]+-1*P_000220102*R_023[0]+P_000220202*R_024[0];
double PR_012000010002=P_012000010*R_002[0]+-1*P_012000110*R_003[0]+-1*P_112000010*R_102[0]+P_112000110*R_103[0]+P_212000010*R_202[0]+-1*P_212000110*R_203[0]+-1*P_312000010*R_302[0]+P_312000110*R_303[0];
double PR_011001010002=P_011001010*R_002[0]+-1*P_011001110*R_003[0]+-1*P_011101010*R_012[0]+P_011101110*R_013[0]+-1*P_111001010*R_102[0]+P_111001110*R_103[0]+P_111101010*R_112[0]+-1*P_111101110*R_113[0]+P_211001010*R_202[0]+-1*P_211001110*R_203[0]+-1*P_211101010*R_212[0]+P_211101110*R_213[0];
double PR_010002010002=P_010002010*R_002[0]+-1*P_010002110*R_003[0]+-1*P_010102010*R_012[0]+P_010102110*R_013[0]+P_010202010*R_022[0]+-1*P_010202110*R_023[0]+-1*P_110002010*R_102[0]+P_110002110*R_103[0]+P_110102010*R_112[0]+-1*P_110102110*R_113[0]+-1*P_110202010*R_122[0]+P_110202110*R_123[0];
double PR_011000011002=P_011000011*R_002[0]+-1*P_011000111*R_003[0]+P_011000211*R_004[0]+-1*P_111000011*R_102[0]+P_111000111*R_103[0]+-1*P_111000211*R_104[0]+P_211000011*R_202[0]+-1*P_211000111*R_203[0]+P_211000211*R_204[0];
double PR_010001011002=P_010001011*R_002[0]+-1*P_010001111*R_003[0]+P_010001211*R_004[0]+-1*P_010101011*R_012[0]+P_010101111*R_013[0]+-1*P_010101211*R_014[0]+-1*P_110001011*R_102[0]+P_110001111*R_103[0]+-1*P_110001211*R_104[0]+P_110101011*R_112[0]+-1*P_110101111*R_113[0]+P_110101211*R_114[0];
double PR_010000012002=P_010000012*R_002[0]+-1*P_010000112*R_003[0]+P_010000212*R_004[0]+-1*P_010000312*R_005[0]+-1*P_110000012*R_102[0]+P_110000112*R_103[0]+-1*P_110000212*R_104[0]+P_110000312*R_105[0];
double PR_002010010002=P_002010010*R_002[0]+-1*P_002010110*R_003[0]+-1*P_002110010*R_012[0]+P_002110110*R_013[0]+-1*P_102010010*R_102[0]+P_102010110*R_103[0]+P_102110010*R_112[0]+-1*P_102110110*R_113[0]+P_202010010*R_202[0]+-1*P_202010110*R_203[0]+-1*P_202110010*R_212[0]+P_202110110*R_213[0];
double PR_001011010002=P_001011010*R_002[0]+-1*P_001011110*R_003[0]+-1*P_001111010*R_012[0]+P_001111110*R_013[0]+P_001211010*R_022[0]+-1*P_001211110*R_023[0]+-1*P_101011010*R_102[0]+P_101011110*R_103[0]+P_101111010*R_112[0]+-1*P_101111110*R_113[0]+-1*P_101211010*R_122[0]+P_101211110*R_123[0];
double PR_000012010002=P_000012010*R_002[0]+-1*P_000012110*R_003[0]+-1*P_000112010*R_012[0]+P_000112110*R_013[0]+P_000212010*R_022[0]+-1*P_000212110*R_023[0]+-1*P_000312010*R_032[0]+P_000312110*R_033[0];
double PR_001010011002=P_001010011*R_002[0]+-1*P_001010111*R_003[0]+P_001010211*R_004[0]+-1*P_001110011*R_012[0]+P_001110111*R_013[0]+-1*P_001110211*R_014[0]+-1*P_101010011*R_102[0]+P_101010111*R_103[0]+-1*P_101010211*R_104[0]+P_101110011*R_112[0]+-1*P_101110111*R_113[0]+P_101110211*R_114[0];
double PR_000011011002=P_000011011*R_002[0]+-1*P_000011111*R_003[0]+P_000011211*R_004[0]+-1*P_000111011*R_012[0]+P_000111111*R_013[0]+-1*P_000111211*R_014[0]+P_000211011*R_022[0]+-1*P_000211111*R_023[0]+P_000211211*R_024[0];
double PR_000010012002=P_000010012*R_002[0]+-1*P_000010112*R_003[0]+P_000010212*R_004[0]+-1*P_000010312*R_005[0]+-1*P_000110012*R_012[0]+P_000110112*R_013[0]+-1*P_000110212*R_014[0]+P_000110312*R_015[0];
double PR_002000020002=P_002000020*R_002[0]+-1*P_002000120*R_003[0]+P_002000220*R_004[0]+-1*P_102000020*R_102[0]+P_102000120*R_103[0]+-1*P_102000220*R_104[0]+P_202000020*R_202[0]+-1*P_202000120*R_203[0]+P_202000220*R_204[0];
double PR_001001020002=P_001001020*R_002[0]+-1*P_001001120*R_003[0]+P_001001220*R_004[0]+-1*P_001101020*R_012[0]+P_001101120*R_013[0]+-1*P_001101220*R_014[0]+-1*P_101001020*R_102[0]+P_101001120*R_103[0]+-1*P_101001220*R_104[0]+P_101101020*R_112[0]+-1*P_101101120*R_113[0]+P_101101220*R_114[0];
double PR_000002020002=P_000002020*R_002[0]+-1*P_000002120*R_003[0]+P_000002220*R_004[0]+-1*P_000102020*R_012[0]+P_000102120*R_013[0]+-1*P_000102220*R_014[0]+P_000202020*R_022[0]+-1*P_000202120*R_023[0]+P_000202220*R_024[0];
double PR_001000021002=P_001000021*R_002[0]+-1*P_001000121*R_003[0]+P_001000221*R_004[0]+-1*P_001000321*R_005[0]+-1*P_101000021*R_102[0]+P_101000121*R_103[0]+-1*P_101000221*R_104[0]+P_101000321*R_105[0];
double PR_000001021002=P_000001021*R_002[0]+-1*P_000001121*R_003[0]+P_000001221*R_004[0]+-1*P_000001321*R_005[0]+-1*P_000101021*R_012[0]+P_000101121*R_013[0]+-1*P_000101221*R_014[0]+P_000101321*R_015[0];
double PR_000000022002=P_000000022*R_002[0]+-1*P_000000122*R_003[0]+P_000000222*R_004[0]+-1*P_000000322*R_005[0]+P_000000422*R_006[0];
// --- PR_*_011 group: contraction of P_ coefficients against the R table with base index 011. ---
// Pattern (observable below): each term pairs P_<d><rest> with R_<base + decade offsets of d>,
// with signs alternating per offset order. Auto-generated (hipify'd) — do not hand-edit terms.
// NOTE(review): presumably McMurchie-Davidson E-coefficient x Hermite-integral products — confirm with generator.
double PR_022000000011=P_022000000*R_011[0]+-1*P_122000000*R_111[0]+P_222000000*R_211[0]+-1*P_322000000*R_311[0]+P_422000000*R_411[0];
double PR_021001000011=P_021001000*R_011[0]+-1*P_021101000*R_021[0]+-1*P_121001000*R_111[0]+P_121101000*R_121[0]+P_221001000*R_211[0]+-1*P_221101000*R_221[0]+-1*P_321001000*R_311[0]+P_321101000*R_321[0];
double PR_020002000011=P_020002000*R_011[0]+-1*P_020102000*R_021[0]+P_020202000*R_031[0]+-1*P_120002000*R_111[0]+P_120102000*R_121[0]+-1*P_120202000*R_131[0]+P_220002000*R_211[0]+-1*P_220102000*R_221[0]+P_220202000*R_231[0];
double PR_021000001011=P_021000001*R_011[0]+-1*P_021000101*R_012[0]+-1*P_121000001*R_111[0]+P_121000101*R_112[0]+P_221000001*R_211[0]+-1*P_221000101*R_212[0]+-1*P_321000001*R_311[0]+P_321000101*R_312[0];
double PR_020001001011=P_020001001*R_011[0]+-1*P_020001101*R_012[0]+-1*P_020101001*R_021[0]+P_020101101*R_022[0]+-1*P_120001001*R_111[0]+P_120001101*R_112[0]+P_120101001*R_121[0]+-1*P_120101101*R_122[0]+P_220001001*R_211[0]+-1*P_220001101*R_212[0]+-1*P_220101001*R_221[0]+P_220101101*R_222[0];
double PR_020000002011=P_020000002*R_011[0]+-1*P_020000102*R_012[0]+P_020000202*R_013[0]+-1*P_120000002*R_111[0]+P_120000102*R_112[0]+-1*P_120000202*R_113[0]+P_220000002*R_211[0]+-1*P_220000102*R_212[0]+P_220000202*R_213[0];
double PR_012010000011=P_012010000*R_011[0]+-1*P_012110000*R_021[0]+-1*P_112010000*R_111[0]+P_112110000*R_121[0]+P_212010000*R_211[0]+-1*P_212110000*R_221[0]+-1*P_312010000*R_311[0]+P_312110000*R_321[0];
double PR_011011000011=P_011011000*R_011[0]+-1*P_011111000*R_021[0]+P_011211000*R_031[0]+-1*P_111011000*R_111[0]+P_111111000*R_121[0]+-1*P_111211000*R_131[0]+P_211011000*R_211[0]+-1*P_211111000*R_221[0]+P_211211000*R_231[0];
double PR_010012000011=P_010012000*R_011[0]+-1*P_010112000*R_021[0]+P_010212000*R_031[0]+-1*P_010312000*R_041[0]+-1*P_110012000*R_111[0]+P_110112000*R_121[0]+-1*P_110212000*R_131[0]+P_110312000*R_141[0];
double PR_011010001011=P_011010001*R_011[0]+-1*P_011010101*R_012[0]+-1*P_011110001*R_021[0]+P_011110101*R_022[0]+-1*P_111010001*R_111[0]+P_111010101*R_112[0]+P_111110001*R_121[0]+-1*P_111110101*R_122[0]+P_211010001*R_211[0]+-1*P_211010101*R_212[0]+-1*P_211110001*R_221[0]+P_211110101*R_222[0];
double PR_010011001011=P_010011001*R_011[0]+-1*P_010011101*R_012[0]+-1*P_010111001*R_021[0]+P_010111101*R_022[0]+P_010211001*R_031[0]+-1*P_010211101*R_032[0]+-1*P_110011001*R_111[0]+P_110011101*R_112[0]+P_110111001*R_121[0]+-1*P_110111101*R_122[0]+-1*P_110211001*R_131[0]+P_110211101*R_132[0];
double PR_010010002011=P_010010002*R_011[0]+-1*P_010010102*R_012[0]+P_010010202*R_013[0]+-1*P_010110002*R_021[0]+P_010110102*R_022[0]+-1*P_010110202*R_023[0]+-1*P_110010002*R_111[0]+P_110010102*R_112[0]+-1*P_110010202*R_113[0]+P_110110002*R_121[0]+-1*P_110110102*R_122[0]+P_110110202*R_123[0];
double PR_002020000011=P_002020000*R_011[0]+-1*P_002120000*R_021[0]+P_002220000*R_031[0]+-1*P_102020000*R_111[0]+P_102120000*R_121[0]+-1*P_102220000*R_131[0]+P_202020000*R_211[0]+-1*P_202120000*R_221[0]+P_202220000*R_231[0];
double PR_001021000011=P_001021000*R_011[0]+-1*P_001121000*R_021[0]+P_001221000*R_031[0]+-1*P_001321000*R_041[0]+-1*P_101021000*R_111[0]+P_101121000*R_121[0]+-1*P_101221000*R_131[0]+P_101321000*R_141[0];
double PR_000022000011=P_000022000*R_011[0]+-1*P_000122000*R_021[0]+P_000222000*R_031[0]+-1*P_000322000*R_041[0]+P_000422000*R_051[0];
double PR_001020001011=P_001020001*R_011[0]+-1*P_001020101*R_012[0]+-1*P_001120001*R_021[0]+P_001120101*R_022[0]+P_001220001*R_031[0]+-1*P_001220101*R_032[0]+-1*P_101020001*R_111[0]+P_101020101*R_112[0]+P_101120001*R_121[0]+-1*P_101120101*R_122[0]+-1*P_101220001*R_131[0]+P_101220101*R_132[0];
double PR_000021001011=P_000021001*R_011[0]+-1*P_000021101*R_012[0]+-1*P_000121001*R_021[0]+P_000121101*R_022[0]+P_000221001*R_031[0]+-1*P_000221101*R_032[0]+-1*P_000321001*R_041[0]+P_000321101*R_042[0];
double PR_000020002011=P_000020002*R_011[0]+-1*P_000020102*R_012[0]+P_000020202*R_013[0]+-1*P_000120002*R_021[0]+P_000120102*R_022[0]+-1*P_000120202*R_023[0]+P_000220002*R_031[0]+-1*P_000220102*R_032[0]+P_000220202*R_033[0];
double PR_012000010011=P_012000010*R_011[0]+-1*P_012000110*R_012[0]+-1*P_112000010*R_111[0]+P_112000110*R_112[0]+P_212000010*R_211[0]+-1*P_212000110*R_212[0]+-1*P_312000010*R_311[0]+P_312000110*R_312[0];
double PR_011001010011=P_011001010*R_011[0]+-1*P_011001110*R_012[0]+-1*P_011101010*R_021[0]+P_011101110*R_022[0]+-1*P_111001010*R_111[0]+P_111001110*R_112[0]+P_111101010*R_121[0]+-1*P_111101110*R_122[0]+P_211001010*R_211[0]+-1*P_211001110*R_212[0]+-1*P_211101010*R_221[0]+P_211101110*R_222[0];
double PR_010002010011=P_010002010*R_011[0]+-1*P_010002110*R_012[0]+-1*P_010102010*R_021[0]+P_010102110*R_022[0]+P_010202010*R_031[0]+-1*P_010202110*R_032[0]+-1*P_110002010*R_111[0]+P_110002110*R_112[0]+P_110102010*R_121[0]+-1*P_110102110*R_122[0]+-1*P_110202010*R_131[0]+P_110202110*R_132[0];
double PR_011000011011=P_011000011*R_011[0]+-1*P_011000111*R_012[0]+P_011000211*R_013[0]+-1*P_111000011*R_111[0]+P_111000111*R_112[0]+-1*P_111000211*R_113[0]+P_211000011*R_211[0]+-1*P_211000111*R_212[0]+P_211000211*R_213[0];
double PR_010001011011=P_010001011*R_011[0]+-1*P_010001111*R_012[0]+P_010001211*R_013[0]+-1*P_010101011*R_021[0]+P_010101111*R_022[0]+-1*P_010101211*R_023[0]+-1*P_110001011*R_111[0]+P_110001111*R_112[0]+-1*P_110001211*R_113[0]+P_110101011*R_121[0]+-1*P_110101111*R_122[0]+P_110101211*R_123[0];
double PR_010000012011=P_010000012*R_011[0]+-1*P_010000112*R_012[0]+P_010000212*R_013[0]+-1*P_010000312*R_014[0]+-1*P_110000012*R_111[0]+P_110000112*R_112[0]+-1*P_110000212*R_113[0]+P_110000312*R_114[0];
double PR_002010010011=P_002010010*R_011[0]+-1*P_002010110*R_012[0]+-1*P_002110010*R_021[0]+P_002110110*R_022[0]+-1*P_102010010*R_111[0]+P_102010110*R_112[0]+P_102110010*R_121[0]+-1*P_102110110*R_122[0]+P_202010010*R_211[0]+-1*P_202010110*R_212[0]+-1*P_202110010*R_221[0]+P_202110110*R_222[0];
double PR_001011010011=P_001011010*R_011[0]+-1*P_001011110*R_012[0]+-1*P_001111010*R_021[0]+P_001111110*R_022[0]+P_001211010*R_031[0]+-1*P_001211110*R_032[0]+-1*P_101011010*R_111[0]+P_101011110*R_112[0]+P_101111010*R_121[0]+-1*P_101111110*R_122[0]+-1*P_101211010*R_131[0]+P_101211110*R_132[0];
double PR_000012010011=P_000012010*R_011[0]+-1*P_000012110*R_012[0]+-1*P_000112010*R_021[0]+P_000112110*R_022[0]+P_000212010*R_031[0]+-1*P_000212110*R_032[0]+-1*P_000312010*R_041[0]+P_000312110*R_042[0];
double PR_001010011011=P_001010011*R_011[0]+-1*P_001010111*R_012[0]+P_001010211*R_013[0]+-1*P_001110011*R_021[0]+P_001110111*R_022[0]+-1*P_001110211*R_023[0]+-1*P_101010011*R_111[0]+P_101010111*R_112[0]+-1*P_101010211*R_113[0]+P_101110011*R_121[0]+-1*P_101110111*R_122[0]+P_101110211*R_123[0];
double PR_000011011011=P_000011011*R_011[0]+-1*P_000011111*R_012[0]+P_000011211*R_013[0]+-1*P_000111011*R_021[0]+P_000111111*R_022[0]+-1*P_000111211*R_023[0]+P_000211011*R_031[0]+-1*P_000211111*R_032[0]+P_000211211*R_033[0];
double PR_000010012011=P_000010012*R_011[0]+-1*P_000010112*R_012[0]+P_000010212*R_013[0]+-1*P_000010312*R_014[0]+-1*P_000110012*R_021[0]+P_000110112*R_022[0]+-1*P_000110212*R_023[0]+P_000110312*R_024[0];
double PR_002000020011=P_002000020*R_011[0]+-1*P_002000120*R_012[0]+P_002000220*R_013[0]+-1*P_102000020*R_111[0]+P_102000120*R_112[0]+-1*P_102000220*R_113[0]+P_202000020*R_211[0]+-1*P_202000120*R_212[0]+P_202000220*R_213[0];
double PR_001001020011=P_001001020*R_011[0]+-1*P_001001120*R_012[0]+P_001001220*R_013[0]+-1*P_001101020*R_021[0]+P_001101120*R_022[0]+-1*P_001101220*R_023[0]+-1*P_101001020*R_111[0]+P_101001120*R_112[0]+-1*P_101001220*R_113[0]+P_101101020*R_121[0]+-1*P_101101120*R_122[0]+P_101101220*R_123[0];
double PR_000002020011=P_000002020*R_011[0]+-1*P_000002120*R_012[0]+P_000002220*R_013[0]+-1*P_000102020*R_021[0]+P_000102120*R_022[0]+-1*P_000102220*R_023[0]+P_000202020*R_031[0]+-1*P_000202120*R_032[0]+P_000202220*R_033[0];
double PR_001000021011=P_001000021*R_011[0]+-1*P_001000121*R_012[0]+P_001000221*R_013[0]+-1*P_001000321*R_014[0]+-1*P_101000021*R_111[0]+P_101000121*R_112[0]+-1*P_101000221*R_113[0]+P_101000321*R_114[0];
double PR_000001021011=P_000001021*R_011[0]+-1*P_000001121*R_012[0]+P_000001221*R_013[0]+-1*P_000001321*R_014[0]+-1*P_000101021*R_021[0]+P_000101121*R_022[0]+-1*P_000101221*R_023[0]+P_000101321*R_024[0];
double PR_000000022011=P_000000022*R_011[0]+-1*P_000000122*R_012[0]+P_000000222*R_013[0]+-1*P_000000322*R_014[0]+P_000000422*R_015[0];
// --- PR_*_020 group: same P_ coefficient set contracted against the R table with base index 020. ---
// Each group below repeats the identical sign/offset pattern of the preceding groups, shifted to a
// new R base index. Auto-generated (hipify'd) — do not hand-edit terms.
double PR_022000000020=P_022000000*R_020[0]+-1*P_122000000*R_120[0]+P_222000000*R_220[0]+-1*P_322000000*R_320[0]+P_422000000*R_420[0];
double PR_021001000020=P_021001000*R_020[0]+-1*P_021101000*R_030[0]+-1*P_121001000*R_120[0]+P_121101000*R_130[0]+P_221001000*R_220[0]+-1*P_221101000*R_230[0]+-1*P_321001000*R_320[0]+P_321101000*R_330[0];
double PR_020002000020=P_020002000*R_020[0]+-1*P_020102000*R_030[0]+P_020202000*R_040[0]+-1*P_120002000*R_120[0]+P_120102000*R_130[0]+-1*P_120202000*R_140[0]+P_220002000*R_220[0]+-1*P_220102000*R_230[0]+P_220202000*R_240[0];
double PR_021000001020=P_021000001*R_020[0]+-1*P_021000101*R_021[0]+-1*P_121000001*R_120[0]+P_121000101*R_121[0]+P_221000001*R_220[0]+-1*P_221000101*R_221[0]+-1*P_321000001*R_320[0]+P_321000101*R_321[0];
double PR_020001001020=P_020001001*R_020[0]+-1*P_020001101*R_021[0]+-1*P_020101001*R_030[0]+P_020101101*R_031[0]+-1*P_120001001*R_120[0]+P_120001101*R_121[0]+P_120101001*R_130[0]+-1*P_120101101*R_131[0]+P_220001001*R_220[0]+-1*P_220001101*R_221[0]+-1*P_220101001*R_230[0]+P_220101101*R_231[0];
double PR_020000002020=P_020000002*R_020[0]+-1*P_020000102*R_021[0]+P_020000202*R_022[0]+-1*P_120000002*R_120[0]+P_120000102*R_121[0]+-1*P_120000202*R_122[0]+P_220000002*R_220[0]+-1*P_220000102*R_221[0]+P_220000202*R_222[0];
double PR_012010000020=P_012010000*R_020[0]+-1*P_012110000*R_030[0]+-1*P_112010000*R_120[0]+P_112110000*R_130[0]+P_212010000*R_220[0]+-1*P_212110000*R_230[0]+-1*P_312010000*R_320[0]+P_312110000*R_330[0];
double PR_011011000020=P_011011000*R_020[0]+-1*P_011111000*R_030[0]+P_011211000*R_040[0]+-1*P_111011000*R_120[0]+P_111111000*R_130[0]+-1*P_111211000*R_140[0]+P_211011000*R_220[0]+-1*P_211111000*R_230[0]+P_211211000*R_240[0];
double PR_010012000020=P_010012000*R_020[0]+-1*P_010112000*R_030[0]+P_010212000*R_040[0]+-1*P_010312000*R_050[0]+-1*P_110012000*R_120[0]+P_110112000*R_130[0]+-1*P_110212000*R_140[0]+P_110312000*R_150[0];
double PR_011010001020=P_011010001*R_020[0]+-1*P_011010101*R_021[0]+-1*P_011110001*R_030[0]+P_011110101*R_031[0]+-1*P_111010001*R_120[0]+P_111010101*R_121[0]+P_111110001*R_130[0]+-1*P_111110101*R_131[0]+P_211010001*R_220[0]+-1*P_211010101*R_221[0]+-1*P_211110001*R_230[0]+P_211110101*R_231[0];
double PR_010011001020=P_010011001*R_020[0]+-1*P_010011101*R_021[0]+-1*P_010111001*R_030[0]+P_010111101*R_031[0]+P_010211001*R_040[0]+-1*P_010211101*R_041[0]+-1*P_110011001*R_120[0]+P_110011101*R_121[0]+P_110111001*R_130[0]+-1*P_110111101*R_131[0]+-1*P_110211001*R_140[0]+P_110211101*R_141[0];
double PR_010010002020=P_010010002*R_020[0]+-1*P_010010102*R_021[0]+P_010010202*R_022[0]+-1*P_010110002*R_030[0]+P_010110102*R_031[0]+-1*P_010110202*R_032[0]+-1*P_110010002*R_120[0]+P_110010102*R_121[0]+-1*P_110010202*R_122[0]+P_110110002*R_130[0]+-1*P_110110102*R_131[0]+P_110110202*R_132[0];
double PR_002020000020=P_002020000*R_020[0]+-1*P_002120000*R_030[0]+P_002220000*R_040[0]+-1*P_102020000*R_120[0]+P_102120000*R_130[0]+-1*P_102220000*R_140[0]+P_202020000*R_220[0]+-1*P_202120000*R_230[0]+P_202220000*R_240[0];
double PR_001021000020=P_001021000*R_020[0]+-1*P_001121000*R_030[0]+P_001221000*R_040[0]+-1*P_001321000*R_050[0]+-1*P_101021000*R_120[0]+P_101121000*R_130[0]+-1*P_101221000*R_140[0]+P_101321000*R_150[0];
double PR_000022000020=P_000022000*R_020[0]+-1*P_000122000*R_030[0]+P_000222000*R_040[0]+-1*P_000322000*R_050[0]+P_000422000*R_060[0];
double PR_001020001020=P_001020001*R_020[0]+-1*P_001020101*R_021[0]+-1*P_001120001*R_030[0]+P_001120101*R_031[0]+P_001220001*R_040[0]+-1*P_001220101*R_041[0]+-1*P_101020001*R_120[0]+P_101020101*R_121[0]+P_101120001*R_130[0]+-1*P_101120101*R_131[0]+-1*P_101220001*R_140[0]+P_101220101*R_141[0];
double PR_000021001020=P_000021001*R_020[0]+-1*P_000021101*R_021[0]+-1*P_000121001*R_030[0]+P_000121101*R_031[0]+P_000221001*R_040[0]+-1*P_000221101*R_041[0]+-1*P_000321001*R_050[0]+P_000321101*R_051[0];
double PR_000020002020=P_000020002*R_020[0]+-1*P_000020102*R_021[0]+P_000020202*R_022[0]+-1*P_000120002*R_030[0]+P_000120102*R_031[0]+-1*P_000120202*R_032[0]+P_000220002*R_040[0]+-1*P_000220102*R_041[0]+P_000220202*R_042[0];
double PR_012000010020=P_012000010*R_020[0]+-1*P_012000110*R_021[0]+-1*P_112000010*R_120[0]+P_112000110*R_121[0]+P_212000010*R_220[0]+-1*P_212000110*R_221[0]+-1*P_312000010*R_320[0]+P_312000110*R_321[0];
double PR_011001010020=P_011001010*R_020[0]+-1*P_011001110*R_021[0]+-1*P_011101010*R_030[0]+P_011101110*R_031[0]+-1*P_111001010*R_120[0]+P_111001110*R_121[0]+P_111101010*R_130[0]+-1*P_111101110*R_131[0]+P_211001010*R_220[0]+-1*P_211001110*R_221[0]+-1*P_211101010*R_230[0]+P_211101110*R_231[0];
double PR_010002010020=P_010002010*R_020[0]+-1*P_010002110*R_021[0]+-1*P_010102010*R_030[0]+P_010102110*R_031[0]+P_010202010*R_040[0]+-1*P_010202110*R_041[0]+-1*P_110002010*R_120[0]+P_110002110*R_121[0]+P_110102010*R_130[0]+-1*P_110102110*R_131[0]+-1*P_110202010*R_140[0]+P_110202110*R_141[0];
double PR_011000011020=P_011000011*R_020[0]+-1*P_011000111*R_021[0]+P_011000211*R_022[0]+-1*P_111000011*R_120[0]+P_111000111*R_121[0]+-1*P_111000211*R_122[0]+P_211000011*R_220[0]+-1*P_211000111*R_221[0]+P_211000211*R_222[0];
double PR_010001011020=P_010001011*R_020[0]+-1*P_010001111*R_021[0]+P_010001211*R_022[0]+-1*P_010101011*R_030[0]+P_010101111*R_031[0]+-1*P_010101211*R_032[0]+-1*P_110001011*R_120[0]+P_110001111*R_121[0]+-1*P_110001211*R_122[0]+P_110101011*R_130[0]+-1*P_110101111*R_131[0]+P_110101211*R_132[0];
double PR_010000012020=P_010000012*R_020[0]+-1*P_010000112*R_021[0]+P_010000212*R_022[0]+-1*P_010000312*R_023[0]+-1*P_110000012*R_120[0]+P_110000112*R_121[0]+-1*P_110000212*R_122[0]+P_110000312*R_123[0];
double PR_002010010020=P_002010010*R_020[0]+-1*P_002010110*R_021[0]+-1*P_002110010*R_030[0]+P_002110110*R_031[0]+-1*P_102010010*R_120[0]+P_102010110*R_121[0]+P_102110010*R_130[0]+-1*P_102110110*R_131[0]+P_202010010*R_220[0]+-1*P_202010110*R_221[0]+-1*P_202110010*R_230[0]+P_202110110*R_231[0];
double PR_001011010020=P_001011010*R_020[0]+-1*P_001011110*R_021[0]+-1*P_001111010*R_030[0]+P_001111110*R_031[0]+P_001211010*R_040[0]+-1*P_001211110*R_041[0]+-1*P_101011010*R_120[0]+P_101011110*R_121[0]+P_101111010*R_130[0]+-1*P_101111110*R_131[0]+-1*P_101211010*R_140[0]+P_101211110*R_141[0];
double PR_000012010020=P_000012010*R_020[0]+-1*P_000012110*R_021[0]+-1*P_000112010*R_030[0]+P_000112110*R_031[0]+P_000212010*R_040[0]+-1*P_000212110*R_041[0]+-1*P_000312010*R_050[0]+P_000312110*R_051[0];
double PR_001010011020=P_001010011*R_020[0]+-1*P_001010111*R_021[0]+P_001010211*R_022[0]+-1*P_001110011*R_030[0]+P_001110111*R_031[0]+-1*P_001110211*R_032[0]+-1*P_101010011*R_120[0]+P_101010111*R_121[0]+-1*P_101010211*R_122[0]+P_101110011*R_130[0]+-1*P_101110111*R_131[0]+P_101110211*R_132[0];
double PR_000011011020=P_000011011*R_020[0]+-1*P_000011111*R_021[0]+P_000011211*R_022[0]+-1*P_000111011*R_030[0]+P_000111111*R_031[0]+-1*P_000111211*R_032[0]+P_000211011*R_040[0]+-1*P_000211111*R_041[0]+P_000211211*R_042[0];
double PR_000010012020=P_000010012*R_020[0]+-1*P_000010112*R_021[0]+P_000010212*R_022[0]+-1*P_000010312*R_023[0]+-1*P_000110012*R_030[0]+P_000110112*R_031[0]+-1*P_000110212*R_032[0]+P_000110312*R_033[0];
double PR_002000020020=P_002000020*R_020[0]+-1*P_002000120*R_021[0]+P_002000220*R_022[0]+-1*P_102000020*R_120[0]+P_102000120*R_121[0]+-1*P_102000220*R_122[0]+P_202000020*R_220[0]+-1*P_202000120*R_221[0]+P_202000220*R_222[0];
double PR_001001020020=P_001001020*R_020[0]+-1*P_001001120*R_021[0]+P_001001220*R_022[0]+-1*P_001101020*R_030[0]+P_001101120*R_031[0]+-1*P_001101220*R_032[0]+-1*P_101001020*R_120[0]+P_101001120*R_121[0]+-1*P_101001220*R_122[0]+P_101101020*R_130[0]+-1*P_101101120*R_131[0]+P_101101220*R_132[0];
double PR_000002020020=P_000002020*R_020[0]+-1*P_000002120*R_021[0]+P_000002220*R_022[0]+-1*P_000102020*R_030[0]+P_000102120*R_031[0]+-1*P_000102220*R_032[0]+P_000202020*R_040[0]+-1*P_000202120*R_041[0]+P_000202220*R_042[0];
double PR_001000021020=P_001000021*R_020[0]+-1*P_001000121*R_021[0]+P_001000221*R_022[0]+-1*P_001000321*R_023[0]+-1*P_101000021*R_120[0]+P_101000121*R_121[0]+-1*P_101000221*R_122[0]+P_101000321*R_123[0];
double PR_000001021020=P_000001021*R_020[0]+-1*P_000001121*R_021[0]+P_000001221*R_022[0]+-1*P_000001321*R_023[0]+-1*P_000101021*R_030[0]+P_000101121*R_031[0]+-1*P_000101221*R_032[0]+P_000101321*R_033[0];
double PR_000000022020=P_000000022*R_020[0]+-1*P_000000122*R_021[0]+P_000000222*R_022[0]+-1*P_000000322*R_023[0]+P_000000422*R_024[0];
// --- PR_*_101 group: same P_ coefficient set contracted against the R table with base index 101. ---
// R indices in each term = base index plus the per-digit offsets encoded in the P_ name's leading digits;
// term signs alternate accordingly. Auto-generated (hipify'd) — do not hand-edit terms.
double PR_022000000101=P_022000000*R_101[0]+-1*P_122000000*R_201[0]+P_222000000*R_301[0]+-1*P_322000000*R_401[0]+P_422000000*R_501[0];
double PR_021001000101=P_021001000*R_101[0]+-1*P_021101000*R_111[0]+-1*P_121001000*R_201[0]+P_121101000*R_211[0]+P_221001000*R_301[0]+-1*P_221101000*R_311[0]+-1*P_321001000*R_401[0]+P_321101000*R_411[0];
double PR_020002000101=P_020002000*R_101[0]+-1*P_020102000*R_111[0]+P_020202000*R_121[0]+-1*P_120002000*R_201[0]+P_120102000*R_211[0]+-1*P_120202000*R_221[0]+P_220002000*R_301[0]+-1*P_220102000*R_311[0]+P_220202000*R_321[0];
double PR_021000001101=P_021000001*R_101[0]+-1*P_021000101*R_102[0]+-1*P_121000001*R_201[0]+P_121000101*R_202[0]+P_221000001*R_301[0]+-1*P_221000101*R_302[0]+-1*P_321000001*R_401[0]+P_321000101*R_402[0];
double PR_020001001101=P_020001001*R_101[0]+-1*P_020001101*R_102[0]+-1*P_020101001*R_111[0]+P_020101101*R_112[0]+-1*P_120001001*R_201[0]+P_120001101*R_202[0]+P_120101001*R_211[0]+-1*P_120101101*R_212[0]+P_220001001*R_301[0]+-1*P_220001101*R_302[0]+-1*P_220101001*R_311[0]+P_220101101*R_312[0];
double PR_020000002101=P_020000002*R_101[0]+-1*P_020000102*R_102[0]+P_020000202*R_103[0]+-1*P_120000002*R_201[0]+P_120000102*R_202[0]+-1*P_120000202*R_203[0]+P_220000002*R_301[0]+-1*P_220000102*R_302[0]+P_220000202*R_303[0];
double PR_012010000101=P_012010000*R_101[0]+-1*P_012110000*R_111[0]+-1*P_112010000*R_201[0]+P_112110000*R_211[0]+P_212010000*R_301[0]+-1*P_212110000*R_311[0]+-1*P_312010000*R_401[0]+P_312110000*R_411[0];
double PR_011011000101=P_011011000*R_101[0]+-1*P_011111000*R_111[0]+P_011211000*R_121[0]+-1*P_111011000*R_201[0]+P_111111000*R_211[0]+-1*P_111211000*R_221[0]+P_211011000*R_301[0]+-1*P_211111000*R_311[0]+P_211211000*R_321[0];
double PR_010012000101=P_010012000*R_101[0]+-1*P_010112000*R_111[0]+P_010212000*R_121[0]+-1*P_010312000*R_131[0]+-1*P_110012000*R_201[0]+P_110112000*R_211[0]+-1*P_110212000*R_221[0]+P_110312000*R_231[0];
double PR_011010001101=P_011010001*R_101[0]+-1*P_011010101*R_102[0]+-1*P_011110001*R_111[0]+P_011110101*R_112[0]+-1*P_111010001*R_201[0]+P_111010101*R_202[0]+P_111110001*R_211[0]+-1*P_111110101*R_212[0]+P_211010001*R_301[0]+-1*P_211010101*R_302[0]+-1*P_211110001*R_311[0]+P_211110101*R_312[0];
double PR_010011001101=P_010011001*R_101[0]+-1*P_010011101*R_102[0]+-1*P_010111001*R_111[0]+P_010111101*R_112[0]+P_010211001*R_121[0]+-1*P_010211101*R_122[0]+-1*P_110011001*R_201[0]+P_110011101*R_202[0]+P_110111001*R_211[0]+-1*P_110111101*R_212[0]+-1*P_110211001*R_221[0]+P_110211101*R_222[0];
double PR_010010002101=P_010010002*R_101[0]+-1*P_010010102*R_102[0]+P_010010202*R_103[0]+-1*P_010110002*R_111[0]+P_010110102*R_112[0]+-1*P_010110202*R_113[0]+-1*P_110010002*R_201[0]+P_110010102*R_202[0]+-1*P_110010202*R_203[0]+P_110110002*R_211[0]+-1*P_110110102*R_212[0]+P_110110202*R_213[0];
double PR_002020000101=P_002020000*R_101[0]+-1*P_002120000*R_111[0]+P_002220000*R_121[0]+-1*P_102020000*R_201[0]+P_102120000*R_211[0]+-1*P_102220000*R_221[0]+P_202020000*R_301[0]+-1*P_202120000*R_311[0]+P_202220000*R_321[0];
double PR_001021000101=P_001021000*R_101[0]+-1*P_001121000*R_111[0]+P_001221000*R_121[0]+-1*P_001321000*R_131[0]+-1*P_101021000*R_201[0]+P_101121000*R_211[0]+-1*P_101221000*R_221[0]+P_101321000*R_231[0];
double PR_000022000101=P_000022000*R_101[0]+-1*P_000122000*R_111[0]+P_000222000*R_121[0]+-1*P_000322000*R_131[0]+P_000422000*R_141[0];
double PR_001020001101=P_001020001*R_101[0]+-1*P_001020101*R_102[0]+-1*P_001120001*R_111[0]+P_001120101*R_112[0]+P_001220001*R_121[0]+-1*P_001220101*R_122[0]+-1*P_101020001*R_201[0]+P_101020101*R_202[0]+P_101120001*R_211[0]+-1*P_101120101*R_212[0]+-1*P_101220001*R_221[0]+P_101220101*R_222[0];
double PR_000021001101=P_000021001*R_101[0]+-1*P_000021101*R_102[0]+-1*P_000121001*R_111[0]+P_000121101*R_112[0]+P_000221001*R_121[0]+-1*P_000221101*R_122[0]+-1*P_000321001*R_131[0]+P_000321101*R_132[0];
double PR_000020002101=P_000020002*R_101[0]+-1*P_000020102*R_102[0]+P_000020202*R_103[0]+-1*P_000120002*R_111[0]+P_000120102*R_112[0]+-1*P_000120202*R_113[0]+P_000220002*R_121[0]+-1*P_000220102*R_122[0]+P_000220202*R_123[0];
double PR_012000010101=P_012000010*R_101[0]+-1*P_012000110*R_102[0]+-1*P_112000010*R_201[0]+P_112000110*R_202[0]+P_212000010*R_301[0]+-1*P_212000110*R_302[0]+-1*P_312000010*R_401[0]+P_312000110*R_402[0];
double PR_011001010101=P_011001010*R_101[0]+-1*P_011001110*R_102[0]+-1*P_011101010*R_111[0]+P_011101110*R_112[0]+-1*P_111001010*R_201[0]+P_111001110*R_202[0]+P_111101010*R_211[0]+-1*P_111101110*R_212[0]+P_211001010*R_301[0]+-1*P_211001110*R_302[0]+-1*P_211101010*R_311[0]+P_211101110*R_312[0];
double PR_010002010101=P_010002010*R_101[0]+-1*P_010002110*R_102[0]+-1*P_010102010*R_111[0]+P_010102110*R_112[0]+P_010202010*R_121[0]+-1*P_010202110*R_122[0]+-1*P_110002010*R_201[0]+P_110002110*R_202[0]+P_110102010*R_211[0]+-1*P_110102110*R_212[0]+-1*P_110202010*R_221[0]+P_110202110*R_222[0];
double PR_011000011101=P_011000011*R_101[0]+-1*P_011000111*R_102[0]+P_011000211*R_103[0]+-1*P_111000011*R_201[0]+P_111000111*R_202[0]+-1*P_111000211*R_203[0]+P_211000011*R_301[0]+-1*P_211000111*R_302[0]+P_211000211*R_303[0];
double PR_010001011101=P_010001011*R_101[0]+-1*P_010001111*R_102[0]+P_010001211*R_103[0]+-1*P_010101011*R_111[0]+P_010101111*R_112[0]+-1*P_010101211*R_113[0]+-1*P_110001011*R_201[0]+P_110001111*R_202[0]+-1*P_110001211*R_203[0]+P_110101011*R_211[0]+-1*P_110101111*R_212[0]+P_110101211*R_213[0];
double PR_010000012101=P_010000012*R_101[0]+-1*P_010000112*R_102[0]+P_010000212*R_103[0]+-1*P_010000312*R_104[0]+-1*P_110000012*R_201[0]+P_110000112*R_202[0]+-1*P_110000212*R_203[0]+P_110000312*R_204[0];
double PR_002010010101=P_002010010*R_101[0]+-1*P_002010110*R_102[0]+-1*P_002110010*R_111[0]+P_002110110*R_112[0]+-1*P_102010010*R_201[0]+P_102010110*R_202[0]+P_102110010*R_211[0]+-1*P_102110110*R_212[0]+P_202010010*R_301[0]+-1*P_202010110*R_302[0]+-1*P_202110010*R_311[0]+P_202110110*R_312[0];
double PR_001011010101=P_001011010*R_101[0]+-1*P_001011110*R_102[0]+-1*P_001111010*R_111[0]+P_001111110*R_112[0]+P_001211010*R_121[0]+-1*P_001211110*R_122[0]+-1*P_101011010*R_201[0]+P_101011110*R_202[0]+P_101111010*R_211[0]+-1*P_101111110*R_212[0]+-1*P_101211010*R_221[0]+P_101211110*R_222[0];
double PR_000012010101=P_000012010*R_101[0]+-1*P_000012110*R_102[0]+-1*P_000112010*R_111[0]+P_000112110*R_112[0]+P_000212010*R_121[0]+-1*P_000212110*R_122[0]+-1*P_000312010*R_131[0]+P_000312110*R_132[0];
double PR_001010011101=P_001010011*R_101[0]+-1*P_001010111*R_102[0]+P_001010211*R_103[0]+-1*P_001110011*R_111[0]+P_001110111*R_112[0]+-1*P_001110211*R_113[0]+-1*P_101010011*R_201[0]+P_101010111*R_202[0]+-1*P_101010211*R_203[0]+P_101110011*R_211[0]+-1*P_101110111*R_212[0]+P_101110211*R_213[0];
double PR_000011011101=P_000011011*R_101[0]+-1*P_000011111*R_102[0]+P_000011211*R_103[0]+-1*P_000111011*R_111[0]+P_000111111*R_112[0]+-1*P_000111211*R_113[0]+P_000211011*R_121[0]+-1*P_000211111*R_122[0]+P_000211211*R_123[0];
double PR_000010012101=P_000010012*R_101[0]+-1*P_000010112*R_102[0]+P_000010212*R_103[0]+-1*P_000010312*R_104[0]+-1*P_000110012*R_111[0]+P_000110112*R_112[0]+-1*P_000110212*R_113[0]+P_000110312*R_114[0];
double PR_002000020101=P_002000020*R_101[0]+-1*P_002000120*R_102[0]+P_002000220*R_103[0]+-1*P_102000020*R_201[0]+P_102000120*R_202[0]+-1*P_102000220*R_203[0]+P_202000020*R_301[0]+-1*P_202000120*R_302[0]+P_202000220*R_303[0];
double PR_001001020101=P_001001020*R_101[0]+-1*P_001001120*R_102[0]+P_001001220*R_103[0]+-1*P_001101020*R_111[0]+P_001101120*R_112[0]+-1*P_001101220*R_113[0]+-1*P_101001020*R_201[0]+P_101001120*R_202[0]+-1*P_101001220*R_203[0]+P_101101020*R_211[0]+-1*P_101101120*R_212[0]+P_101101220*R_213[0];
double PR_000002020101=P_000002020*R_101[0]+-1*P_000002120*R_102[0]+P_000002220*R_103[0]+-1*P_000102020*R_111[0]+P_000102120*R_112[0]+-1*P_000102220*R_113[0]+P_000202020*R_121[0]+-1*P_000202120*R_122[0]+P_000202220*R_123[0];
double PR_001000021101=P_001000021*R_101[0]+-1*P_001000121*R_102[0]+P_001000221*R_103[0]+-1*P_001000321*R_104[0]+-1*P_101000021*R_201[0]+P_101000121*R_202[0]+-1*P_101000221*R_203[0]+P_101000321*R_204[0];
double PR_000001021101=P_000001021*R_101[0]+-1*P_000001121*R_102[0]+P_000001221*R_103[0]+-1*P_000001321*R_104[0]+-1*P_000101021*R_111[0]+P_000101121*R_112[0]+-1*P_000101221*R_113[0]+P_000101321*R_114[0];
double PR_000000022101=P_000000022*R_101[0]+-1*P_000000122*R_102[0]+P_000000222*R_103[0]+-1*P_000000322*R_104[0]+P_000000422*R_105[0];
// --- PR_*_110 group: same P_ coefficient set contracted against the R table with base index 110. ---
// Group continues past this view; structure matches the preceding _011/_020/_101 groups.
// Auto-generated (hipify'd) — do not hand-edit terms.
double PR_022000000110=P_022000000*R_110[0]+-1*P_122000000*R_210[0]+P_222000000*R_310[0]+-1*P_322000000*R_410[0]+P_422000000*R_510[0];
double PR_021001000110=P_021001000*R_110[0]+-1*P_021101000*R_120[0]+-1*P_121001000*R_210[0]+P_121101000*R_220[0]+P_221001000*R_310[0]+-1*P_221101000*R_320[0]+-1*P_321001000*R_410[0]+P_321101000*R_420[0];
double PR_020002000110=P_020002000*R_110[0]+-1*P_020102000*R_120[0]+P_020202000*R_130[0]+-1*P_120002000*R_210[0]+P_120102000*R_220[0]+-1*P_120202000*R_230[0]+P_220002000*R_310[0]+-1*P_220102000*R_320[0]+P_220202000*R_330[0];
double PR_021000001110=P_021000001*R_110[0]+-1*P_021000101*R_111[0]+-1*P_121000001*R_210[0]+P_121000101*R_211[0]+P_221000001*R_310[0]+-1*P_221000101*R_311[0]+-1*P_321000001*R_410[0]+P_321000101*R_411[0];
double PR_020001001110=P_020001001*R_110[0]+-1*P_020001101*R_111[0]+-1*P_020101001*R_120[0]+P_020101101*R_121[0]+-1*P_120001001*R_210[0]+P_120001101*R_211[0]+P_120101001*R_220[0]+-1*P_120101101*R_221[0]+P_220001001*R_310[0]+-1*P_220001101*R_311[0]+-1*P_220101001*R_320[0]+P_220101101*R_321[0];
double PR_020000002110=P_020000002*R_110[0]+-1*P_020000102*R_111[0]+P_020000202*R_112[0]+-1*P_120000002*R_210[0]+P_120000102*R_211[0]+-1*P_120000202*R_212[0]+P_220000002*R_310[0]+-1*P_220000102*R_311[0]+P_220000202*R_312[0];
double PR_012010000110=P_012010000*R_110[0]+-1*P_012110000*R_120[0]+-1*P_112010000*R_210[0]+P_112110000*R_220[0]+P_212010000*R_310[0]+-1*P_212110000*R_320[0]+-1*P_312010000*R_410[0]+P_312110000*R_420[0];
double PR_011011000110=P_011011000*R_110[0]+-1*P_011111000*R_120[0]+P_011211000*R_130[0]+-1*P_111011000*R_210[0]+P_111111000*R_220[0]+-1*P_111211000*R_230[0]+P_211011000*R_310[0]+-1*P_211111000*R_320[0]+P_211211000*R_330[0];
double PR_010012000110=P_010012000*R_110[0]+-1*P_010112000*R_120[0]+P_010212000*R_130[0]+-1*P_010312000*R_140[0]+-1*P_110012000*R_210[0]+P_110112000*R_220[0]+-1*P_110212000*R_230[0]+P_110312000*R_240[0];
double PR_011010001110=P_011010001*R_110[0]+-1*P_011010101*R_111[0]+-1*P_011110001*R_120[0]+P_011110101*R_121[0]+-1*P_111010001*R_210[0]+P_111010101*R_211[0]+P_111110001*R_220[0]+-1*P_111110101*R_221[0]+P_211010001*R_310[0]+-1*P_211010101*R_311[0]+-1*P_211110001*R_320[0]+P_211110101*R_321[0];
double PR_010011001110=P_010011001*R_110[0]+-1*P_010011101*R_111[0]+-1*P_010111001*R_120[0]+P_010111101*R_121[0]+P_010211001*R_130[0]+-1*P_010211101*R_131[0]+-1*P_110011001*R_210[0]+P_110011101*R_211[0]+P_110111001*R_220[0]+-1*P_110111101*R_221[0]+-1*P_110211001*R_230[0]+P_110211101*R_231[0];
double PR_010010002110=P_010010002*R_110[0]+-1*P_010010102*R_111[0]+P_010010202*R_112[0]+-1*P_010110002*R_120[0]+P_010110102*R_121[0]+-1*P_010110202*R_122[0]+-1*P_110010002*R_210[0]+P_110010102*R_211[0]+-1*P_110010202*R_212[0]+P_110110002*R_220[0]+-1*P_110110102*R_221[0]+P_110110202*R_222[0];
double PR_002020000110=P_002020000*R_110[0]+-1*P_002120000*R_120[0]+P_002220000*R_130[0]+-1*P_102020000*R_210[0]+P_102120000*R_220[0]+-1*P_102220000*R_230[0]+P_202020000*R_310[0]+-1*P_202120000*R_320[0]+P_202220000*R_330[0];
double PR_001021000110=P_001021000*R_110[0]+-1*P_001121000*R_120[0]+P_001221000*R_130[0]+-1*P_001321000*R_140[0]+-1*P_101021000*R_210[0]+P_101121000*R_220[0]+-1*P_101221000*R_230[0]+P_101321000*R_240[0];
double PR_000022000110=P_000022000*R_110[0]+-1*P_000122000*R_120[0]+P_000222000*R_130[0]+-1*P_000322000*R_140[0]+P_000422000*R_150[0];
double PR_001020001110=P_001020001*R_110[0]+-1*P_001020101*R_111[0]+-1*P_001120001*R_120[0]+P_001120101*R_121[0]+P_001220001*R_130[0]+-1*P_001220101*R_131[0]+-1*P_101020001*R_210[0]+P_101020101*R_211[0]+P_101120001*R_220[0]+-1*P_101120101*R_221[0]+-1*P_101220001*R_230[0]+P_101220101*R_231[0];
double PR_000021001110=P_000021001*R_110[0]+-1*P_000021101*R_111[0]+-1*P_000121001*R_120[0]+P_000121101*R_121[0]+P_000221001*R_130[0]+-1*P_000221101*R_131[0]+-1*P_000321001*R_140[0]+P_000321101*R_141[0];
double PR_000020002110=P_000020002*R_110[0]+-1*P_000020102*R_111[0]+P_000020202*R_112[0]+-1*P_000120002*R_120[0]+P_000120102*R_121[0]+-1*P_000120202*R_122[0]+P_000220002*R_130[0]+-1*P_000220102*R_131[0]+P_000220202*R_132[0];
double PR_012000010110=P_012000010*R_110[0]+-1*P_012000110*R_111[0]+-1*P_112000010*R_210[0]+P_112000110*R_211[0]+P_212000010*R_310[0]+-1*P_212000110*R_311[0]+-1*P_312000010*R_410[0]+P_312000110*R_411[0];
double PR_011001010110=P_011001010*R_110[0]+-1*P_011001110*R_111[0]+-1*P_011101010*R_120[0]+P_011101110*R_121[0]+-1*P_111001010*R_210[0]+P_111001110*R_211[0]+P_111101010*R_220[0]+-1*P_111101110*R_221[0]+P_211001010*R_310[0]+-1*P_211001110*R_311[0]+-1*P_211101010*R_320[0]+P_211101110*R_321[0];
double PR_010002010110=P_010002010*R_110[0]+-1*P_010002110*R_111[0]+-1*P_010102010*R_120[0]+P_010102110*R_121[0]+P_010202010*R_130[0]+-1*P_010202110*R_131[0]+-1*P_110002010*R_210[0]+P_110002110*R_211[0]+P_110102010*R_220[0]+-1*P_110102110*R_221[0]+-1*P_110202010*R_230[0]+P_110202110*R_231[0];
double PR_011000011110=P_011000011*R_110[0]+-1*P_011000111*R_111[0]+P_011000211*R_112[0]+-1*P_111000011*R_210[0]+P_111000111*R_211[0]+-1*P_111000211*R_212[0]+P_211000011*R_310[0]+-1*P_211000111*R_311[0]+P_211000211*R_312[0];
double PR_010001011110=P_010001011*R_110[0]+-1*P_010001111*R_111[0]+P_010001211*R_112[0]+-1*P_010101011*R_120[0]+P_010101111*R_121[0]+-1*P_010101211*R_122[0]+-1*P_110001011*R_210[0]+P_110001111*R_211[0]+-1*P_110001211*R_212[0]+P_110101011*R_220[0]+-1*P_110101111*R_221[0]+P_110101211*R_222[0];
double PR_010000012110=P_010000012*R_110[0]+-1*P_010000112*R_111[0]+P_010000212*R_112[0]+-1*P_010000312*R_113[0]+-1*P_110000012*R_210[0]+P_110000112*R_211[0]+-1*P_110000212*R_212[0]+P_110000312*R_213[0];
double PR_002010010110=P_002010010*R_110[0]+-1*P_002010110*R_111[0]+-1*P_002110010*R_120[0]+P_002110110*R_121[0]+-1*P_102010010*R_210[0]+P_102010110*R_211[0]+P_102110010*R_220[0]+-1*P_102110110*R_221[0]+P_202010010*R_310[0]+-1*P_202010110*R_311[0]+-1*P_202110010*R_320[0]+P_202110110*R_321[0];
double PR_001011010110=P_001011010*R_110[0]+-1*P_001011110*R_111[0]+-1*P_001111010*R_120[0]+P_001111110*R_121[0]+P_001211010*R_130[0]+-1*P_001211110*R_131[0]+-1*P_101011010*R_210[0]+P_101011110*R_211[0]+P_101111010*R_220[0]+-1*P_101111110*R_221[0]+-1*P_101211010*R_230[0]+P_101211110*R_231[0];
double PR_000012010110=P_000012010*R_110[0]+-1*P_000012110*R_111[0]+-1*P_000112010*R_120[0]+P_000112110*R_121[0]+P_000212010*R_130[0]+-1*P_000212110*R_131[0]+-1*P_000312010*R_140[0]+P_000312110*R_141[0];
double PR_001010011110=P_001010011*R_110[0]+-1*P_001010111*R_111[0]+P_001010211*R_112[0]+-1*P_001110011*R_120[0]+P_001110111*R_121[0]+-1*P_001110211*R_122[0]+-1*P_101010011*R_210[0]+P_101010111*R_211[0]+-1*P_101010211*R_212[0]+P_101110011*R_220[0]+-1*P_101110111*R_221[0]+P_101110211*R_222[0];
double PR_000011011110=P_000011011*R_110[0]+-1*P_000011111*R_111[0]+P_000011211*R_112[0]+-1*P_000111011*R_120[0]+P_000111111*R_121[0]+-1*P_000111211*R_122[0]+P_000211011*R_130[0]+-1*P_000211111*R_131[0]+P_000211211*R_132[0];
double PR_000010012110=P_000010012*R_110[0]+-1*P_000010112*R_111[0]+P_000010212*R_112[0]+-1*P_000010312*R_113[0]+-1*P_000110012*R_120[0]+P_000110112*R_121[0]+-1*P_000110212*R_122[0]+P_000110312*R_123[0];
double PR_002000020110=P_002000020*R_110[0]+-1*P_002000120*R_111[0]+P_002000220*R_112[0]+-1*P_102000020*R_210[0]+P_102000120*R_211[0]+-1*P_102000220*R_212[0]+P_202000020*R_310[0]+-1*P_202000120*R_311[0]+P_202000220*R_312[0];
double PR_001001020110=P_001001020*R_110[0]+-1*P_001001120*R_111[0]+P_001001220*R_112[0]+-1*P_001101020*R_120[0]+P_001101120*R_121[0]+-1*P_001101220*R_122[0]+-1*P_101001020*R_210[0]+P_101001120*R_211[0]+-1*P_101001220*R_212[0]+P_101101020*R_220[0]+-1*P_101101120*R_221[0]+P_101101220*R_222[0];
double PR_000002020110=P_000002020*R_110[0]+-1*P_000002120*R_111[0]+P_000002220*R_112[0]+-1*P_000102020*R_120[0]+P_000102120*R_121[0]+-1*P_000102220*R_122[0]+P_000202020*R_130[0]+-1*P_000202120*R_131[0]+P_000202220*R_132[0];
double PR_001000021110=P_001000021*R_110[0]+-1*P_001000121*R_111[0]+P_001000221*R_112[0]+-1*P_001000321*R_113[0]+-1*P_101000021*R_210[0]+P_101000121*R_211[0]+-1*P_101000221*R_212[0]+P_101000321*R_213[0];
double PR_000001021110=P_000001021*R_110[0]+-1*P_000001121*R_111[0]+P_000001221*R_112[0]+-1*P_000001321*R_113[0]+-1*P_000101021*R_120[0]+P_000101121*R_121[0]+-1*P_000101221*R_122[0]+P_000101321*R_123[0];
double PR_000000022110=P_000000022*R_110[0]+-1*P_000000122*R_111[0]+P_000000222*R_112[0]+-1*P_000000322*R_113[0]+P_000000422*R_114[0];
double PR_022000000200=P_022000000*R_200[0]+-1*P_122000000*R_300[0]+P_222000000*R_400[0]+-1*P_322000000*R_500[0]+P_422000000*R_600[0];
double PR_021001000200=P_021001000*R_200[0]+-1*P_021101000*R_210[0]+-1*P_121001000*R_300[0]+P_121101000*R_310[0]+P_221001000*R_400[0]+-1*P_221101000*R_410[0]+-1*P_321001000*R_500[0]+P_321101000*R_510[0];
double PR_020002000200=P_020002000*R_200[0]+-1*P_020102000*R_210[0]+P_020202000*R_220[0]+-1*P_120002000*R_300[0]+P_120102000*R_310[0]+-1*P_120202000*R_320[0]+P_220002000*R_400[0]+-1*P_220102000*R_410[0]+P_220202000*R_420[0];
double PR_021000001200=P_021000001*R_200[0]+-1*P_021000101*R_201[0]+-1*P_121000001*R_300[0]+P_121000101*R_301[0]+P_221000001*R_400[0]+-1*P_221000101*R_401[0]+-1*P_321000001*R_500[0]+P_321000101*R_501[0];
double PR_020001001200=P_020001001*R_200[0]+-1*P_020001101*R_201[0]+-1*P_020101001*R_210[0]+P_020101101*R_211[0]+-1*P_120001001*R_300[0]+P_120001101*R_301[0]+P_120101001*R_310[0]+-1*P_120101101*R_311[0]+P_220001001*R_400[0]+-1*P_220001101*R_401[0]+-1*P_220101001*R_410[0]+P_220101101*R_411[0];
double PR_020000002200=P_020000002*R_200[0]+-1*P_020000102*R_201[0]+P_020000202*R_202[0]+-1*P_120000002*R_300[0]+P_120000102*R_301[0]+-1*P_120000202*R_302[0]+P_220000002*R_400[0]+-1*P_220000102*R_401[0]+P_220000202*R_402[0];
double PR_012010000200=P_012010000*R_200[0]+-1*P_012110000*R_210[0]+-1*P_112010000*R_300[0]+P_112110000*R_310[0]+P_212010000*R_400[0]+-1*P_212110000*R_410[0]+-1*P_312010000*R_500[0]+P_312110000*R_510[0];
double PR_011011000200=P_011011000*R_200[0]+-1*P_011111000*R_210[0]+P_011211000*R_220[0]+-1*P_111011000*R_300[0]+P_111111000*R_310[0]+-1*P_111211000*R_320[0]+P_211011000*R_400[0]+-1*P_211111000*R_410[0]+P_211211000*R_420[0];
double PR_010012000200=P_010012000*R_200[0]+-1*P_010112000*R_210[0]+P_010212000*R_220[0]+-1*P_010312000*R_230[0]+-1*P_110012000*R_300[0]+P_110112000*R_310[0]+-1*P_110212000*R_320[0]+P_110312000*R_330[0];
double PR_011010001200=P_011010001*R_200[0]+-1*P_011010101*R_201[0]+-1*P_011110001*R_210[0]+P_011110101*R_211[0]+-1*P_111010001*R_300[0]+P_111010101*R_301[0]+P_111110001*R_310[0]+-1*P_111110101*R_311[0]+P_211010001*R_400[0]+-1*P_211010101*R_401[0]+-1*P_211110001*R_410[0]+P_211110101*R_411[0];
double PR_010011001200=P_010011001*R_200[0]+-1*P_010011101*R_201[0]+-1*P_010111001*R_210[0]+P_010111101*R_211[0]+P_010211001*R_220[0]+-1*P_010211101*R_221[0]+-1*P_110011001*R_300[0]+P_110011101*R_301[0]+P_110111001*R_310[0]+-1*P_110111101*R_311[0]+-1*P_110211001*R_320[0]+P_110211101*R_321[0];
double PR_010010002200=P_010010002*R_200[0]+-1*P_010010102*R_201[0]+P_010010202*R_202[0]+-1*P_010110002*R_210[0]+P_010110102*R_211[0]+-1*P_010110202*R_212[0]+-1*P_110010002*R_300[0]+P_110010102*R_301[0]+-1*P_110010202*R_302[0]+P_110110002*R_310[0]+-1*P_110110102*R_311[0]+P_110110202*R_312[0];
double PR_002020000200=P_002020000*R_200[0]+-1*P_002120000*R_210[0]+P_002220000*R_220[0]+-1*P_102020000*R_300[0]+P_102120000*R_310[0]+-1*P_102220000*R_320[0]+P_202020000*R_400[0]+-1*P_202120000*R_410[0]+P_202220000*R_420[0];
double PR_001021000200=P_001021000*R_200[0]+-1*P_001121000*R_210[0]+P_001221000*R_220[0]+-1*P_001321000*R_230[0]+-1*P_101021000*R_300[0]+P_101121000*R_310[0]+-1*P_101221000*R_320[0]+P_101321000*R_330[0];
double PR_000022000200=P_000022000*R_200[0]+-1*P_000122000*R_210[0]+P_000222000*R_220[0]+-1*P_000322000*R_230[0]+P_000422000*R_240[0];
double PR_001020001200=P_001020001*R_200[0]+-1*P_001020101*R_201[0]+-1*P_001120001*R_210[0]+P_001120101*R_211[0]+P_001220001*R_220[0]+-1*P_001220101*R_221[0]+-1*P_101020001*R_300[0]+P_101020101*R_301[0]+P_101120001*R_310[0]+-1*P_101120101*R_311[0]+-1*P_101220001*R_320[0]+P_101220101*R_321[0];
double PR_000021001200=P_000021001*R_200[0]+-1*P_000021101*R_201[0]+-1*P_000121001*R_210[0]+P_000121101*R_211[0]+P_000221001*R_220[0]+-1*P_000221101*R_221[0]+-1*P_000321001*R_230[0]+P_000321101*R_231[0];
double PR_000020002200=P_000020002*R_200[0]+-1*P_000020102*R_201[0]+P_000020202*R_202[0]+-1*P_000120002*R_210[0]+P_000120102*R_211[0]+-1*P_000120202*R_212[0]+P_000220002*R_220[0]+-1*P_000220102*R_221[0]+P_000220202*R_222[0];
double PR_012000010200=P_012000010*R_200[0]+-1*P_012000110*R_201[0]+-1*P_112000010*R_300[0]+P_112000110*R_301[0]+P_212000010*R_400[0]+-1*P_212000110*R_401[0]+-1*P_312000010*R_500[0]+P_312000110*R_501[0];
double PR_011001010200=P_011001010*R_200[0]+-1*P_011001110*R_201[0]+-1*P_011101010*R_210[0]+P_011101110*R_211[0]+-1*P_111001010*R_300[0]+P_111001110*R_301[0]+P_111101010*R_310[0]+-1*P_111101110*R_311[0]+P_211001010*R_400[0]+-1*P_211001110*R_401[0]+-1*P_211101010*R_410[0]+P_211101110*R_411[0];
double PR_010002010200=P_010002010*R_200[0]+-1*P_010002110*R_201[0]+-1*P_010102010*R_210[0]+P_010102110*R_211[0]+P_010202010*R_220[0]+-1*P_010202110*R_221[0]+-1*P_110002010*R_300[0]+P_110002110*R_301[0]+P_110102010*R_310[0]+-1*P_110102110*R_311[0]+-1*P_110202010*R_320[0]+P_110202110*R_321[0];
double PR_011000011200=P_011000011*R_200[0]+-1*P_011000111*R_201[0]+P_011000211*R_202[0]+-1*P_111000011*R_300[0]+P_111000111*R_301[0]+-1*P_111000211*R_302[0]+P_211000011*R_400[0]+-1*P_211000111*R_401[0]+P_211000211*R_402[0];
double PR_010001011200=P_010001011*R_200[0]+-1*P_010001111*R_201[0]+P_010001211*R_202[0]+-1*P_010101011*R_210[0]+P_010101111*R_211[0]+-1*P_010101211*R_212[0]+-1*P_110001011*R_300[0]+P_110001111*R_301[0]+-1*P_110001211*R_302[0]+P_110101011*R_310[0]+-1*P_110101111*R_311[0]+P_110101211*R_312[0];
double PR_010000012200=P_010000012*R_200[0]+-1*P_010000112*R_201[0]+P_010000212*R_202[0]+-1*P_010000312*R_203[0]+-1*P_110000012*R_300[0]+P_110000112*R_301[0]+-1*P_110000212*R_302[0]+P_110000312*R_303[0];
double PR_002010010200=P_002010010*R_200[0]+-1*P_002010110*R_201[0]+-1*P_002110010*R_210[0]+P_002110110*R_211[0]+-1*P_102010010*R_300[0]+P_102010110*R_301[0]+P_102110010*R_310[0]+-1*P_102110110*R_311[0]+P_202010010*R_400[0]+-1*P_202010110*R_401[0]+-1*P_202110010*R_410[0]+P_202110110*R_411[0];
double PR_001011010200=P_001011010*R_200[0]+-1*P_001011110*R_201[0]+-1*P_001111010*R_210[0]+P_001111110*R_211[0]+P_001211010*R_220[0]+-1*P_001211110*R_221[0]+-1*P_101011010*R_300[0]+P_101011110*R_301[0]+P_101111010*R_310[0]+-1*P_101111110*R_311[0]+-1*P_101211010*R_320[0]+P_101211110*R_321[0];
double PR_000012010200=P_000012010*R_200[0]+-1*P_000012110*R_201[0]+-1*P_000112010*R_210[0]+P_000112110*R_211[0]+P_000212010*R_220[0]+-1*P_000212110*R_221[0]+-1*P_000312010*R_230[0]+P_000312110*R_231[0];
double PR_001010011200=P_001010011*R_200[0]+-1*P_001010111*R_201[0]+P_001010211*R_202[0]+-1*P_001110011*R_210[0]+P_001110111*R_211[0]+-1*P_001110211*R_212[0]+-1*P_101010011*R_300[0]+P_101010111*R_301[0]+-1*P_101010211*R_302[0]+P_101110011*R_310[0]+-1*P_101110111*R_311[0]+P_101110211*R_312[0];
double PR_000011011200=P_000011011*R_200[0]+-1*P_000011111*R_201[0]+P_000011211*R_202[0]+-1*P_000111011*R_210[0]+P_000111111*R_211[0]+-1*P_000111211*R_212[0]+P_000211011*R_220[0]+-1*P_000211111*R_221[0]+P_000211211*R_222[0];
double PR_000010012200=P_000010012*R_200[0]+-1*P_000010112*R_201[0]+P_000010212*R_202[0]+-1*P_000010312*R_203[0]+-1*P_000110012*R_210[0]+P_000110112*R_211[0]+-1*P_000110212*R_212[0]+P_000110312*R_213[0];
double PR_002000020200=P_002000020*R_200[0]+-1*P_002000120*R_201[0]+P_002000220*R_202[0]+-1*P_102000020*R_300[0]+P_102000120*R_301[0]+-1*P_102000220*R_302[0]+P_202000020*R_400[0]+-1*P_202000120*R_401[0]+P_202000220*R_402[0];
double PR_001001020200=P_001001020*R_200[0]+-1*P_001001120*R_201[0]+P_001001220*R_202[0]+-1*P_001101020*R_210[0]+P_001101120*R_211[0]+-1*P_001101220*R_212[0]+-1*P_101001020*R_300[0]+P_101001120*R_301[0]+-1*P_101001220*R_302[0]+P_101101020*R_310[0]+-1*P_101101120*R_311[0]+P_101101220*R_312[0];
double PR_000002020200=P_000002020*R_200[0]+-1*P_000002120*R_201[0]+P_000002220*R_202[0]+-1*P_000102020*R_210[0]+P_000102120*R_211[0]+-1*P_000102220*R_212[0]+P_000202020*R_220[0]+-1*P_000202120*R_221[0]+P_000202220*R_222[0];
double PR_001000021200=P_001000021*R_200[0]+-1*P_001000121*R_201[0]+P_001000221*R_202[0]+-1*P_001000321*R_203[0]+-1*P_101000021*R_300[0]+P_101000121*R_301[0]+-1*P_101000221*R_302[0]+P_101000321*R_303[0];
double PR_000001021200=P_000001021*R_200[0]+-1*P_000001121*R_201[0]+P_000001221*R_202[0]+-1*P_000001321*R_203[0]+-1*P_000101021*R_210[0]+P_000101121*R_211[0]+-1*P_000101221*R_212[0]+P_000101321*R_213[0];
double PR_000000022200=P_000000022*R_200[0]+-1*P_000000122*R_201[0]+P_000000222*R_202[0]+-1*P_000000322*R_203[0]+P_000000422*R_204[0];
// Ket-side Gaussian-product expansion coefficient arrays, one entry per
// Cartesian direction (x,y,z).  Built from Qd_010, Qd_001 and aQin1, which
// are defined earlier in this generated kernel (outside this excerpt).
// NOTE(review): aQin1 presumably is 1/(2*eta) for the ket pair -- confirm
// against the code generator / surrounding definitions.
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
// Order-1 auxiliary coefficients: both initialized to the same scalar aQin1.
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
// Order-(1,1) coefficients combined from the order-1 pieces:
// Qd_011 = Qd_101 + Qd_010*Qd_001  (product rule for the mixed index).
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
// Raised-order term: Qd_111 = Qd_010*Qd_101 + aQin1*Qd_001.
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
// Highest-order term: Qd_211 = aQin1*Qd_101 (= aQin1^2 per the init above).
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
// Scalar ket coefficients Q_<xidx><yidx><zidx>: each is a product of the
// per-direction Qd arrays above (index [0]=x, [1]=y, [2]=z).  The 9-digit
// suffix encodes three 3-digit angular/auxiliary indices, matching the
// naming used for the bra-side P_* terms in this generated kernel.
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
ans_temp[ans_id*18+0]+=Pmtrx[0]*(Q_011000000*PR_022000000000+Q_111000000*PR_022000000100+Q_211000000*PR_022000000200);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(Q_010001000*PR_022000000000+Q_010101000*PR_022000000010+Q_110001000*PR_022000000100+Q_110101000*PR_022000000110);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(Q_010000001*PR_022000000000+Q_010000101*PR_022000000001+Q_110000001*PR_022000000100+Q_110000101*PR_022000000101);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(Q_001010000*PR_022000000000+Q_001110000*PR_022000000010+Q_101010000*PR_022000000100+Q_101110000*PR_022000000110);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(Q_000011000*PR_022000000000+Q_000111000*PR_022000000010+Q_000211000*PR_022000000020);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(Q_000010001*PR_022000000000+Q_000010101*PR_022000000001+Q_000110001*PR_022000000010+Q_000110101*PR_022000000011);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(Q_001000010*PR_022000000000+Q_001000110*PR_022000000001+Q_101000010*PR_022000000100+Q_101000110*PR_022000000101);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(Q_000001010*PR_022000000000+Q_000001110*PR_022000000001+Q_000101010*PR_022000000010+Q_000101110*PR_022000000011);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(Q_000000011*PR_022000000000+Q_000000111*PR_022000000001+Q_000000211*PR_022000000002);
ans_temp[ans_id*18+0]+=Pmtrx[3]*(Q_011000000*PR_021001000000+Q_111000000*PR_021001000100+Q_211000000*PR_021001000200);
ans_temp[ans_id*18+0]+=Pmtrx[4]*(Q_010001000*PR_021001000000+Q_010101000*PR_021001000010+Q_110001000*PR_021001000100+Q_110101000*PR_021001000110);
ans_temp[ans_id*18+0]+=Pmtrx[5]*(Q_010000001*PR_021001000000+Q_010000101*PR_021001000001+Q_110000001*PR_021001000100+Q_110000101*PR_021001000101);
ans_temp[ans_id*18+1]+=Pmtrx[3]*(Q_001010000*PR_021001000000+Q_001110000*PR_021001000010+Q_101010000*PR_021001000100+Q_101110000*PR_021001000110);
ans_temp[ans_id*18+1]+=Pmtrx[4]*(Q_000011000*PR_021001000000+Q_000111000*PR_021001000010+Q_000211000*PR_021001000020);
ans_temp[ans_id*18+1]+=Pmtrx[5]*(Q_000010001*PR_021001000000+Q_000010101*PR_021001000001+Q_000110001*PR_021001000010+Q_000110101*PR_021001000011);
ans_temp[ans_id*18+2]+=Pmtrx[3]*(Q_001000010*PR_021001000000+Q_001000110*PR_021001000001+Q_101000010*PR_021001000100+Q_101000110*PR_021001000101);
ans_temp[ans_id*18+2]+=Pmtrx[4]*(Q_000001010*PR_021001000000+Q_000001110*PR_021001000001+Q_000101010*PR_021001000010+Q_000101110*PR_021001000011);
ans_temp[ans_id*18+2]+=Pmtrx[5]*(Q_000000011*PR_021001000000+Q_000000111*PR_021001000001+Q_000000211*PR_021001000002);
ans_temp[ans_id*18+0]+=Pmtrx[6]*(Q_011000000*PR_020002000000+Q_111000000*PR_020002000100+Q_211000000*PR_020002000200);
ans_temp[ans_id*18+0]+=Pmtrx[7]*(Q_010001000*PR_020002000000+Q_010101000*PR_020002000010+Q_110001000*PR_020002000100+Q_110101000*PR_020002000110);
ans_temp[ans_id*18+0]+=Pmtrx[8]*(Q_010000001*PR_020002000000+Q_010000101*PR_020002000001+Q_110000001*PR_020002000100+Q_110000101*PR_020002000101);
ans_temp[ans_id*18+1]+=Pmtrx[6]*(Q_001010000*PR_020002000000+Q_001110000*PR_020002000010+Q_101010000*PR_020002000100+Q_101110000*PR_020002000110);
ans_temp[ans_id*18+1]+=Pmtrx[7]*(Q_000011000*PR_020002000000+Q_000111000*PR_020002000010+Q_000211000*PR_020002000020);
ans_temp[ans_id*18+1]+=Pmtrx[8]*(Q_000010001*PR_020002000000+Q_000010101*PR_020002000001+Q_000110001*PR_020002000010+Q_000110101*PR_020002000011);
ans_temp[ans_id*18+2]+=Pmtrx[6]*(Q_001000010*PR_020002000000+Q_001000110*PR_020002000001+Q_101000010*PR_020002000100+Q_101000110*PR_020002000101);
ans_temp[ans_id*18+2]+=Pmtrx[7]*(Q_000001010*PR_020002000000+Q_000001110*PR_020002000001+Q_000101010*PR_020002000010+Q_000101110*PR_020002000011);
ans_temp[ans_id*18+2]+=Pmtrx[8]*(Q_000000011*PR_020002000000+Q_000000111*PR_020002000001+Q_000000211*PR_020002000002);
ans_temp[ans_id*18+0]+=Pmtrx[9]*(Q_011000000*PR_021000001000+Q_111000000*PR_021000001100+Q_211000000*PR_021000001200);
ans_temp[ans_id*18+0]+=Pmtrx[10]*(Q_010001000*PR_021000001000+Q_010101000*PR_021000001010+Q_110001000*PR_021000001100+Q_110101000*PR_021000001110);
ans_temp[ans_id*18+0]+=Pmtrx[11]*(Q_010000001*PR_021000001000+Q_010000101*PR_021000001001+Q_110000001*PR_021000001100+Q_110000101*PR_021000001101);
ans_temp[ans_id*18+1]+=Pmtrx[9]*(Q_001010000*PR_021000001000+Q_001110000*PR_021000001010+Q_101010000*PR_021000001100+Q_101110000*PR_021000001110);
ans_temp[ans_id*18+1]+=Pmtrx[10]*(Q_000011000*PR_021000001000+Q_000111000*PR_021000001010+Q_000211000*PR_021000001020);
ans_temp[ans_id*18+1]+=Pmtrx[11]*(Q_000010001*PR_021000001000+Q_000010101*PR_021000001001+Q_000110001*PR_021000001010+Q_000110101*PR_021000001011);
ans_temp[ans_id*18+2]+=Pmtrx[9]*(Q_001000010*PR_021000001000+Q_001000110*PR_021000001001+Q_101000010*PR_021000001100+Q_101000110*PR_021000001101);
ans_temp[ans_id*18+2]+=Pmtrx[10]*(Q_000001010*PR_021000001000+Q_000001110*PR_021000001001+Q_000101010*PR_021000001010+Q_000101110*PR_021000001011);
ans_temp[ans_id*18+2]+=Pmtrx[11]*(Q_000000011*PR_021000001000+Q_000000111*PR_021000001001+Q_000000211*PR_021000001002);
ans_temp[ans_id*18+0]+=Pmtrx[12]*(Q_011000000*PR_020001001000+Q_111000000*PR_020001001100+Q_211000000*PR_020001001200);
ans_temp[ans_id*18+0]+=Pmtrx[13]*(Q_010001000*PR_020001001000+Q_010101000*PR_020001001010+Q_110001000*PR_020001001100+Q_110101000*PR_020001001110);
ans_temp[ans_id*18+0]+=Pmtrx[14]*(Q_010000001*PR_020001001000+Q_010000101*PR_020001001001+Q_110000001*PR_020001001100+Q_110000101*PR_020001001101);
ans_temp[ans_id*18+1]+=Pmtrx[12]*(Q_001010000*PR_020001001000+Q_001110000*PR_020001001010+Q_101010000*PR_020001001100+Q_101110000*PR_020001001110);
ans_temp[ans_id*18+1]+=Pmtrx[13]*(Q_000011000*PR_020001001000+Q_000111000*PR_020001001010+Q_000211000*PR_020001001020);
ans_temp[ans_id*18+1]+=Pmtrx[14]*(Q_000010001*PR_020001001000+Q_000010101*PR_020001001001+Q_000110001*PR_020001001010+Q_000110101*PR_020001001011);
ans_temp[ans_id*18+2]+=Pmtrx[12]*(Q_001000010*PR_020001001000+Q_001000110*PR_020001001001+Q_101000010*PR_020001001100+Q_101000110*PR_020001001101);
ans_temp[ans_id*18+2]+=Pmtrx[13]*(Q_000001010*PR_020001001000+Q_000001110*PR_020001001001+Q_000101010*PR_020001001010+Q_000101110*PR_020001001011);
ans_temp[ans_id*18+2]+=Pmtrx[14]*(Q_000000011*PR_020001001000+Q_000000111*PR_020001001001+Q_000000211*PR_020001001002);
ans_temp[ans_id*18+0]+=Pmtrx[15]*(Q_011000000*PR_020000002000+Q_111000000*PR_020000002100+Q_211000000*PR_020000002200);
ans_temp[ans_id*18+0]+=Pmtrx[16]*(Q_010001000*PR_020000002000+Q_010101000*PR_020000002010+Q_110001000*PR_020000002100+Q_110101000*PR_020000002110);
ans_temp[ans_id*18+0]+=Pmtrx[17]*(Q_010000001*PR_020000002000+Q_010000101*PR_020000002001+Q_110000001*PR_020000002100+Q_110000101*PR_020000002101);
ans_temp[ans_id*18+1]+=Pmtrx[15]*(Q_001010000*PR_020000002000+Q_001110000*PR_020000002010+Q_101010000*PR_020000002100+Q_101110000*PR_020000002110);
ans_temp[ans_id*18+1]+=Pmtrx[16]*(Q_000011000*PR_020000002000+Q_000111000*PR_020000002010+Q_000211000*PR_020000002020);
ans_temp[ans_id*18+1]+=Pmtrx[17]*(Q_000010001*PR_020000002000+Q_000010101*PR_020000002001+Q_000110001*PR_020000002010+Q_000110101*PR_020000002011);
ans_temp[ans_id*18+2]+=Pmtrx[15]*(Q_001000010*PR_020000002000+Q_001000110*PR_020000002001+Q_101000010*PR_020000002100+Q_101000110*PR_020000002101);
ans_temp[ans_id*18+2]+=Pmtrx[16]*(Q_000001010*PR_020000002000+Q_000001110*PR_020000002001+Q_000101010*PR_020000002010+Q_000101110*PR_020000002011);
ans_temp[ans_id*18+2]+=Pmtrx[17]*(Q_000000011*PR_020000002000+Q_000000111*PR_020000002001+Q_000000211*PR_020000002002);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(Q_011000000*PR_012010000000+Q_111000000*PR_012010000100+Q_211000000*PR_012010000200);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(Q_010001000*PR_012010000000+Q_010101000*PR_012010000010+Q_110001000*PR_012010000100+Q_110101000*PR_012010000110);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(Q_010000001*PR_012010000000+Q_010000101*PR_012010000001+Q_110000001*PR_012010000100+Q_110000101*PR_012010000101);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(Q_001010000*PR_012010000000+Q_001110000*PR_012010000010+Q_101010000*PR_012010000100+Q_101110000*PR_012010000110);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(Q_000011000*PR_012010000000+Q_000111000*PR_012010000010+Q_000211000*PR_012010000020);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(Q_000010001*PR_012010000000+Q_000010101*PR_012010000001+Q_000110001*PR_012010000010+Q_000110101*PR_012010000011);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(Q_001000010*PR_012010000000+Q_001000110*PR_012010000001+Q_101000010*PR_012010000100+Q_101000110*PR_012010000101);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(Q_000001010*PR_012010000000+Q_000001110*PR_012010000001+Q_000101010*PR_012010000010+Q_000101110*PR_012010000011);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(Q_000000011*PR_012010000000+Q_000000111*PR_012010000001+Q_000000211*PR_012010000002);
ans_temp[ans_id*18+3]+=Pmtrx[3]*(Q_011000000*PR_011011000000+Q_111000000*PR_011011000100+Q_211000000*PR_011011000200);
ans_temp[ans_id*18+3]+=Pmtrx[4]*(Q_010001000*PR_011011000000+Q_010101000*PR_011011000010+Q_110001000*PR_011011000100+Q_110101000*PR_011011000110);
ans_temp[ans_id*18+3]+=Pmtrx[5]*(Q_010000001*PR_011011000000+Q_010000101*PR_011011000001+Q_110000001*PR_011011000100+Q_110000101*PR_011011000101);
ans_temp[ans_id*18+4]+=Pmtrx[3]*(Q_001010000*PR_011011000000+Q_001110000*PR_011011000010+Q_101010000*PR_011011000100+Q_101110000*PR_011011000110);
ans_temp[ans_id*18+4]+=Pmtrx[4]*(Q_000011000*PR_011011000000+Q_000111000*PR_011011000010+Q_000211000*PR_011011000020);
ans_temp[ans_id*18+4]+=Pmtrx[5]*(Q_000010001*PR_011011000000+Q_000010101*PR_011011000001+Q_000110001*PR_011011000010+Q_000110101*PR_011011000011);
ans_temp[ans_id*18+5]+=Pmtrx[3]*(Q_001000010*PR_011011000000+Q_001000110*PR_011011000001+Q_101000010*PR_011011000100+Q_101000110*PR_011011000101);
ans_temp[ans_id*18+5]+=Pmtrx[4]*(Q_000001010*PR_011011000000+Q_000001110*PR_011011000001+Q_000101010*PR_011011000010+Q_000101110*PR_011011000011);
ans_temp[ans_id*18+5]+=Pmtrx[5]*(Q_000000011*PR_011011000000+Q_000000111*PR_011011000001+Q_000000211*PR_011011000002);
ans_temp[ans_id*18+3]+=Pmtrx[6]*(Q_011000000*PR_010012000000+Q_111000000*PR_010012000100+Q_211000000*PR_010012000200);
ans_temp[ans_id*18+3]+=Pmtrx[7]*(Q_010001000*PR_010012000000+Q_010101000*PR_010012000010+Q_110001000*PR_010012000100+Q_110101000*PR_010012000110);
ans_temp[ans_id*18+3]+=Pmtrx[8]*(Q_010000001*PR_010012000000+Q_010000101*PR_010012000001+Q_110000001*PR_010012000100+Q_110000101*PR_010012000101);
ans_temp[ans_id*18+4]+=Pmtrx[6]*(Q_001010000*PR_010012000000+Q_001110000*PR_010012000010+Q_101010000*PR_010012000100+Q_101110000*PR_010012000110);
ans_temp[ans_id*18+4]+=Pmtrx[7]*(Q_000011000*PR_010012000000+Q_000111000*PR_010012000010+Q_000211000*PR_010012000020);
ans_temp[ans_id*18+4]+=Pmtrx[8]*(Q_000010001*PR_010012000000+Q_000010101*PR_010012000001+Q_000110001*PR_010012000010+Q_000110101*PR_010012000011);
ans_temp[ans_id*18+5]+=Pmtrx[6]*(Q_001000010*PR_010012000000+Q_001000110*PR_010012000001+Q_101000010*PR_010012000100+Q_101000110*PR_010012000101);
ans_temp[ans_id*18+5]+=Pmtrx[7]*(Q_000001010*PR_010012000000+Q_000001110*PR_010012000001+Q_000101010*PR_010012000010+Q_000101110*PR_010012000011);
ans_temp[ans_id*18+5]+=Pmtrx[8]*(Q_000000011*PR_010012000000+Q_000000111*PR_010012000001+Q_000000211*PR_010012000002);
ans_temp[ans_id*18+3]+=Pmtrx[9]*(Q_011000000*PR_011010001000+Q_111000000*PR_011010001100+Q_211000000*PR_011010001200);
ans_temp[ans_id*18+3]+=Pmtrx[10]*(Q_010001000*PR_011010001000+Q_010101000*PR_011010001010+Q_110001000*PR_011010001100+Q_110101000*PR_011010001110);
ans_temp[ans_id*18+3]+=Pmtrx[11]*(Q_010000001*PR_011010001000+Q_010000101*PR_011010001001+Q_110000001*PR_011010001100+Q_110000101*PR_011010001101);
ans_temp[ans_id*18+4]+=Pmtrx[9]*(Q_001010000*PR_011010001000+Q_001110000*PR_011010001010+Q_101010000*PR_011010001100+Q_101110000*PR_011010001110);
ans_temp[ans_id*18+4]+=Pmtrx[10]*(Q_000011000*PR_011010001000+Q_000111000*PR_011010001010+Q_000211000*PR_011010001020);
ans_temp[ans_id*18+4]+=Pmtrx[11]*(Q_000010001*PR_011010001000+Q_000010101*PR_011010001001+Q_000110001*PR_011010001010+Q_000110101*PR_011010001011);
ans_temp[ans_id*18+5]+=Pmtrx[9]*(Q_001000010*PR_011010001000+Q_001000110*PR_011010001001+Q_101000010*PR_011010001100+Q_101000110*PR_011010001101);
ans_temp[ans_id*18+5]+=Pmtrx[10]*(Q_000001010*PR_011010001000+Q_000001110*PR_011010001001+Q_000101010*PR_011010001010+Q_000101110*PR_011010001011);
ans_temp[ans_id*18+5]+=Pmtrx[11]*(Q_000000011*PR_011010001000+Q_000000111*PR_011010001001+Q_000000211*PR_011010001002);
ans_temp[ans_id*18+3]+=Pmtrx[12]*(Q_011000000*PR_010011001000+Q_111000000*PR_010011001100+Q_211000000*PR_010011001200);
ans_temp[ans_id*18+3]+=Pmtrx[13]*(Q_010001000*PR_010011001000+Q_010101000*PR_010011001010+Q_110001000*PR_010011001100+Q_110101000*PR_010011001110);
ans_temp[ans_id*18+3]+=Pmtrx[14]*(Q_010000001*PR_010011001000+Q_010000101*PR_010011001001+Q_110000001*PR_010011001100+Q_110000101*PR_010011001101);
ans_temp[ans_id*18+4]+=Pmtrx[12]*(Q_001010000*PR_010011001000+Q_001110000*PR_010011001010+Q_101010000*PR_010011001100+Q_101110000*PR_010011001110);
ans_temp[ans_id*18+4]+=Pmtrx[13]*(Q_000011000*PR_010011001000+Q_000111000*PR_010011001010+Q_000211000*PR_010011001020);
ans_temp[ans_id*18+4]+=Pmtrx[14]*(Q_000010001*PR_010011001000+Q_000010101*PR_010011001001+Q_000110001*PR_010011001010+Q_000110101*PR_010011001011);
ans_temp[ans_id*18+5]+=Pmtrx[12]*(Q_001000010*PR_010011001000+Q_001000110*PR_010011001001+Q_101000010*PR_010011001100+Q_101000110*PR_010011001101);
ans_temp[ans_id*18+5]+=Pmtrx[13]*(Q_000001010*PR_010011001000+Q_000001110*PR_010011001001+Q_000101010*PR_010011001010+Q_000101110*PR_010011001011);
ans_temp[ans_id*18+5]+=Pmtrx[14]*(Q_000000011*PR_010011001000+Q_000000111*PR_010011001001+Q_000000211*PR_010011001002);
ans_temp[ans_id*18+3]+=Pmtrx[15]*(Q_011000000*PR_010010002000+Q_111000000*PR_010010002100+Q_211000000*PR_010010002200);
ans_temp[ans_id*18+3]+=Pmtrx[16]*(Q_010001000*PR_010010002000+Q_010101000*PR_010010002010+Q_110001000*PR_010010002100+Q_110101000*PR_010010002110);
ans_temp[ans_id*18+3]+=Pmtrx[17]*(Q_010000001*PR_010010002000+Q_010000101*PR_010010002001+Q_110000001*PR_010010002100+Q_110000101*PR_010010002101);
ans_temp[ans_id*18+4]+=Pmtrx[15]*(Q_001010000*PR_010010002000+Q_001110000*PR_010010002010+Q_101010000*PR_010010002100+Q_101110000*PR_010010002110);
ans_temp[ans_id*18+4]+=Pmtrx[16]*(Q_000011000*PR_010010002000+Q_000111000*PR_010010002010+Q_000211000*PR_010010002020);
ans_temp[ans_id*18+4]+=Pmtrx[17]*(Q_000010001*PR_010010002000+Q_000010101*PR_010010002001+Q_000110001*PR_010010002010+Q_000110101*PR_010010002011);
ans_temp[ans_id*18+5]+=Pmtrx[15]*(Q_001000010*PR_010010002000+Q_001000110*PR_010010002001+Q_101000010*PR_010010002100+Q_101000110*PR_010010002101);
ans_temp[ans_id*18+5]+=Pmtrx[16]*(Q_000001010*PR_010010002000+Q_000001110*PR_010010002001+Q_000101010*PR_010010002010+Q_000101110*PR_010010002011);
ans_temp[ans_id*18+5]+=Pmtrx[17]*(Q_000000011*PR_010010002000+Q_000000111*PR_010010002001+Q_000000211*PR_010010002002);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(Q_011000000*PR_002020000000+Q_111000000*PR_002020000100+Q_211000000*PR_002020000200);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(Q_010001000*PR_002020000000+Q_010101000*PR_002020000010+Q_110001000*PR_002020000100+Q_110101000*PR_002020000110);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(Q_010000001*PR_002020000000+Q_010000101*PR_002020000001+Q_110000001*PR_002020000100+Q_110000101*PR_002020000101);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(Q_001010000*PR_002020000000+Q_001110000*PR_002020000010+Q_101010000*PR_002020000100+Q_101110000*PR_002020000110);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(Q_000011000*PR_002020000000+Q_000111000*PR_002020000010+Q_000211000*PR_002020000020);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(Q_000010001*PR_002020000000+Q_000010101*PR_002020000001+Q_000110001*PR_002020000010+Q_000110101*PR_002020000011);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(Q_001000010*PR_002020000000+Q_001000110*PR_002020000001+Q_101000010*PR_002020000100+Q_101000110*PR_002020000101);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(Q_000001010*PR_002020000000+Q_000001110*PR_002020000001+Q_000101010*PR_002020000010+Q_000101110*PR_002020000011);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(Q_000000011*PR_002020000000+Q_000000111*PR_002020000001+Q_000000211*PR_002020000002);
ans_temp[ans_id*18+6]+=Pmtrx[3]*(Q_011000000*PR_001021000000+Q_111000000*PR_001021000100+Q_211000000*PR_001021000200);
ans_temp[ans_id*18+6]+=Pmtrx[4]*(Q_010001000*PR_001021000000+Q_010101000*PR_001021000010+Q_110001000*PR_001021000100+Q_110101000*PR_001021000110);
ans_temp[ans_id*18+6]+=Pmtrx[5]*(Q_010000001*PR_001021000000+Q_010000101*PR_001021000001+Q_110000001*PR_001021000100+Q_110000101*PR_001021000101);
ans_temp[ans_id*18+7]+=Pmtrx[3]*(Q_001010000*PR_001021000000+Q_001110000*PR_001021000010+Q_101010000*PR_001021000100+Q_101110000*PR_001021000110);
ans_temp[ans_id*18+7]+=Pmtrx[4]*(Q_000011000*PR_001021000000+Q_000111000*PR_001021000010+Q_000211000*PR_001021000020);
ans_temp[ans_id*18+7]+=Pmtrx[5]*(Q_000010001*PR_001021000000+Q_000010101*PR_001021000001+Q_000110001*PR_001021000010+Q_000110101*PR_001021000011);
ans_temp[ans_id*18+8]+=Pmtrx[3]*(Q_001000010*PR_001021000000+Q_001000110*PR_001021000001+Q_101000010*PR_001021000100+Q_101000110*PR_001021000101);
ans_temp[ans_id*18+8]+=Pmtrx[4]*(Q_000001010*PR_001021000000+Q_000001110*PR_001021000001+Q_000101010*PR_001021000010+Q_000101110*PR_001021000011);
ans_temp[ans_id*18+8]+=Pmtrx[5]*(Q_000000011*PR_001021000000+Q_000000111*PR_001021000001+Q_000000211*PR_001021000002);
ans_temp[ans_id*18+6]+=Pmtrx[6]*(Q_011000000*PR_000022000000+Q_111000000*PR_000022000100+Q_211000000*PR_000022000200);
ans_temp[ans_id*18+6]+=Pmtrx[7]*(Q_010001000*PR_000022000000+Q_010101000*PR_000022000010+Q_110001000*PR_000022000100+Q_110101000*PR_000022000110);
ans_temp[ans_id*18+6]+=Pmtrx[8]*(Q_010000001*PR_000022000000+Q_010000101*PR_000022000001+Q_110000001*PR_000022000100+Q_110000101*PR_000022000101);
ans_temp[ans_id*18+7]+=Pmtrx[6]*(Q_001010000*PR_000022000000+Q_001110000*PR_000022000010+Q_101010000*PR_000022000100+Q_101110000*PR_000022000110);
ans_temp[ans_id*18+7]+=Pmtrx[7]*(Q_000011000*PR_000022000000+Q_000111000*PR_000022000010+Q_000211000*PR_000022000020);
ans_temp[ans_id*18+7]+=Pmtrx[8]*(Q_000010001*PR_000022000000+Q_000010101*PR_000022000001+Q_000110001*PR_000022000010+Q_000110101*PR_000022000011);
ans_temp[ans_id*18+8]+=Pmtrx[6]*(Q_001000010*PR_000022000000+Q_001000110*PR_000022000001+Q_101000010*PR_000022000100+Q_101000110*PR_000022000101);
ans_temp[ans_id*18+8]+=Pmtrx[7]*(Q_000001010*PR_000022000000+Q_000001110*PR_000022000001+Q_000101010*PR_000022000010+Q_000101110*PR_000022000011);
ans_temp[ans_id*18+8]+=Pmtrx[8]*(Q_000000011*PR_000022000000+Q_000000111*PR_000022000001+Q_000000211*PR_000022000002);
ans_temp[ans_id*18+6]+=Pmtrx[9]*(Q_011000000*PR_001020001000+Q_111000000*PR_001020001100+Q_211000000*PR_001020001200);
ans_temp[ans_id*18+6]+=Pmtrx[10]*(Q_010001000*PR_001020001000+Q_010101000*PR_001020001010+Q_110001000*PR_001020001100+Q_110101000*PR_001020001110);
ans_temp[ans_id*18+6]+=Pmtrx[11]*(Q_010000001*PR_001020001000+Q_010000101*PR_001020001001+Q_110000001*PR_001020001100+Q_110000101*PR_001020001101);
ans_temp[ans_id*18+7]+=Pmtrx[9]*(Q_001010000*PR_001020001000+Q_001110000*PR_001020001010+Q_101010000*PR_001020001100+Q_101110000*PR_001020001110);
ans_temp[ans_id*18+7]+=Pmtrx[10]*(Q_000011000*PR_001020001000+Q_000111000*PR_001020001010+Q_000211000*PR_001020001020);
ans_temp[ans_id*18+7]+=Pmtrx[11]*(Q_000010001*PR_001020001000+Q_000010101*PR_001020001001+Q_000110001*PR_001020001010+Q_000110101*PR_001020001011);
ans_temp[ans_id*18+8]+=Pmtrx[9]*(Q_001000010*PR_001020001000+Q_001000110*PR_001020001001+Q_101000010*PR_001020001100+Q_101000110*PR_001020001101);
ans_temp[ans_id*18+8]+=Pmtrx[10]*(Q_000001010*PR_001020001000+Q_000001110*PR_001020001001+Q_000101010*PR_001020001010+Q_000101110*PR_001020001011);
ans_temp[ans_id*18+8]+=Pmtrx[11]*(Q_000000011*PR_001020001000+Q_000000111*PR_001020001001+Q_000000211*PR_001020001002);
ans_temp[ans_id*18+6]+=Pmtrx[12]*(Q_011000000*PR_000021001000+Q_111000000*PR_000021001100+Q_211000000*PR_000021001200);
ans_temp[ans_id*18+6]+=Pmtrx[13]*(Q_010001000*PR_000021001000+Q_010101000*PR_000021001010+Q_110001000*PR_000021001100+Q_110101000*PR_000021001110);
ans_temp[ans_id*18+6]+=Pmtrx[14]*(Q_010000001*PR_000021001000+Q_010000101*PR_000021001001+Q_110000001*PR_000021001100+Q_110000101*PR_000021001101);
ans_temp[ans_id*18+7]+=Pmtrx[12]*(Q_001010000*PR_000021001000+Q_001110000*PR_000021001010+Q_101010000*PR_000021001100+Q_101110000*PR_000021001110);
ans_temp[ans_id*18+7]+=Pmtrx[13]*(Q_000011000*PR_000021001000+Q_000111000*PR_000021001010+Q_000211000*PR_000021001020);
ans_temp[ans_id*18+7]+=Pmtrx[14]*(Q_000010001*PR_000021001000+Q_000010101*PR_000021001001+Q_000110001*PR_000021001010+Q_000110101*PR_000021001011);
ans_temp[ans_id*18+8]+=Pmtrx[12]*(Q_001000010*PR_000021001000+Q_001000110*PR_000021001001+Q_101000010*PR_000021001100+Q_101000110*PR_000021001101);
ans_temp[ans_id*18+8]+=Pmtrx[13]*(Q_000001010*PR_000021001000+Q_000001110*PR_000021001001+Q_000101010*PR_000021001010+Q_000101110*PR_000021001011);
ans_temp[ans_id*18+8]+=Pmtrx[14]*(Q_000000011*PR_000021001000+Q_000000111*PR_000021001001+Q_000000211*PR_000021001002);
ans_temp[ans_id*18+6]+=Pmtrx[15]*(Q_011000000*PR_000020002000+Q_111000000*PR_000020002100+Q_211000000*PR_000020002200);
ans_temp[ans_id*18+6]+=Pmtrx[16]*(Q_010001000*PR_000020002000+Q_010101000*PR_000020002010+Q_110001000*PR_000020002100+Q_110101000*PR_000020002110);
ans_temp[ans_id*18+6]+=Pmtrx[17]*(Q_010000001*PR_000020002000+Q_010000101*PR_000020002001+Q_110000001*PR_000020002100+Q_110000101*PR_000020002101);
ans_temp[ans_id*18+7]+=Pmtrx[15]*(Q_001010000*PR_000020002000+Q_001110000*PR_000020002010+Q_101010000*PR_000020002100+Q_101110000*PR_000020002110);
ans_temp[ans_id*18+7]+=Pmtrx[16]*(Q_000011000*PR_000020002000+Q_000111000*PR_000020002010+Q_000211000*PR_000020002020);
ans_temp[ans_id*18+7]+=Pmtrx[17]*(Q_000010001*PR_000020002000+Q_000010101*PR_000020002001+Q_000110001*PR_000020002010+Q_000110101*PR_000020002011);
ans_temp[ans_id*18+8]+=Pmtrx[15]*(Q_001000010*PR_000020002000+Q_001000110*PR_000020002001+Q_101000010*PR_000020002100+Q_101000110*PR_000020002101);
ans_temp[ans_id*18+8]+=Pmtrx[16]*(Q_000001010*PR_000020002000+Q_000001110*PR_000020002001+Q_000101010*PR_000020002010+Q_000101110*PR_000020002011);
ans_temp[ans_id*18+8]+=Pmtrx[17]*(Q_000000011*PR_000020002000+Q_000000111*PR_000020002001+Q_000000211*PR_000020002002);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(Q_011000000*PR_012000010000+Q_111000000*PR_012000010100+Q_211000000*PR_012000010200);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(Q_010001000*PR_012000010000+Q_010101000*PR_012000010010+Q_110001000*PR_012000010100+Q_110101000*PR_012000010110);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(Q_010000001*PR_012000010000+Q_010000101*PR_012000010001+Q_110000001*PR_012000010100+Q_110000101*PR_012000010101);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(Q_001010000*PR_012000010000+Q_001110000*PR_012000010010+Q_101010000*PR_012000010100+Q_101110000*PR_012000010110);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(Q_000011000*PR_012000010000+Q_000111000*PR_012000010010+Q_000211000*PR_012000010020);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(Q_000010001*PR_012000010000+Q_000010101*PR_012000010001+Q_000110001*PR_012000010010+Q_000110101*PR_012000010011);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(Q_001000010*PR_012000010000+Q_001000110*PR_012000010001+Q_101000010*PR_012000010100+Q_101000110*PR_012000010101);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(Q_000001010*PR_012000010000+Q_000001110*PR_012000010001+Q_000101010*PR_012000010010+Q_000101110*PR_012000010011);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(Q_000000011*PR_012000010000+Q_000000111*PR_012000010001+Q_000000211*PR_012000010002);
ans_temp[ans_id*18+9]+=Pmtrx[3]*(Q_011000000*PR_011001010000+Q_111000000*PR_011001010100+Q_211000000*PR_011001010200);
ans_temp[ans_id*18+9]+=Pmtrx[4]*(Q_010001000*PR_011001010000+Q_010101000*PR_011001010010+Q_110001000*PR_011001010100+Q_110101000*PR_011001010110);
ans_temp[ans_id*18+9]+=Pmtrx[5]*(Q_010000001*PR_011001010000+Q_010000101*PR_011001010001+Q_110000001*PR_011001010100+Q_110000101*PR_011001010101);
ans_temp[ans_id*18+10]+=Pmtrx[3]*(Q_001010000*PR_011001010000+Q_001110000*PR_011001010010+Q_101010000*PR_011001010100+Q_101110000*PR_011001010110);
ans_temp[ans_id*18+10]+=Pmtrx[4]*(Q_000011000*PR_011001010000+Q_000111000*PR_011001010010+Q_000211000*PR_011001010020);
ans_temp[ans_id*18+10]+=Pmtrx[5]*(Q_000010001*PR_011001010000+Q_000010101*PR_011001010001+Q_000110001*PR_011001010010+Q_000110101*PR_011001010011);
ans_temp[ans_id*18+11]+=Pmtrx[3]*(Q_001000010*PR_011001010000+Q_001000110*PR_011001010001+Q_101000010*PR_011001010100+Q_101000110*PR_011001010101);
ans_temp[ans_id*18+11]+=Pmtrx[4]*(Q_000001010*PR_011001010000+Q_000001110*PR_011001010001+Q_000101010*PR_011001010010+Q_000101110*PR_011001010011);
ans_temp[ans_id*18+11]+=Pmtrx[5]*(Q_000000011*PR_011001010000+Q_000000111*PR_011001010001+Q_000000211*PR_011001010002);
ans_temp[ans_id*18+9]+=Pmtrx[6]*(Q_011000000*PR_010002010000+Q_111000000*PR_010002010100+Q_211000000*PR_010002010200);
ans_temp[ans_id*18+9]+=Pmtrx[7]*(Q_010001000*PR_010002010000+Q_010101000*PR_010002010010+Q_110001000*PR_010002010100+Q_110101000*PR_010002010110);
ans_temp[ans_id*18+9]+=Pmtrx[8]*(Q_010000001*PR_010002010000+Q_010000101*PR_010002010001+Q_110000001*PR_010002010100+Q_110000101*PR_010002010101);
ans_temp[ans_id*18+10]+=Pmtrx[6]*(Q_001010000*PR_010002010000+Q_001110000*PR_010002010010+Q_101010000*PR_010002010100+Q_101110000*PR_010002010110);
ans_temp[ans_id*18+10]+=Pmtrx[7]*(Q_000011000*PR_010002010000+Q_000111000*PR_010002010010+Q_000211000*PR_010002010020);
ans_temp[ans_id*18+10]+=Pmtrx[8]*(Q_000010001*PR_010002010000+Q_000010101*PR_010002010001+Q_000110001*PR_010002010010+Q_000110101*PR_010002010011);
ans_temp[ans_id*18+11]+=Pmtrx[6]*(Q_001000010*PR_010002010000+Q_001000110*PR_010002010001+Q_101000010*PR_010002010100+Q_101000110*PR_010002010101);
ans_temp[ans_id*18+11]+=Pmtrx[7]*(Q_000001010*PR_010002010000+Q_000001110*PR_010002010001+Q_000101010*PR_010002010010+Q_000101110*PR_010002010011);
ans_temp[ans_id*18+11]+=Pmtrx[8]*(Q_000000011*PR_010002010000+Q_000000111*PR_010002010001+Q_000000211*PR_010002010002);
ans_temp[ans_id*18+9]+=Pmtrx[9]*(Q_011000000*PR_011000011000+Q_111000000*PR_011000011100+Q_211000000*PR_011000011200);
ans_temp[ans_id*18+9]+=Pmtrx[10]*(Q_010001000*PR_011000011000+Q_010101000*PR_011000011010+Q_110001000*PR_011000011100+Q_110101000*PR_011000011110);
ans_temp[ans_id*18+9]+=Pmtrx[11]*(Q_010000001*PR_011000011000+Q_010000101*PR_011000011001+Q_110000001*PR_011000011100+Q_110000101*PR_011000011101);
ans_temp[ans_id*18+10]+=Pmtrx[9]*(Q_001010000*PR_011000011000+Q_001110000*PR_011000011010+Q_101010000*PR_011000011100+Q_101110000*PR_011000011110);
ans_temp[ans_id*18+10]+=Pmtrx[10]*(Q_000011000*PR_011000011000+Q_000111000*PR_011000011010+Q_000211000*PR_011000011020);
ans_temp[ans_id*18+10]+=Pmtrx[11]*(Q_000010001*PR_011000011000+Q_000010101*PR_011000011001+Q_000110001*PR_011000011010+Q_000110101*PR_011000011011);
ans_temp[ans_id*18+11]+=Pmtrx[9]*(Q_001000010*PR_011000011000+Q_001000110*PR_011000011001+Q_101000010*PR_011000011100+Q_101000110*PR_011000011101);
ans_temp[ans_id*18+11]+=Pmtrx[10]*(Q_000001010*PR_011000011000+Q_000001110*PR_011000011001+Q_000101010*PR_011000011010+Q_000101110*PR_011000011011);
ans_temp[ans_id*18+11]+=Pmtrx[11]*(Q_000000011*PR_011000011000+Q_000000111*PR_011000011001+Q_000000211*PR_011000011002);
ans_temp[ans_id*18+9]+=Pmtrx[12]*(Q_011000000*PR_010001011000+Q_111000000*PR_010001011100+Q_211000000*PR_010001011200);
ans_temp[ans_id*18+9]+=Pmtrx[13]*(Q_010001000*PR_010001011000+Q_010101000*PR_010001011010+Q_110001000*PR_010001011100+Q_110101000*PR_010001011110);
ans_temp[ans_id*18+9]+=Pmtrx[14]*(Q_010000001*PR_010001011000+Q_010000101*PR_010001011001+Q_110000001*PR_010001011100+Q_110000101*PR_010001011101);
ans_temp[ans_id*18+10]+=Pmtrx[12]*(Q_001010000*PR_010001011000+Q_001110000*PR_010001011010+Q_101010000*PR_010001011100+Q_101110000*PR_010001011110);
ans_temp[ans_id*18+10]+=Pmtrx[13]*(Q_000011000*PR_010001011000+Q_000111000*PR_010001011010+Q_000211000*PR_010001011020);
ans_temp[ans_id*18+10]+=Pmtrx[14]*(Q_000010001*PR_010001011000+Q_000010101*PR_010001011001+Q_000110001*PR_010001011010+Q_000110101*PR_010001011011);
ans_temp[ans_id*18+11]+=Pmtrx[12]*(Q_001000010*PR_010001011000+Q_001000110*PR_010001011001+Q_101000010*PR_010001011100+Q_101000110*PR_010001011101);
ans_temp[ans_id*18+11]+=Pmtrx[13]*(Q_000001010*PR_010001011000+Q_000001110*PR_010001011001+Q_000101010*PR_010001011010+Q_000101110*PR_010001011011);
ans_temp[ans_id*18+11]+=Pmtrx[14]*(Q_000000011*PR_010001011000+Q_000000111*PR_010001011001+Q_000000211*PR_010001011002);
ans_temp[ans_id*18+9]+=Pmtrx[15]*(Q_011000000*PR_010000012000+Q_111000000*PR_010000012100+Q_211000000*PR_010000012200);
ans_temp[ans_id*18+9]+=Pmtrx[16]*(Q_010001000*PR_010000012000+Q_010101000*PR_010000012010+Q_110001000*PR_010000012100+Q_110101000*PR_010000012110);
ans_temp[ans_id*18+9]+=Pmtrx[17]*(Q_010000001*PR_010000012000+Q_010000101*PR_010000012001+Q_110000001*PR_010000012100+Q_110000101*PR_010000012101);
ans_temp[ans_id*18+10]+=Pmtrx[15]*(Q_001010000*PR_010000012000+Q_001110000*PR_010000012010+Q_101010000*PR_010000012100+Q_101110000*PR_010000012110);
ans_temp[ans_id*18+10]+=Pmtrx[16]*(Q_000011000*PR_010000012000+Q_000111000*PR_010000012010+Q_000211000*PR_010000012020);
ans_temp[ans_id*18+10]+=Pmtrx[17]*(Q_000010001*PR_010000012000+Q_000010101*PR_010000012001+Q_000110001*PR_010000012010+Q_000110101*PR_010000012011);
ans_temp[ans_id*18+11]+=Pmtrx[15]*(Q_001000010*PR_010000012000+Q_001000110*PR_010000012001+Q_101000010*PR_010000012100+Q_101000110*PR_010000012101);
ans_temp[ans_id*18+11]+=Pmtrx[16]*(Q_000001010*PR_010000012000+Q_000001110*PR_010000012001+Q_000101010*PR_010000012010+Q_000101110*PR_010000012011);
ans_temp[ans_id*18+11]+=Pmtrx[17]*(Q_000000011*PR_010000012000+Q_000000111*PR_010000012001+Q_000000211*PR_010000012002);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(Q_011000000*PR_002010010000+Q_111000000*PR_002010010100+Q_211000000*PR_002010010200);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(Q_010001000*PR_002010010000+Q_010101000*PR_002010010010+Q_110001000*PR_002010010100+Q_110101000*PR_002010010110);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(Q_010000001*PR_002010010000+Q_010000101*PR_002010010001+Q_110000001*PR_002010010100+Q_110000101*PR_002010010101);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(Q_001010000*PR_002010010000+Q_001110000*PR_002010010010+Q_101010000*PR_002010010100+Q_101110000*PR_002010010110);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(Q_000011000*PR_002010010000+Q_000111000*PR_002010010010+Q_000211000*PR_002010010020);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(Q_000010001*PR_002010010000+Q_000010101*PR_002010010001+Q_000110001*PR_002010010010+Q_000110101*PR_002010010011);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(Q_001000010*PR_002010010000+Q_001000110*PR_002010010001+Q_101000010*PR_002010010100+Q_101000110*PR_002010010101);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(Q_000001010*PR_002010010000+Q_000001110*PR_002010010001+Q_000101010*PR_002010010010+Q_000101110*PR_002010010011);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(Q_000000011*PR_002010010000+Q_000000111*PR_002010010001+Q_000000211*PR_002010010002);
ans_temp[ans_id*18+12]+=Pmtrx[3]*(Q_011000000*PR_001011010000+Q_111000000*PR_001011010100+Q_211000000*PR_001011010200);
ans_temp[ans_id*18+12]+=Pmtrx[4]*(Q_010001000*PR_001011010000+Q_010101000*PR_001011010010+Q_110001000*PR_001011010100+Q_110101000*PR_001011010110);
ans_temp[ans_id*18+12]+=Pmtrx[5]*(Q_010000001*PR_001011010000+Q_010000101*PR_001011010001+Q_110000001*PR_001011010100+Q_110000101*PR_001011010101);
ans_temp[ans_id*18+13]+=Pmtrx[3]*(Q_001010000*PR_001011010000+Q_001110000*PR_001011010010+Q_101010000*PR_001011010100+Q_101110000*PR_001011010110);
ans_temp[ans_id*18+13]+=Pmtrx[4]*(Q_000011000*PR_001011010000+Q_000111000*PR_001011010010+Q_000211000*PR_001011010020);
ans_temp[ans_id*18+13]+=Pmtrx[5]*(Q_000010001*PR_001011010000+Q_000010101*PR_001011010001+Q_000110001*PR_001011010010+Q_000110101*PR_001011010011);
ans_temp[ans_id*18+14]+=Pmtrx[3]*(Q_001000010*PR_001011010000+Q_001000110*PR_001011010001+Q_101000010*PR_001011010100+Q_101000110*PR_001011010101);
ans_temp[ans_id*18+14]+=Pmtrx[4]*(Q_000001010*PR_001011010000+Q_000001110*PR_001011010001+Q_000101010*PR_001011010010+Q_000101110*PR_001011010011);
ans_temp[ans_id*18+14]+=Pmtrx[5]*(Q_000000011*PR_001011010000+Q_000000111*PR_001011010001+Q_000000211*PR_001011010002);
ans_temp[ans_id*18+12]+=Pmtrx[6]*(Q_011000000*PR_000012010000+Q_111000000*PR_000012010100+Q_211000000*PR_000012010200);
ans_temp[ans_id*18+12]+=Pmtrx[7]*(Q_010001000*PR_000012010000+Q_010101000*PR_000012010010+Q_110001000*PR_000012010100+Q_110101000*PR_000012010110);
ans_temp[ans_id*18+12]+=Pmtrx[8]*(Q_010000001*PR_000012010000+Q_010000101*PR_000012010001+Q_110000001*PR_000012010100+Q_110000101*PR_000012010101);
ans_temp[ans_id*18+13]+=Pmtrx[6]*(Q_001010000*PR_000012010000+Q_001110000*PR_000012010010+Q_101010000*PR_000012010100+Q_101110000*PR_000012010110);
ans_temp[ans_id*18+13]+=Pmtrx[7]*(Q_000011000*PR_000012010000+Q_000111000*PR_000012010010+Q_000211000*PR_000012010020);
ans_temp[ans_id*18+13]+=Pmtrx[8]*(Q_000010001*PR_000012010000+Q_000010101*PR_000012010001+Q_000110001*PR_000012010010+Q_000110101*PR_000012010011);
ans_temp[ans_id*18+14]+=Pmtrx[6]*(Q_001000010*PR_000012010000+Q_001000110*PR_000012010001+Q_101000010*PR_000012010100+Q_101000110*PR_000012010101);
ans_temp[ans_id*18+14]+=Pmtrx[7]*(Q_000001010*PR_000012010000+Q_000001110*PR_000012010001+Q_000101010*PR_000012010010+Q_000101110*PR_000012010011);
ans_temp[ans_id*18+14]+=Pmtrx[8]*(Q_000000011*PR_000012010000+Q_000000111*PR_000012010001+Q_000000211*PR_000012010002);
ans_temp[ans_id*18+12]+=Pmtrx[9]*(Q_011000000*PR_001010011000+Q_111000000*PR_001010011100+Q_211000000*PR_001010011200);
ans_temp[ans_id*18+12]+=Pmtrx[10]*(Q_010001000*PR_001010011000+Q_010101000*PR_001010011010+Q_110001000*PR_001010011100+Q_110101000*PR_001010011110);
ans_temp[ans_id*18+12]+=Pmtrx[11]*(Q_010000001*PR_001010011000+Q_010000101*PR_001010011001+Q_110000001*PR_001010011100+Q_110000101*PR_001010011101);
ans_temp[ans_id*18+13]+=Pmtrx[9]*(Q_001010000*PR_001010011000+Q_001110000*PR_001010011010+Q_101010000*PR_001010011100+Q_101110000*PR_001010011110);
ans_temp[ans_id*18+13]+=Pmtrx[10]*(Q_000011000*PR_001010011000+Q_000111000*PR_001010011010+Q_000211000*PR_001010011020);
ans_temp[ans_id*18+13]+=Pmtrx[11]*(Q_000010001*PR_001010011000+Q_000010101*PR_001010011001+Q_000110001*PR_001010011010+Q_000110101*PR_001010011011);
ans_temp[ans_id*18+14]+=Pmtrx[9]*(Q_001000010*PR_001010011000+Q_001000110*PR_001010011001+Q_101000010*PR_001010011100+Q_101000110*PR_001010011101);
ans_temp[ans_id*18+14]+=Pmtrx[10]*(Q_000001010*PR_001010011000+Q_000001110*PR_001010011001+Q_000101010*PR_001010011010+Q_000101110*PR_001010011011);
ans_temp[ans_id*18+14]+=Pmtrx[11]*(Q_000000011*PR_001010011000+Q_000000111*PR_001010011001+Q_000000211*PR_001010011002);
ans_temp[ans_id*18+12]+=Pmtrx[12]*(Q_011000000*PR_000011011000+Q_111000000*PR_000011011100+Q_211000000*PR_000011011200);
ans_temp[ans_id*18+12]+=Pmtrx[13]*(Q_010001000*PR_000011011000+Q_010101000*PR_000011011010+Q_110001000*PR_000011011100+Q_110101000*PR_000011011110);
ans_temp[ans_id*18+12]+=Pmtrx[14]*(Q_010000001*PR_000011011000+Q_010000101*PR_000011011001+Q_110000001*PR_000011011100+Q_110000101*PR_000011011101);
ans_temp[ans_id*18+13]+=Pmtrx[12]*(Q_001010000*PR_000011011000+Q_001110000*PR_000011011010+Q_101010000*PR_000011011100+Q_101110000*PR_000011011110);
ans_temp[ans_id*18+13]+=Pmtrx[13]*(Q_000011000*PR_000011011000+Q_000111000*PR_000011011010+Q_000211000*PR_000011011020);
ans_temp[ans_id*18+13]+=Pmtrx[14]*(Q_000010001*PR_000011011000+Q_000010101*PR_000011011001+Q_000110001*PR_000011011010+Q_000110101*PR_000011011011);
ans_temp[ans_id*18+14]+=Pmtrx[12]*(Q_001000010*PR_000011011000+Q_001000110*PR_000011011001+Q_101000010*PR_000011011100+Q_101000110*PR_000011011101);
ans_temp[ans_id*18+14]+=Pmtrx[13]*(Q_000001010*PR_000011011000+Q_000001110*PR_000011011001+Q_000101010*PR_000011011010+Q_000101110*PR_000011011011);
ans_temp[ans_id*18+14]+=Pmtrx[14]*(Q_000000011*PR_000011011000+Q_000000111*PR_000011011001+Q_000000211*PR_000011011002);
ans_temp[ans_id*18+12]+=Pmtrx[15]*(Q_011000000*PR_000010012000+Q_111000000*PR_000010012100+Q_211000000*PR_000010012200);
ans_temp[ans_id*18+12]+=Pmtrx[16]*(Q_010001000*PR_000010012000+Q_010101000*PR_000010012010+Q_110001000*PR_000010012100+Q_110101000*PR_000010012110);
ans_temp[ans_id*18+12]+=Pmtrx[17]*(Q_010000001*PR_000010012000+Q_010000101*PR_000010012001+Q_110000001*PR_000010012100+Q_110000101*PR_000010012101);
ans_temp[ans_id*18+13]+=Pmtrx[15]*(Q_001010000*PR_000010012000+Q_001110000*PR_000010012010+Q_101010000*PR_000010012100+Q_101110000*PR_000010012110);
ans_temp[ans_id*18+13]+=Pmtrx[16]*(Q_000011000*PR_000010012000+Q_000111000*PR_000010012010+Q_000211000*PR_000010012020);
ans_temp[ans_id*18+13]+=Pmtrx[17]*(Q_000010001*PR_000010012000+Q_000010101*PR_000010012001+Q_000110001*PR_000010012010+Q_000110101*PR_000010012011);
ans_temp[ans_id*18+14]+=Pmtrx[15]*(Q_001000010*PR_000010012000+Q_001000110*PR_000010012001+Q_101000010*PR_000010012100+Q_101000110*PR_000010012101);
ans_temp[ans_id*18+14]+=Pmtrx[16]*(Q_000001010*PR_000010012000+Q_000001110*PR_000010012001+Q_000101010*PR_000010012010+Q_000101110*PR_000010012011);
ans_temp[ans_id*18+14]+=Pmtrx[17]*(Q_000000011*PR_000010012000+Q_000000111*PR_000010012001+Q_000000211*PR_000010012002);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(Q_011000000*PR_002000020000+Q_111000000*PR_002000020100+Q_211000000*PR_002000020200);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(Q_010001000*PR_002000020000+Q_010101000*PR_002000020010+Q_110001000*PR_002000020100+Q_110101000*PR_002000020110);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(Q_010000001*PR_002000020000+Q_010000101*PR_002000020001+Q_110000001*PR_002000020100+Q_110000101*PR_002000020101);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(Q_001010000*PR_002000020000+Q_001110000*PR_002000020010+Q_101010000*PR_002000020100+Q_101110000*PR_002000020110);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(Q_000011000*PR_002000020000+Q_000111000*PR_002000020010+Q_000211000*PR_002000020020);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(Q_000010001*PR_002000020000+Q_000010101*PR_002000020001+Q_000110001*PR_002000020010+Q_000110101*PR_002000020011);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(Q_001000010*PR_002000020000+Q_001000110*PR_002000020001+Q_101000010*PR_002000020100+Q_101000110*PR_002000020101);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(Q_000001010*PR_002000020000+Q_000001110*PR_002000020001+Q_000101010*PR_002000020010+Q_000101110*PR_002000020011);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(Q_000000011*PR_002000020000+Q_000000111*PR_002000020001+Q_000000211*PR_002000020002);
ans_temp[ans_id*18+15]+=Pmtrx[3]*(Q_011000000*PR_001001020000+Q_111000000*PR_001001020100+Q_211000000*PR_001001020200);
ans_temp[ans_id*18+15]+=Pmtrx[4]*(Q_010001000*PR_001001020000+Q_010101000*PR_001001020010+Q_110001000*PR_001001020100+Q_110101000*PR_001001020110);
ans_temp[ans_id*18+15]+=Pmtrx[5]*(Q_010000001*PR_001001020000+Q_010000101*PR_001001020001+Q_110000001*PR_001001020100+Q_110000101*PR_001001020101);
ans_temp[ans_id*18+16]+=Pmtrx[3]*(Q_001010000*PR_001001020000+Q_001110000*PR_001001020010+Q_101010000*PR_001001020100+Q_101110000*PR_001001020110);
ans_temp[ans_id*18+16]+=Pmtrx[4]*(Q_000011000*PR_001001020000+Q_000111000*PR_001001020010+Q_000211000*PR_001001020020);
ans_temp[ans_id*18+16]+=Pmtrx[5]*(Q_000010001*PR_001001020000+Q_000010101*PR_001001020001+Q_000110001*PR_001001020010+Q_000110101*PR_001001020011);
ans_temp[ans_id*18+17]+=Pmtrx[3]*(Q_001000010*PR_001001020000+Q_001000110*PR_001001020001+Q_101000010*PR_001001020100+Q_101000110*PR_001001020101);
ans_temp[ans_id*18+17]+=Pmtrx[4]*(Q_000001010*PR_001001020000+Q_000001110*PR_001001020001+Q_000101010*PR_001001020010+Q_000101110*PR_001001020011);
ans_temp[ans_id*18+17]+=Pmtrx[5]*(Q_000000011*PR_001001020000+Q_000000111*PR_001001020001+Q_000000211*PR_001001020002);
ans_temp[ans_id*18+15]+=Pmtrx[6]*(Q_011000000*PR_000002020000+Q_111000000*PR_000002020100+Q_211000000*PR_000002020200);
ans_temp[ans_id*18+15]+=Pmtrx[7]*(Q_010001000*PR_000002020000+Q_010101000*PR_000002020010+Q_110001000*PR_000002020100+Q_110101000*PR_000002020110);
ans_temp[ans_id*18+15]+=Pmtrx[8]*(Q_010000001*PR_000002020000+Q_010000101*PR_000002020001+Q_110000001*PR_000002020100+Q_110000101*PR_000002020101);
ans_temp[ans_id*18+16]+=Pmtrx[6]*(Q_001010000*PR_000002020000+Q_001110000*PR_000002020010+Q_101010000*PR_000002020100+Q_101110000*PR_000002020110);
ans_temp[ans_id*18+16]+=Pmtrx[7]*(Q_000011000*PR_000002020000+Q_000111000*PR_000002020010+Q_000211000*PR_000002020020);
ans_temp[ans_id*18+16]+=Pmtrx[8]*(Q_000010001*PR_000002020000+Q_000010101*PR_000002020001+Q_000110001*PR_000002020010+Q_000110101*PR_000002020011);
ans_temp[ans_id*18+17]+=Pmtrx[6]*(Q_001000010*PR_000002020000+Q_001000110*PR_000002020001+Q_101000010*PR_000002020100+Q_101000110*PR_000002020101);
ans_temp[ans_id*18+17]+=Pmtrx[7]*(Q_000001010*PR_000002020000+Q_000001110*PR_000002020001+Q_000101010*PR_000002020010+Q_000101110*PR_000002020011);
ans_temp[ans_id*18+17]+=Pmtrx[8]*(Q_000000011*PR_000002020000+Q_000000111*PR_000002020001+Q_000000211*PR_000002020002);
ans_temp[ans_id*18+15]+=Pmtrx[9]*(Q_011000000*PR_001000021000+Q_111000000*PR_001000021100+Q_211000000*PR_001000021200);
ans_temp[ans_id*18+15]+=Pmtrx[10]*(Q_010001000*PR_001000021000+Q_010101000*PR_001000021010+Q_110001000*PR_001000021100+Q_110101000*PR_001000021110);
ans_temp[ans_id*18+15]+=Pmtrx[11]*(Q_010000001*PR_001000021000+Q_010000101*PR_001000021001+Q_110000001*PR_001000021100+Q_110000101*PR_001000021101);
ans_temp[ans_id*18+16]+=Pmtrx[9]*(Q_001010000*PR_001000021000+Q_001110000*PR_001000021010+Q_101010000*PR_001000021100+Q_101110000*PR_001000021110);
ans_temp[ans_id*18+16]+=Pmtrx[10]*(Q_000011000*PR_001000021000+Q_000111000*PR_001000021010+Q_000211000*PR_001000021020);
ans_temp[ans_id*18+16]+=Pmtrx[11]*(Q_000010001*PR_001000021000+Q_000010101*PR_001000021001+Q_000110001*PR_001000021010+Q_000110101*PR_001000021011);
ans_temp[ans_id*18+17]+=Pmtrx[9]*(Q_001000010*PR_001000021000+Q_001000110*PR_001000021001+Q_101000010*PR_001000021100+Q_101000110*PR_001000021101);
ans_temp[ans_id*18+17]+=Pmtrx[10]*(Q_000001010*PR_001000021000+Q_000001110*PR_001000021001+Q_000101010*PR_001000021010+Q_000101110*PR_001000021011);
ans_temp[ans_id*18+17]+=Pmtrx[11]*(Q_000000011*PR_001000021000+Q_000000111*PR_001000021001+Q_000000211*PR_001000021002);
ans_temp[ans_id*18+15]+=Pmtrx[12]*(Q_011000000*PR_000001021000+Q_111000000*PR_000001021100+Q_211000000*PR_000001021200);
ans_temp[ans_id*18+15]+=Pmtrx[13]*(Q_010001000*PR_000001021000+Q_010101000*PR_000001021010+Q_110001000*PR_000001021100+Q_110101000*PR_000001021110);
ans_temp[ans_id*18+15]+=Pmtrx[14]*(Q_010000001*PR_000001021000+Q_010000101*PR_000001021001+Q_110000001*PR_000001021100+Q_110000101*PR_000001021101);
ans_temp[ans_id*18+16]+=Pmtrx[12]*(Q_001010000*PR_000001021000+Q_001110000*PR_000001021010+Q_101010000*PR_000001021100+Q_101110000*PR_000001021110);
ans_temp[ans_id*18+16]+=Pmtrx[13]*(Q_000011000*PR_000001021000+Q_000111000*PR_000001021010+Q_000211000*PR_000001021020);
ans_temp[ans_id*18+16]+=Pmtrx[14]*(Q_000010001*PR_000001021000+Q_000010101*PR_000001021001+Q_000110001*PR_000001021010+Q_000110101*PR_000001021011);
ans_temp[ans_id*18+17]+=Pmtrx[12]*(Q_001000010*PR_000001021000+Q_001000110*PR_000001021001+Q_101000010*PR_000001021100+Q_101000110*PR_000001021101);
ans_temp[ans_id*18+17]+=Pmtrx[13]*(Q_000001010*PR_000001021000+Q_000001110*PR_000001021001+Q_000101010*PR_000001021010+Q_000101110*PR_000001021011);
ans_temp[ans_id*18+17]+=Pmtrx[14]*(Q_000000011*PR_000001021000+Q_000000111*PR_000001021001+Q_000000211*PR_000001021002);
ans_temp[ans_id*18+15]+=Pmtrx[15]*(Q_011000000*PR_000000022000+Q_111000000*PR_000000022100+Q_211000000*PR_000000022200);
ans_temp[ans_id*18+15]+=Pmtrx[16]*(Q_010001000*PR_000000022000+Q_010101000*PR_000000022010+Q_110001000*PR_000000022100+Q_110101000*PR_000000022110);
ans_temp[ans_id*18+15]+=Pmtrx[17]*(Q_010000001*PR_000000022000+Q_010000101*PR_000000022001+Q_110000001*PR_000000022100+Q_110000101*PR_000000022101);
ans_temp[ans_id*18+16]+=Pmtrx[15]*(Q_001010000*PR_000000022000+Q_001110000*PR_000000022010+Q_101010000*PR_000000022100+Q_101110000*PR_000000022110);
ans_temp[ans_id*18+16]+=Pmtrx[16]*(Q_000011000*PR_000000022000+Q_000111000*PR_000000022010+Q_000211000*PR_000000022020);
ans_temp[ans_id*18+16]+=Pmtrx[17]*(Q_000010001*PR_000000022000+Q_000010101*PR_000000022001+Q_000110001*PR_000000022010+Q_000110101*PR_000000022011);
ans_temp[ans_id*18+17]+=Pmtrx[15]*(Q_001000010*PR_000000022000+Q_001000110*PR_000000022001+Q_101000010*PR_000000022100+Q_101000110*PR_000000022101);
ans_temp[ans_id*18+17]+=Pmtrx[16]*(Q_000001010*PR_000000022000+Q_000001110*PR_000000022001+Q_000101010*PR_000000022010+Q_000101110*PR_000000022011);
ans_temp[ans_id*18+17]+=Pmtrx[17]*(Q_000000011*PR_000000022000+Q_000000111*PR_000000022001+Q_000000211*PR_000000022002);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
__global__ void MD_Kq_ddpp_fs(unsigned int contrc_bra_num,unsigned int contrc_ket_num,\
unsigned int * contrc_bra_id,\
unsigned int * contrc_ket_id,\
unsigned int mtrx_len,\
double * Pmtrx_in,\
double * P,\
double * PA,\
double * PB,\
double * Zta_in,\
double * pp_in,\
float * K2_p_in,\
unsigned int * id_bra_in,\
double * Q,\
double * QC,\
double * QD,\
double * Eta_in,\
double * pq_in,\
float * K2_q_in,\
unsigned int * id_ket_in,\
double * ans){
unsigned int tId_x = threadIdx.x;
unsigned int bId_x = blockIdx.x;
unsigned int bId_y = blockIdx.y;
unsigned int tdis = blockDim.x;
unsigned int bdis_x = gridDim.x;
unsigned int bdis_y = gridDim.y;
unsigned int ans_id=tId_x;
double Pmtrx[18]={0.0};
__shared__ double ans_temp[NTHREAD*18];
for(int i=0;i<18;i++){
ans_temp[i*tdis+tId_x]=0.0;
}
for(unsigned int i_contrc_bra=bId_x;i_contrc_bra<contrc_bra_num;i_contrc_bra+=bdis_x){
for(unsigned int j_contrc_ket=bId_y;j_contrc_ket<contrc_ket_num;j_contrc_ket+=bdis_y){
unsigned int primit_bra_start = contrc_bra_id[i_contrc_bra ];
unsigned int primit_bra_end = contrc_bra_id[i_contrc_bra+1];
unsigned int primit_ket_start = contrc_ket_id[j_contrc_ket ];
unsigned int primit_ket_end = contrc_ket_id[j_contrc_ket+1];
for(unsigned int ii=primit_bra_start;ii<primit_bra_end;ii++){
unsigned int id_bra=id_bra_in[ii];
double PX=P[ii*3+0];
double PY=P[ii*3+1];
double PZ=P[ii*3+2];
double Pd_010[3];
Pd_010[0]=PA[ii*3+0];
Pd_010[1]=PA[ii*3+1];
Pd_010[2]=PA[ii*3+2];
double Pd_001[3];
Pd_001[0]=PB[ii*3+0];
Pd_001[1]=PB[ii*3+1];
Pd_001[2]=PB[ii*3+2];
double Zta=Zta_in[ii];
double pp=pp_in[ii];
float K2_p=K2_p_in[ii];
double aPin1=1/(2*Zta);
for(unsigned int j=tId_x;j<primit_ket_end-primit_ket_start;j+=tdis){
unsigned int jj=primit_ket_start+j;
unsigned int id_ket=tex1Dfetch(tex_id_ket,jj);
double P_max=0.0;
for(int p_j=0;p_j<3;p_j++){
for(int p_i=0;p_i<6;p_i++){
Pmtrx[p_i*3+p_j]=Pmtrx_in[(id_ket+p_j)*mtrx_len+(id_bra+p_i)];
double temp_P=fabsf(Pmtrx[p_i*3+p_j]);
if(temp_P>P_max) P_max=temp_P;
}
}
float K2_q=tex1Dfetch(tex_K2_q,jj);
if(fabsf(K2_p*K2_q)<1.0E-14){
break;
}
if(fabsf(P_max*K2_p*K2_q)<1.0E-14) continue;
int2 temp_int2;
temp_int2=tex1Dfetch(tex_Eta,jj);
double Eta=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_pq,jj);
double pq=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+0);
double QX=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+1);
double QY=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_Q,jj*3+2);
double QZ=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_010[3];
temp_int2=tex1Dfetch(tex_QC,jj*3+0);
Qd_010[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+1);
Qd_010[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QC,jj*3+2);
Qd_010[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double Qd_001[3];
temp_int2=tex1Dfetch(tex_QD,jj*3+0);
Qd_001[0]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+1);
Qd_001[1]=__hiloint2double(temp_int2.y,temp_int2.x);
temp_int2=tex1Dfetch(tex_QD,jj*3+2);
Qd_001[2]=__hiloint2double(temp_int2.y,temp_int2.x);
double alphaT=rsqrt(Eta+Zta);
double lmd=4*P25*pp*pq*alphaT;
alphaT=Eta*Zta*alphaT*alphaT;
double TX=PX-QX;
double TY=PY-QY;
double TZ=PZ-QZ;
double T=alphaT*(TX*TX+TY*TY+TZ*TZ);
double R_000[7];
Ft_fs_6(6,T,R_000);
R_000[0]*=lmd;
R_000[1]*=-2*alphaT*lmd;
R_000[2]*=4*alphaT*alphaT*lmd;
R_000[3]*=-8*alphaT*alphaT*alphaT*lmd;
R_000[4]*=16*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[5]*=-32*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
R_000[6]*=64*alphaT*alphaT*alphaT*alphaT*alphaT*alphaT*lmd;
double aQin1=1/(2*Eta);
double R_100[6];
double R_200[5];
double R_300[4];
double R_400[3];
double R_500[2];
double R_600[1];
double R_010[6];
double R_110[5];
double R_210[4];
double R_310[3];
double R_410[2];
double R_510[1];
double R_020[5];
double R_120[4];
double R_220[3];
double R_320[2];
double R_420[1];
double R_030[4];
double R_130[3];
double R_230[2];
double R_330[1];
double R_040[3];
double R_140[2];
double R_240[1];
double R_050[2];
double R_150[1];
double R_060[1];
double R_001[6];
double R_101[5];
double R_201[4];
double R_301[3];
double R_401[2];
double R_501[1];
double R_011[5];
double R_111[4];
double R_211[3];
double R_311[2];
double R_411[1];
double R_021[4];
double R_121[3];
double R_221[2];
double R_321[1];
double R_031[3];
double R_131[2];
double R_231[1];
double R_041[2];
double R_141[1];
double R_051[1];
double R_002[5];
double R_102[4];
double R_202[3];
double R_302[2];
double R_402[1];
double R_012[4];
double R_112[3];
double R_212[2];
double R_312[1];
double R_022[3];
double R_122[2];
double R_222[1];
double R_032[2];
double R_132[1];
double R_042[1];
double R_003[4];
double R_103[3];
double R_203[2];
double R_303[1];
double R_013[3];
double R_113[2];
double R_213[1];
double R_023[2];
double R_123[1];
double R_033[1];
double R_004[3];
double R_104[2];
double R_204[1];
double R_014[2];
double R_114[1];
double R_024[1];
double R_005[2];
double R_105[1];
double R_015[1];
double R_006[1];
for(int i=0;i<6;i++){
R_100[i]=TX*R_000[i+1];
}
for(int i=0;i<6;i++){
R_010[i]=TY*R_000[i+1];
}
for(int i=0;i<6;i++){
R_001[i]=TZ*R_000[i+1];
}
for(int i=0;i<5;i++){
R_200[i]=TX*R_100[i+1]+R_000[i+1];
}
for(int i=0;i<5;i++){
R_110[i]=TX*R_010[i+1];
}
for(int i=0;i<5;i++){
R_020[i]=TY*R_010[i+1]+R_000[i+1];
}
for(int i=0;i<5;i++){
R_101[i]=TX*R_001[i+1];
}
for(int i=0;i<5;i++){
R_011[i]=TY*R_001[i+1];
}
for(int i=0;i<5;i++){
R_002[i]=TZ*R_001[i+1]+R_000[i+1];
}
for(int i=0;i<4;i++){
R_300[i]=TX*R_200[i+1]+2*R_100[i+1];
}
for(int i=0;i<4;i++){
R_210[i]=TY*R_200[i+1];
}
for(int i=0;i<4;i++){
R_120[i]=TX*R_020[i+1];
}
for(int i=0;i<4;i++){
R_030[i]=TY*R_020[i+1]+2*R_010[i+1];
}
for(int i=0;i<4;i++){
R_201[i]=TZ*R_200[i+1];
}
for(int i=0;i<4;i++){
R_111[i]=TX*R_011[i+1];
}
for(int i=0;i<4;i++){
R_021[i]=TZ*R_020[i+1];
}
for(int i=0;i<4;i++){
R_102[i]=TX*R_002[i+1];
}
for(int i=0;i<4;i++){
R_012[i]=TY*R_002[i+1];
}
for(int i=0;i<4;i++){
R_003[i]=TZ*R_002[i+1]+2*R_001[i+1];
}
for(int i=0;i<3;i++){
R_400[i]=TX*R_300[i+1]+3*R_200[i+1];
}
for(int i=0;i<3;i++){
R_310[i]=TY*R_300[i+1];
}
for(int i=0;i<3;i++){
R_220[i]=TX*R_120[i+1]+R_020[i+1];
}
for(int i=0;i<3;i++){
R_130[i]=TX*R_030[i+1];
}
for(int i=0;i<3;i++){
R_040[i]=TY*R_030[i+1]+3*R_020[i+1];
}
for(int i=0;i<3;i++){
R_301[i]=TZ*R_300[i+1];
}
for(int i=0;i<3;i++){
R_211[i]=TY*R_201[i+1];
}
for(int i=0;i<3;i++){
R_121[i]=TX*R_021[i+1];
}
for(int i=0;i<3;i++){
R_031[i]=TZ*R_030[i+1];
}
for(int i=0;i<3;i++){
R_202[i]=TX*R_102[i+1]+R_002[i+1];
}
for(int i=0;i<3;i++){
R_112[i]=TX*R_012[i+1];
}
for(int i=0;i<3;i++){
R_022[i]=TY*R_012[i+1]+R_002[i+1];
}
for(int i=0;i<3;i++){
R_103[i]=TX*R_003[i+1];
}
for(int i=0;i<3;i++){
R_013[i]=TY*R_003[i+1];
}
for(int i=0;i<3;i++){
R_004[i]=TZ*R_003[i+1]+3*R_002[i+1];
}
for(int i=0;i<2;i++){
R_500[i]=TX*R_400[i+1]+4*R_300[i+1];
}
for(int i=0;i<2;i++){
R_410[i]=TY*R_400[i+1];
}
for(int i=0;i<2;i++){
R_320[i]=TX*R_220[i+1]+2*R_120[i+1];
}
for(int i=0;i<2;i++){
R_230[i]=TY*R_220[i+1]+2*R_210[i+1];
}
for(int i=0;i<2;i++){
R_140[i]=TX*R_040[i+1];
}
for(int i=0;i<2;i++){
R_050[i]=TY*R_040[i+1]+4*R_030[i+1];
}
for(int i=0;i<2;i++){
R_401[i]=TZ*R_400[i+1];
}
for(int i=0;i<2;i++){
R_311[i]=TY*R_301[i+1];
}
for(int i=0;i<2;i++){
R_221[i]=TZ*R_220[i+1];
}
for(int i=0;i<2;i++){
R_131[i]=TX*R_031[i+1];
}
for(int i=0;i<2;i++){
R_041[i]=TZ*R_040[i+1];
}
for(int i=0;i<2;i++){
R_302[i]=TX*R_202[i+1]+2*R_102[i+1];
}
for(int i=0;i<2;i++){
R_212[i]=TY*R_202[i+1];
}
for(int i=0;i<2;i++){
R_122[i]=TX*R_022[i+1];
}
for(int i=0;i<2;i++){
R_032[i]=TY*R_022[i+1]+2*R_012[i+1];
}
for(int i=0;i<2;i++){
R_203[i]=TZ*R_202[i+1]+2*R_201[i+1];
}
for(int i=0;i<2;i++){
R_113[i]=TX*R_013[i+1];
}
for(int i=0;i<2;i++){
R_023[i]=TZ*R_022[i+1]+2*R_021[i+1];
}
for(int i=0;i<2;i++){
R_104[i]=TX*R_004[i+1];
}
for(int i=0;i<2;i++){
R_014[i]=TY*R_004[i+1];
}
for(int i=0;i<2;i++){
R_005[i]=TZ*R_004[i+1]+4*R_003[i+1];
}
for(int i=0;i<1;i++){
R_600[i]=TX*R_500[i+1]+5*R_400[i+1];
}
for(int i=0;i<1;i++){
R_510[i]=TY*R_500[i+1];
}
for(int i=0;i<1;i++){
R_420[i]=TX*R_320[i+1]+3*R_220[i+1];
}
for(int i=0;i<1;i++){
R_330[i]=TX*R_230[i+1]+2*R_130[i+1];
}
for(int i=0;i<1;i++){
R_240[i]=TY*R_230[i+1]+3*R_220[i+1];
}
for(int i=0;i<1;i++){
R_150[i]=TX*R_050[i+1];
}
for(int i=0;i<1;i++){
R_060[i]=TY*R_050[i+1]+5*R_040[i+1];
}
for(int i=0;i<1;i++){
R_501[i]=TZ*R_500[i+1];
}
for(int i=0;i<1;i++){
R_411[i]=TY*R_401[i+1];
}
for(int i=0;i<1;i++){
R_321[i]=TZ*R_320[i+1];
}
for(int i=0;i<1;i++){
R_231[i]=TZ*R_230[i+1];
}
for(int i=0;i<1;i++){
R_141[i]=TX*R_041[i+1];
}
for(int i=0;i<1;i++){
R_051[i]=TZ*R_050[i+1];
}
for(int i=0;i<1;i++){
R_402[i]=TX*R_302[i+1]+3*R_202[i+1];
}
for(int i=0;i<1;i++){
R_312[i]=TY*R_302[i+1];
}
for(int i=0;i<1;i++){
R_222[i]=TX*R_122[i+1]+R_022[i+1];
}
for(int i=0;i<1;i++){
R_132[i]=TX*R_032[i+1];
}
for(int i=0;i<1;i++){
R_042[i]=TY*R_032[i+1]+3*R_022[i+1];
}
for(int i=0;i<1;i++){
R_303[i]=TX*R_203[i+1]+2*R_103[i+1];
}
for(int i=0;i<1;i++){
R_213[i]=TY*R_203[i+1];
}
for(int i=0;i<1;i++){
R_123[i]=TX*R_023[i+1];
}
for(int i=0;i<1;i++){
R_033[i]=TY*R_023[i+1]+2*R_013[i+1];
}
for(int i=0;i<1;i++){
R_204[i]=TZ*R_203[i+1]+3*R_202[i+1];
}
for(int i=0;i<1;i++){
R_114[i]=TX*R_014[i+1];
}
for(int i=0;i<1;i++){
R_024[i]=TZ*R_023[i+1]+3*R_022[i+1];
}
for(int i=0;i<1;i++){
R_105[i]=TX*R_005[i+1];
}
for(int i=0;i<1;i++){
R_015[i]=TY*R_005[i+1];
}
for(int i=0;i<1;i++){
R_006[i]=TZ*R_005[i+1]+5*R_004[i+1];
}
double Qd_101[3];
double Qd_110[3];
double Qd_011[3];
double Qd_111[3];
double Qd_211[3];
for(int i=0;i<3;i++){
Qd_101[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_110[i]=aQin1;
}
for(int i=0;i<3;i++){
Qd_011[i]=Qd_101[i]+Qd_010[i]*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_111[i]=Qd_010[i]*Qd_101[i]+aQin1*Qd_001[i];
}
for(int i=0;i<3;i++){
Qd_211[i]=aQin1*Qd_101[i];
}
double Q_011000000=Qd_011[0];
double Q_111000000=Qd_111[0];
double Q_211000000=Qd_211[0];
double Q_010001000=Qd_010[0]*Qd_001[1];
double Q_010101000=Qd_010[0]*Qd_101[1];
double Q_110001000=Qd_110[0]*Qd_001[1];
double Q_110101000=Qd_110[0]*Qd_101[1];
double Q_010000001=Qd_010[0]*Qd_001[2];
double Q_010000101=Qd_010[0]*Qd_101[2];
double Q_110000001=Qd_110[0]*Qd_001[2];
double Q_110000101=Qd_110[0]*Qd_101[2];
double Q_001010000=Qd_001[0]*Qd_010[1];
double Q_001110000=Qd_001[0]*Qd_110[1];
double Q_101010000=Qd_101[0]*Qd_010[1];
double Q_101110000=Qd_101[0]*Qd_110[1];
double Q_000011000=Qd_011[1];
double Q_000111000=Qd_111[1];
double Q_000211000=Qd_211[1];
double Q_000010001=Qd_010[1]*Qd_001[2];
double Q_000010101=Qd_010[1]*Qd_101[2];
double Q_000110001=Qd_110[1]*Qd_001[2];
double Q_000110101=Qd_110[1]*Qd_101[2];
double Q_001000010=Qd_001[0]*Qd_010[2];
double Q_001000110=Qd_001[0]*Qd_110[2];
double Q_101000010=Qd_101[0]*Qd_010[2];
double Q_101000110=Qd_101[0]*Qd_110[2];
double Q_000001010=Qd_001[1]*Qd_010[2];
double Q_000001110=Qd_001[1]*Qd_110[2];
double Q_000101010=Qd_101[1]*Qd_010[2];
double Q_000101110=Qd_101[1]*Qd_110[2];
double Q_000000011=Qd_011[2];
double Q_000000111=Qd_111[2];
double Q_000000211=Qd_211[2];
double QR_011000000000=Q_011000000*R_000[0]+-1*Q_111000000*R_100[0]+Q_211000000*R_200[0];
double QR_010001000000=Q_010001000*R_000[0]+-1*Q_010101000*R_010[0]+-1*Q_110001000*R_100[0]+Q_110101000*R_110[0];
double QR_010000001000=Q_010000001*R_000[0]+-1*Q_010000101*R_001[0]+-1*Q_110000001*R_100[0]+Q_110000101*R_101[0];
double QR_001010000000=Q_001010000*R_000[0]+-1*Q_001110000*R_010[0]+-1*Q_101010000*R_100[0]+Q_101110000*R_110[0];
double QR_000011000000=Q_000011000*R_000[0]+-1*Q_000111000*R_010[0]+Q_000211000*R_020[0];
double QR_000010001000=Q_000010001*R_000[0]+-1*Q_000010101*R_001[0]+-1*Q_000110001*R_010[0]+Q_000110101*R_011[0];
double QR_001000010000=Q_001000010*R_000[0]+-1*Q_001000110*R_001[0]+-1*Q_101000010*R_100[0]+Q_101000110*R_101[0];
double QR_000001010000=Q_000001010*R_000[0]+-1*Q_000001110*R_001[0]+-1*Q_000101010*R_010[0]+Q_000101110*R_011[0];
double QR_000000011000=Q_000000011*R_000[0]+-1*Q_000000111*R_001[0]+Q_000000211*R_002[0];
double QR_011000000001=Q_011000000*R_001[0]+-1*Q_111000000*R_101[0]+Q_211000000*R_201[0];
double QR_010001000001=Q_010001000*R_001[0]+-1*Q_010101000*R_011[0]+-1*Q_110001000*R_101[0]+Q_110101000*R_111[0];
double QR_010000001001=Q_010000001*R_001[0]+-1*Q_010000101*R_002[0]+-1*Q_110000001*R_101[0]+Q_110000101*R_102[0];
double QR_001010000001=Q_001010000*R_001[0]+-1*Q_001110000*R_011[0]+-1*Q_101010000*R_101[0]+Q_101110000*R_111[0];
double QR_000011000001=Q_000011000*R_001[0]+-1*Q_000111000*R_011[0]+Q_000211000*R_021[0];
double QR_000010001001=Q_000010001*R_001[0]+-1*Q_000010101*R_002[0]+-1*Q_000110001*R_011[0]+Q_000110101*R_012[0];
double QR_001000010001=Q_001000010*R_001[0]+-1*Q_001000110*R_002[0]+-1*Q_101000010*R_101[0]+Q_101000110*R_102[0];
double QR_000001010001=Q_000001010*R_001[0]+-1*Q_000001110*R_002[0]+-1*Q_000101010*R_011[0]+Q_000101110*R_012[0];
double QR_000000011001=Q_000000011*R_001[0]+-1*Q_000000111*R_002[0]+Q_000000211*R_003[0];
double QR_011000000010=Q_011000000*R_010[0]+-1*Q_111000000*R_110[0]+Q_211000000*R_210[0];
double QR_010001000010=Q_010001000*R_010[0]+-1*Q_010101000*R_020[0]+-1*Q_110001000*R_110[0]+Q_110101000*R_120[0];
double QR_010000001010=Q_010000001*R_010[0]+-1*Q_010000101*R_011[0]+-1*Q_110000001*R_110[0]+Q_110000101*R_111[0];
double QR_001010000010=Q_001010000*R_010[0]+-1*Q_001110000*R_020[0]+-1*Q_101010000*R_110[0]+Q_101110000*R_120[0];
double QR_000011000010=Q_000011000*R_010[0]+-1*Q_000111000*R_020[0]+Q_000211000*R_030[0];
double QR_000010001010=Q_000010001*R_010[0]+-1*Q_000010101*R_011[0]+-1*Q_000110001*R_020[0]+Q_000110101*R_021[0];
double QR_001000010010=Q_001000010*R_010[0]+-1*Q_001000110*R_011[0]+-1*Q_101000010*R_110[0]+Q_101000110*R_111[0];
double QR_000001010010=Q_000001010*R_010[0]+-1*Q_000001110*R_011[0]+-1*Q_000101010*R_020[0]+Q_000101110*R_021[0];
double QR_000000011010=Q_000000011*R_010[0]+-1*Q_000000111*R_011[0]+Q_000000211*R_012[0];
double QR_011000000100=Q_011000000*R_100[0]+-1*Q_111000000*R_200[0]+Q_211000000*R_300[0];
double QR_010001000100=Q_010001000*R_100[0]+-1*Q_010101000*R_110[0]+-1*Q_110001000*R_200[0]+Q_110101000*R_210[0];
double QR_010000001100=Q_010000001*R_100[0]+-1*Q_010000101*R_101[0]+-1*Q_110000001*R_200[0]+Q_110000101*R_201[0];
double QR_001010000100=Q_001010000*R_100[0]+-1*Q_001110000*R_110[0]+-1*Q_101010000*R_200[0]+Q_101110000*R_210[0];
double QR_000011000100=Q_000011000*R_100[0]+-1*Q_000111000*R_110[0]+Q_000211000*R_120[0];
double QR_000010001100=Q_000010001*R_100[0]+-1*Q_000010101*R_101[0]+-1*Q_000110001*R_110[0]+Q_000110101*R_111[0];
double QR_001000010100=Q_001000010*R_100[0]+-1*Q_001000110*R_101[0]+-1*Q_101000010*R_200[0]+Q_101000110*R_201[0];
double QR_000001010100=Q_000001010*R_100[0]+-1*Q_000001110*R_101[0]+-1*Q_000101010*R_110[0]+Q_000101110*R_111[0];
double QR_000000011100=Q_000000011*R_100[0]+-1*Q_000000111*R_101[0]+Q_000000211*R_102[0];
double QR_011000000002=Q_011000000*R_002[0]+-1*Q_111000000*R_102[0]+Q_211000000*R_202[0];
double QR_010001000002=Q_010001000*R_002[0]+-1*Q_010101000*R_012[0]+-1*Q_110001000*R_102[0]+Q_110101000*R_112[0];
double QR_010000001002=Q_010000001*R_002[0]+-1*Q_010000101*R_003[0]+-1*Q_110000001*R_102[0]+Q_110000101*R_103[0];
double QR_001010000002=Q_001010000*R_002[0]+-1*Q_001110000*R_012[0]+-1*Q_101010000*R_102[0]+Q_101110000*R_112[0];
double QR_000011000002=Q_000011000*R_002[0]+-1*Q_000111000*R_012[0]+Q_000211000*R_022[0];
double QR_000010001002=Q_000010001*R_002[0]+-1*Q_000010101*R_003[0]+-1*Q_000110001*R_012[0]+Q_000110101*R_013[0];
double QR_001000010002=Q_001000010*R_002[0]+-1*Q_001000110*R_003[0]+-1*Q_101000010*R_102[0]+Q_101000110*R_103[0];
double QR_000001010002=Q_000001010*R_002[0]+-1*Q_000001110*R_003[0]+-1*Q_000101010*R_012[0]+Q_000101110*R_013[0];
double QR_000000011002=Q_000000011*R_002[0]+-1*Q_000000111*R_003[0]+Q_000000211*R_004[0];
double QR_011000000011=Q_011000000*R_011[0]+-1*Q_111000000*R_111[0]+Q_211000000*R_211[0];
double QR_010001000011=Q_010001000*R_011[0]+-1*Q_010101000*R_021[0]+-1*Q_110001000*R_111[0]+Q_110101000*R_121[0];
double QR_010000001011=Q_010000001*R_011[0]+-1*Q_010000101*R_012[0]+-1*Q_110000001*R_111[0]+Q_110000101*R_112[0];
double QR_001010000011=Q_001010000*R_011[0]+-1*Q_001110000*R_021[0]+-1*Q_101010000*R_111[0]+Q_101110000*R_121[0];
double QR_000011000011=Q_000011000*R_011[0]+-1*Q_000111000*R_021[0]+Q_000211000*R_031[0];
double QR_000010001011=Q_000010001*R_011[0]+-1*Q_000010101*R_012[0]+-1*Q_000110001*R_021[0]+Q_000110101*R_022[0];
double QR_001000010011=Q_001000010*R_011[0]+-1*Q_001000110*R_012[0]+-1*Q_101000010*R_111[0]+Q_101000110*R_112[0];
double QR_000001010011=Q_000001010*R_011[0]+-1*Q_000001110*R_012[0]+-1*Q_000101010*R_021[0]+Q_000101110*R_022[0];
double QR_000000011011=Q_000000011*R_011[0]+-1*Q_000000111*R_012[0]+Q_000000211*R_013[0];
double QR_011000000020=Q_011000000*R_020[0]+-1*Q_111000000*R_120[0]+Q_211000000*R_220[0];
double QR_010001000020=Q_010001000*R_020[0]+-1*Q_010101000*R_030[0]+-1*Q_110001000*R_120[0]+Q_110101000*R_130[0];
double QR_010000001020=Q_010000001*R_020[0]+-1*Q_010000101*R_021[0]+-1*Q_110000001*R_120[0]+Q_110000101*R_121[0];
double QR_001010000020=Q_001010000*R_020[0]+-1*Q_001110000*R_030[0]+-1*Q_101010000*R_120[0]+Q_101110000*R_130[0];
double QR_000011000020=Q_000011000*R_020[0]+-1*Q_000111000*R_030[0]+Q_000211000*R_040[0];
double QR_000010001020=Q_000010001*R_020[0]+-1*Q_000010101*R_021[0]+-1*Q_000110001*R_030[0]+Q_000110101*R_031[0];
double QR_001000010020=Q_001000010*R_020[0]+-1*Q_001000110*R_021[0]+-1*Q_101000010*R_120[0]+Q_101000110*R_121[0];
double QR_000001010020=Q_000001010*R_020[0]+-1*Q_000001110*R_021[0]+-1*Q_000101010*R_030[0]+Q_000101110*R_031[0];
double QR_000000011020=Q_000000011*R_020[0]+-1*Q_000000111*R_021[0]+Q_000000211*R_022[0];
double QR_011000000101=Q_011000000*R_101[0]+-1*Q_111000000*R_201[0]+Q_211000000*R_301[0];
double QR_010001000101=Q_010001000*R_101[0]+-1*Q_010101000*R_111[0]+-1*Q_110001000*R_201[0]+Q_110101000*R_211[0];
double QR_010000001101=Q_010000001*R_101[0]+-1*Q_010000101*R_102[0]+-1*Q_110000001*R_201[0]+Q_110000101*R_202[0];
double QR_001010000101=Q_001010000*R_101[0]+-1*Q_001110000*R_111[0]+-1*Q_101010000*R_201[0]+Q_101110000*R_211[0];
double QR_000011000101=Q_000011000*R_101[0]+-1*Q_000111000*R_111[0]+Q_000211000*R_121[0];
double QR_000010001101=Q_000010001*R_101[0]+-1*Q_000010101*R_102[0]+-1*Q_000110001*R_111[0]+Q_000110101*R_112[0];
double QR_001000010101=Q_001000010*R_101[0]+-1*Q_001000110*R_102[0]+-1*Q_101000010*R_201[0]+Q_101000110*R_202[0];
double QR_000001010101=Q_000001010*R_101[0]+-1*Q_000001110*R_102[0]+-1*Q_000101010*R_111[0]+Q_000101110*R_112[0];
double QR_000000011101=Q_000000011*R_101[0]+-1*Q_000000111*R_102[0]+Q_000000211*R_103[0];
double QR_011000000110=Q_011000000*R_110[0]+-1*Q_111000000*R_210[0]+Q_211000000*R_310[0];
double QR_010001000110=Q_010001000*R_110[0]+-1*Q_010101000*R_120[0]+-1*Q_110001000*R_210[0]+Q_110101000*R_220[0];
double QR_010000001110=Q_010000001*R_110[0]+-1*Q_010000101*R_111[0]+-1*Q_110000001*R_210[0]+Q_110000101*R_211[0];
double QR_001010000110=Q_001010000*R_110[0]+-1*Q_001110000*R_120[0]+-1*Q_101010000*R_210[0]+Q_101110000*R_220[0];
double QR_000011000110=Q_000011000*R_110[0]+-1*Q_000111000*R_120[0]+Q_000211000*R_130[0];
double QR_000010001110=Q_000010001*R_110[0]+-1*Q_000010101*R_111[0]+-1*Q_000110001*R_120[0]+Q_000110101*R_121[0];
double QR_001000010110=Q_001000010*R_110[0]+-1*Q_001000110*R_111[0]+-1*Q_101000010*R_210[0]+Q_101000110*R_211[0];
double QR_000001010110=Q_000001010*R_110[0]+-1*Q_000001110*R_111[0]+-1*Q_000101010*R_120[0]+Q_000101110*R_121[0];
double QR_000000011110=Q_000000011*R_110[0]+-1*Q_000000111*R_111[0]+Q_000000211*R_112[0];
double QR_011000000200=Q_011000000*R_200[0]+-1*Q_111000000*R_300[0]+Q_211000000*R_400[0];
double QR_010001000200=Q_010001000*R_200[0]+-1*Q_010101000*R_210[0]+-1*Q_110001000*R_300[0]+Q_110101000*R_310[0];
double QR_010000001200=Q_010000001*R_200[0]+-1*Q_010000101*R_201[0]+-1*Q_110000001*R_300[0]+Q_110000101*R_301[0];
double QR_001010000200=Q_001010000*R_200[0]+-1*Q_001110000*R_210[0]+-1*Q_101010000*R_300[0]+Q_101110000*R_310[0];
double QR_000011000200=Q_000011000*R_200[0]+-1*Q_000111000*R_210[0]+Q_000211000*R_220[0];
double QR_000010001200=Q_000010001*R_200[0]+-1*Q_000010101*R_201[0]+-1*Q_000110001*R_210[0]+Q_000110101*R_211[0];
double QR_001000010200=Q_001000010*R_200[0]+-1*Q_001000110*R_201[0]+-1*Q_101000010*R_300[0]+Q_101000110*R_301[0];
double QR_000001010200=Q_000001010*R_200[0]+-1*Q_000001110*R_201[0]+-1*Q_000101010*R_210[0]+Q_000101110*R_211[0];
double QR_000000011200=Q_000000011*R_200[0]+-1*Q_000000111*R_201[0]+Q_000000211*R_202[0];
double QR_011000000003=Q_011000000*R_003[0]+-1*Q_111000000*R_103[0]+Q_211000000*R_203[0];
double QR_010001000003=Q_010001000*R_003[0]+-1*Q_010101000*R_013[0]+-1*Q_110001000*R_103[0]+Q_110101000*R_113[0];
double QR_010000001003=Q_010000001*R_003[0]+-1*Q_010000101*R_004[0]+-1*Q_110000001*R_103[0]+Q_110000101*R_104[0];
double QR_001010000003=Q_001010000*R_003[0]+-1*Q_001110000*R_013[0]+-1*Q_101010000*R_103[0]+Q_101110000*R_113[0];
double QR_000011000003=Q_000011000*R_003[0]+-1*Q_000111000*R_013[0]+Q_000211000*R_023[0];
double QR_000010001003=Q_000010001*R_003[0]+-1*Q_000010101*R_004[0]+-1*Q_000110001*R_013[0]+Q_000110101*R_014[0];
double QR_001000010003=Q_001000010*R_003[0]+-1*Q_001000110*R_004[0]+-1*Q_101000010*R_103[0]+Q_101000110*R_104[0];
double QR_000001010003=Q_000001010*R_003[0]+-1*Q_000001110*R_004[0]+-1*Q_000101010*R_013[0]+Q_000101110*R_014[0];
double QR_000000011003=Q_000000011*R_003[0]+-1*Q_000000111*R_004[0]+Q_000000211*R_005[0];
double QR_011000000012=Q_011000000*R_012[0]+-1*Q_111000000*R_112[0]+Q_211000000*R_212[0];
double QR_010001000012=Q_010001000*R_012[0]+-1*Q_010101000*R_022[0]+-1*Q_110001000*R_112[0]+Q_110101000*R_122[0];
double QR_010000001012=Q_010000001*R_012[0]+-1*Q_010000101*R_013[0]+-1*Q_110000001*R_112[0]+Q_110000101*R_113[0];
double QR_001010000012=Q_001010000*R_012[0]+-1*Q_001110000*R_022[0]+-1*Q_101010000*R_112[0]+Q_101110000*R_122[0];
double QR_000011000012=Q_000011000*R_012[0]+-1*Q_000111000*R_022[0]+Q_000211000*R_032[0];
double QR_000010001012=Q_000010001*R_012[0]+-1*Q_000010101*R_013[0]+-1*Q_000110001*R_022[0]+Q_000110101*R_023[0];
double QR_001000010012=Q_001000010*R_012[0]+-1*Q_001000110*R_013[0]+-1*Q_101000010*R_112[0]+Q_101000110*R_113[0];
double QR_000001010012=Q_000001010*R_012[0]+-1*Q_000001110*R_013[0]+-1*Q_000101010*R_022[0]+Q_000101110*R_023[0];
double QR_000000011012=Q_000000011*R_012[0]+-1*Q_000000111*R_013[0]+Q_000000211*R_014[0];
double QR_011000000021=Q_011000000*R_021[0]+-1*Q_111000000*R_121[0]+Q_211000000*R_221[0];
double QR_010001000021=Q_010001000*R_021[0]+-1*Q_010101000*R_031[0]+-1*Q_110001000*R_121[0]+Q_110101000*R_131[0];
double QR_010000001021=Q_010000001*R_021[0]+-1*Q_010000101*R_022[0]+-1*Q_110000001*R_121[0]+Q_110000101*R_122[0];
double QR_001010000021=Q_001010000*R_021[0]+-1*Q_001110000*R_031[0]+-1*Q_101010000*R_121[0]+Q_101110000*R_131[0];
double QR_000011000021=Q_000011000*R_021[0]+-1*Q_000111000*R_031[0]+Q_000211000*R_041[0];
double QR_000010001021=Q_000010001*R_021[0]+-1*Q_000010101*R_022[0]+-1*Q_000110001*R_031[0]+Q_000110101*R_032[0];
double QR_001000010021=Q_001000010*R_021[0]+-1*Q_001000110*R_022[0]+-1*Q_101000010*R_121[0]+Q_101000110*R_122[0];
double QR_000001010021=Q_000001010*R_021[0]+-1*Q_000001110*R_022[0]+-1*Q_000101010*R_031[0]+Q_000101110*R_032[0];
double QR_000000011021=Q_000000011*R_021[0]+-1*Q_000000111*R_022[0]+Q_000000211*R_023[0];
double QR_011000000030=Q_011000000*R_030[0]+-1*Q_111000000*R_130[0]+Q_211000000*R_230[0];
double QR_010001000030=Q_010001000*R_030[0]+-1*Q_010101000*R_040[0]+-1*Q_110001000*R_130[0]+Q_110101000*R_140[0];
double QR_010000001030=Q_010000001*R_030[0]+-1*Q_010000101*R_031[0]+-1*Q_110000001*R_130[0]+Q_110000101*R_131[0];
double QR_001010000030=Q_001010000*R_030[0]+-1*Q_001110000*R_040[0]+-1*Q_101010000*R_130[0]+Q_101110000*R_140[0];
double QR_000011000030=Q_000011000*R_030[0]+-1*Q_000111000*R_040[0]+Q_000211000*R_050[0];
double QR_000010001030=Q_000010001*R_030[0]+-1*Q_000010101*R_031[0]+-1*Q_000110001*R_040[0]+Q_000110101*R_041[0];
double QR_001000010030=Q_001000010*R_030[0]+-1*Q_001000110*R_031[0]+-1*Q_101000010*R_130[0]+Q_101000110*R_131[0];
double QR_000001010030=Q_000001010*R_030[0]+-1*Q_000001110*R_031[0]+-1*Q_000101010*R_040[0]+Q_000101110*R_041[0];
double QR_000000011030=Q_000000011*R_030[0]+-1*Q_000000111*R_031[0]+Q_000000211*R_032[0];
double QR_011000000102=Q_011000000*R_102[0]+-1*Q_111000000*R_202[0]+Q_211000000*R_302[0];
double QR_010001000102=Q_010001000*R_102[0]+-1*Q_010101000*R_112[0]+-1*Q_110001000*R_202[0]+Q_110101000*R_212[0];
double QR_010000001102=Q_010000001*R_102[0]+-1*Q_010000101*R_103[0]+-1*Q_110000001*R_202[0]+Q_110000101*R_203[0];
double QR_001010000102=Q_001010000*R_102[0]+-1*Q_001110000*R_112[0]+-1*Q_101010000*R_202[0]+Q_101110000*R_212[0];
double QR_000011000102=Q_000011000*R_102[0]+-1*Q_000111000*R_112[0]+Q_000211000*R_122[0];
double QR_000010001102=Q_000010001*R_102[0]+-1*Q_000010101*R_103[0]+-1*Q_000110001*R_112[0]+Q_000110101*R_113[0];
double QR_001000010102=Q_001000010*R_102[0]+-1*Q_001000110*R_103[0]+-1*Q_101000010*R_202[0]+Q_101000110*R_203[0];
double QR_000001010102=Q_000001010*R_102[0]+-1*Q_000001110*R_103[0]+-1*Q_000101010*R_112[0]+Q_000101110*R_113[0];
double QR_000000011102=Q_000000011*R_102[0]+-1*Q_000000111*R_103[0]+Q_000000211*R_104[0];
double QR_011000000111=Q_011000000*R_111[0]+-1*Q_111000000*R_211[0]+Q_211000000*R_311[0];
double QR_010001000111=Q_010001000*R_111[0]+-1*Q_010101000*R_121[0]+-1*Q_110001000*R_211[0]+Q_110101000*R_221[0];
double QR_010000001111=Q_010000001*R_111[0]+-1*Q_010000101*R_112[0]+-1*Q_110000001*R_211[0]+Q_110000101*R_212[0];
double QR_001010000111=Q_001010000*R_111[0]+-1*Q_001110000*R_121[0]+-1*Q_101010000*R_211[0]+Q_101110000*R_221[0];
double QR_000011000111=Q_000011000*R_111[0]+-1*Q_000111000*R_121[0]+Q_000211000*R_131[0];
double QR_000010001111=Q_000010001*R_111[0]+-1*Q_000010101*R_112[0]+-1*Q_000110001*R_121[0]+Q_000110101*R_122[0];
double QR_001000010111=Q_001000010*R_111[0]+-1*Q_001000110*R_112[0]+-1*Q_101000010*R_211[0]+Q_101000110*R_212[0];
double QR_000001010111=Q_000001010*R_111[0]+-1*Q_000001110*R_112[0]+-1*Q_000101010*R_121[0]+Q_000101110*R_122[0];
double QR_000000011111=Q_000000011*R_111[0]+-1*Q_000000111*R_112[0]+Q_000000211*R_113[0];
double QR_011000000120=Q_011000000*R_120[0]+-1*Q_111000000*R_220[0]+Q_211000000*R_320[0];
double QR_010001000120=Q_010001000*R_120[0]+-1*Q_010101000*R_130[0]+-1*Q_110001000*R_220[0]+Q_110101000*R_230[0];
double QR_010000001120=Q_010000001*R_120[0]+-1*Q_010000101*R_121[0]+-1*Q_110000001*R_220[0]+Q_110000101*R_221[0];
double QR_001010000120=Q_001010000*R_120[0]+-1*Q_001110000*R_130[0]+-1*Q_101010000*R_220[0]+Q_101110000*R_230[0];
double QR_000011000120=Q_000011000*R_120[0]+-1*Q_000111000*R_130[0]+Q_000211000*R_140[0];
double QR_000010001120=Q_000010001*R_120[0]+-1*Q_000010101*R_121[0]+-1*Q_000110001*R_130[0]+Q_000110101*R_131[0];
double QR_001000010120=Q_001000010*R_120[0]+-1*Q_001000110*R_121[0]+-1*Q_101000010*R_220[0]+Q_101000110*R_221[0];
double QR_000001010120=Q_000001010*R_120[0]+-1*Q_000001110*R_121[0]+-1*Q_000101010*R_130[0]+Q_000101110*R_131[0];
double QR_000000011120=Q_000000011*R_120[0]+-1*Q_000000111*R_121[0]+Q_000000211*R_122[0];
double QR_011000000201=Q_011000000*R_201[0]+-1*Q_111000000*R_301[0]+Q_211000000*R_401[0];
double QR_010001000201=Q_010001000*R_201[0]+-1*Q_010101000*R_211[0]+-1*Q_110001000*R_301[0]+Q_110101000*R_311[0];
double QR_010000001201=Q_010000001*R_201[0]+-1*Q_010000101*R_202[0]+-1*Q_110000001*R_301[0]+Q_110000101*R_302[0];
double QR_001010000201=Q_001010000*R_201[0]+-1*Q_001110000*R_211[0]+-1*Q_101010000*R_301[0]+Q_101110000*R_311[0];
double QR_000011000201=Q_000011000*R_201[0]+-1*Q_000111000*R_211[0]+Q_000211000*R_221[0];
double QR_000010001201=Q_000010001*R_201[0]+-1*Q_000010101*R_202[0]+-1*Q_000110001*R_211[0]+Q_000110101*R_212[0];
double QR_001000010201=Q_001000010*R_201[0]+-1*Q_001000110*R_202[0]+-1*Q_101000010*R_301[0]+Q_101000110*R_302[0];
double QR_000001010201=Q_000001010*R_201[0]+-1*Q_000001110*R_202[0]+-1*Q_000101010*R_211[0]+Q_000101110*R_212[0];
double QR_000000011201=Q_000000011*R_201[0]+-1*Q_000000111*R_202[0]+Q_000000211*R_203[0];
double QR_011000000210=Q_011000000*R_210[0]+-1*Q_111000000*R_310[0]+Q_211000000*R_410[0];
double QR_010001000210=Q_010001000*R_210[0]+-1*Q_010101000*R_220[0]+-1*Q_110001000*R_310[0]+Q_110101000*R_320[0];
double QR_010000001210=Q_010000001*R_210[0]+-1*Q_010000101*R_211[0]+-1*Q_110000001*R_310[0]+Q_110000101*R_311[0];
double QR_001010000210=Q_001010000*R_210[0]+-1*Q_001110000*R_220[0]+-1*Q_101010000*R_310[0]+Q_101110000*R_320[0];
double QR_000011000210=Q_000011000*R_210[0]+-1*Q_000111000*R_220[0]+Q_000211000*R_230[0];
double QR_000010001210=Q_000010001*R_210[0]+-1*Q_000010101*R_211[0]+-1*Q_000110001*R_220[0]+Q_000110101*R_221[0];
double QR_001000010210=Q_001000010*R_210[0]+-1*Q_001000110*R_211[0]+-1*Q_101000010*R_310[0]+Q_101000110*R_311[0];
double QR_000001010210=Q_000001010*R_210[0]+-1*Q_000001110*R_211[0]+-1*Q_000101010*R_220[0]+Q_000101110*R_221[0];
double QR_000000011210=Q_000000011*R_210[0]+-1*Q_000000111*R_211[0]+Q_000000211*R_212[0];
double QR_011000000300=Q_011000000*R_300[0]+-1*Q_111000000*R_400[0]+Q_211000000*R_500[0];
double QR_010001000300=Q_010001000*R_300[0]+-1*Q_010101000*R_310[0]+-1*Q_110001000*R_400[0]+Q_110101000*R_410[0];
double QR_010000001300=Q_010000001*R_300[0]+-1*Q_010000101*R_301[0]+-1*Q_110000001*R_400[0]+Q_110000101*R_401[0];
double QR_001010000300=Q_001010000*R_300[0]+-1*Q_001110000*R_310[0]+-1*Q_101010000*R_400[0]+Q_101110000*R_410[0];
double QR_000011000300=Q_000011000*R_300[0]+-1*Q_000111000*R_310[0]+Q_000211000*R_320[0];
double QR_000010001300=Q_000010001*R_300[0]+-1*Q_000010101*R_301[0]+-1*Q_000110001*R_310[0]+Q_000110101*R_311[0];
double QR_001000010300=Q_001000010*R_300[0]+-1*Q_001000110*R_301[0]+-1*Q_101000010*R_400[0]+Q_101000110*R_401[0];
double QR_000001010300=Q_000001010*R_300[0]+-1*Q_000001110*R_301[0]+-1*Q_000101010*R_310[0]+Q_000101110*R_311[0];
double QR_000000011300=Q_000000011*R_300[0]+-1*Q_000000111*R_301[0]+Q_000000211*R_302[0];
double QR_011000000004=Q_011000000*R_004[0]+-1*Q_111000000*R_104[0]+Q_211000000*R_204[0];
double QR_010001000004=Q_010001000*R_004[0]+-1*Q_010101000*R_014[0]+-1*Q_110001000*R_104[0]+Q_110101000*R_114[0];
double QR_010000001004=Q_010000001*R_004[0]+-1*Q_010000101*R_005[0]+-1*Q_110000001*R_104[0]+Q_110000101*R_105[0];
double QR_001010000004=Q_001010000*R_004[0]+-1*Q_001110000*R_014[0]+-1*Q_101010000*R_104[0]+Q_101110000*R_114[0];
double QR_000011000004=Q_000011000*R_004[0]+-1*Q_000111000*R_014[0]+Q_000211000*R_024[0];
double QR_000010001004=Q_000010001*R_004[0]+-1*Q_000010101*R_005[0]+-1*Q_000110001*R_014[0]+Q_000110101*R_015[0];
double QR_001000010004=Q_001000010*R_004[0]+-1*Q_001000110*R_005[0]+-1*Q_101000010*R_104[0]+Q_101000110*R_105[0];
double QR_000001010004=Q_000001010*R_004[0]+-1*Q_000001110*R_005[0]+-1*Q_000101010*R_014[0]+Q_000101110*R_015[0];
double QR_000000011004=Q_000000011*R_004[0]+-1*Q_000000111*R_005[0]+Q_000000211*R_006[0];
double QR_011000000013=Q_011000000*R_013[0]+-1*Q_111000000*R_113[0]+Q_211000000*R_213[0];
double QR_010001000013=Q_010001000*R_013[0]+-1*Q_010101000*R_023[0]+-1*Q_110001000*R_113[0]+Q_110101000*R_123[0];
double QR_010000001013=Q_010000001*R_013[0]+-1*Q_010000101*R_014[0]+-1*Q_110000001*R_113[0]+Q_110000101*R_114[0];
double QR_001010000013=Q_001010000*R_013[0]+-1*Q_001110000*R_023[0]+-1*Q_101010000*R_113[0]+Q_101110000*R_123[0];
double QR_000011000013=Q_000011000*R_013[0]+-1*Q_000111000*R_023[0]+Q_000211000*R_033[0];
double QR_000010001013=Q_000010001*R_013[0]+-1*Q_000010101*R_014[0]+-1*Q_000110001*R_023[0]+Q_000110101*R_024[0];
double QR_001000010013=Q_001000010*R_013[0]+-1*Q_001000110*R_014[0]+-1*Q_101000010*R_113[0]+Q_101000110*R_114[0];
double QR_000001010013=Q_000001010*R_013[0]+-1*Q_000001110*R_014[0]+-1*Q_000101010*R_023[0]+Q_000101110*R_024[0];
double QR_000000011013=Q_000000011*R_013[0]+-1*Q_000000111*R_014[0]+Q_000000211*R_015[0];
double QR_011000000022=Q_011000000*R_022[0]+-1*Q_111000000*R_122[0]+Q_211000000*R_222[0];
double QR_010001000022=Q_010001000*R_022[0]+-1*Q_010101000*R_032[0]+-1*Q_110001000*R_122[0]+Q_110101000*R_132[0];
double QR_010000001022=Q_010000001*R_022[0]+-1*Q_010000101*R_023[0]+-1*Q_110000001*R_122[0]+Q_110000101*R_123[0];
double QR_001010000022=Q_001010000*R_022[0]+-1*Q_001110000*R_032[0]+-1*Q_101010000*R_122[0]+Q_101110000*R_132[0];
double QR_000011000022=Q_000011000*R_022[0]+-1*Q_000111000*R_032[0]+Q_000211000*R_042[0];
double QR_000010001022=Q_000010001*R_022[0]+-1*Q_000010101*R_023[0]+-1*Q_000110001*R_032[0]+Q_000110101*R_033[0];
double QR_001000010022=Q_001000010*R_022[0]+-1*Q_001000110*R_023[0]+-1*Q_101000010*R_122[0]+Q_101000110*R_123[0];
double QR_000001010022=Q_000001010*R_022[0]+-1*Q_000001110*R_023[0]+-1*Q_000101010*R_032[0]+Q_000101110*R_033[0];
double QR_000000011022=Q_000000011*R_022[0]+-1*Q_000000111*R_023[0]+Q_000000211*R_024[0];
double QR_011000000031=Q_011000000*R_031[0]+-1*Q_111000000*R_131[0]+Q_211000000*R_231[0];
double QR_010001000031=Q_010001000*R_031[0]+-1*Q_010101000*R_041[0]+-1*Q_110001000*R_131[0]+Q_110101000*R_141[0];
double QR_010000001031=Q_010000001*R_031[0]+-1*Q_010000101*R_032[0]+-1*Q_110000001*R_131[0]+Q_110000101*R_132[0];
double QR_001010000031=Q_001010000*R_031[0]+-1*Q_001110000*R_041[0]+-1*Q_101010000*R_131[0]+Q_101110000*R_141[0];
double QR_000011000031=Q_000011000*R_031[0]+-1*Q_000111000*R_041[0]+Q_000211000*R_051[0];
double QR_000010001031=Q_000010001*R_031[0]+-1*Q_000010101*R_032[0]+-1*Q_000110001*R_041[0]+Q_000110101*R_042[0];
double QR_001000010031=Q_001000010*R_031[0]+-1*Q_001000110*R_032[0]+-1*Q_101000010*R_131[0]+Q_101000110*R_132[0];
double QR_000001010031=Q_000001010*R_031[0]+-1*Q_000001110*R_032[0]+-1*Q_000101010*R_041[0]+Q_000101110*R_042[0];
double QR_000000011031=Q_000000011*R_031[0]+-1*Q_000000111*R_032[0]+Q_000000211*R_033[0];
double QR_011000000040=Q_011000000*R_040[0]+-1*Q_111000000*R_140[0]+Q_211000000*R_240[0];
double QR_010001000040=Q_010001000*R_040[0]+-1*Q_010101000*R_050[0]+-1*Q_110001000*R_140[0]+Q_110101000*R_150[0];
double QR_010000001040=Q_010000001*R_040[0]+-1*Q_010000101*R_041[0]+-1*Q_110000001*R_140[0]+Q_110000101*R_141[0];
double QR_001010000040=Q_001010000*R_040[0]+-1*Q_001110000*R_050[0]+-1*Q_101010000*R_140[0]+Q_101110000*R_150[0];
double QR_000011000040=Q_000011000*R_040[0]+-1*Q_000111000*R_050[0]+Q_000211000*R_060[0];
double QR_000010001040=Q_000010001*R_040[0]+-1*Q_000010101*R_041[0]+-1*Q_000110001*R_050[0]+Q_000110101*R_051[0];
double QR_001000010040=Q_001000010*R_040[0]+-1*Q_001000110*R_041[0]+-1*Q_101000010*R_140[0]+Q_101000110*R_141[0];
double QR_000001010040=Q_000001010*R_040[0]+-1*Q_000001110*R_041[0]+-1*Q_000101010*R_050[0]+Q_000101110*R_051[0];
double QR_000000011040=Q_000000011*R_040[0]+-1*Q_000000111*R_041[0]+Q_000000211*R_042[0];
double QR_011000000103=Q_011000000*R_103[0]+-1*Q_111000000*R_203[0]+Q_211000000*R_303[0];
double QR_010001000103=Q_010001000*R_103[0]+-1*Q_010101000*R_113[0]+-1*Q_110001000*R_203[0]+Q_110101000*R_213[0];
double QR_010000001103=Q_010000001*R_103[0]+-1*Q_010000101*R_104[0]+-1*Q_110000001*R_203[0]+Q_110000101*R_204[0];
double QR_001010000103=Q_001010000*R_103[0]+-1*Q_001110000*R_113[0]+-1*Q_101010000*R_203[0]+Q_101110000*R_213[0];
double QR_000011000103=Q_000011000*R_103[0]+-1*Q_000111000*R_113[0]+Q_000211000*R_123[0];
double QR_000010001103=Q_000010001*R_103[0]+-1*Q_000010101*R_104[0]+-1*Q_000110001*R_113[0]+Q_000110101*R_114[0];
double QR_001000010103=Q_001000010*R_103[0]+-1*Q_001000110*R_104[0]+-1*Q_101000010*R_203[0]+Q_101000110*R_204[0];
double QR_000001010103=Q_000001010*R_103[0]+-1*Q_000001110*R_104[0]+-1*Q_000101010*R_113[0]+Q_000101110*R_114[0];
double QR_000000011103=Q_000000011*R_103[0]+-1*Q_000000111*R_104[0]+Q_000000211*R_105[0];
double QR_011000000112=Q_011000000*R_112[0]+-1*Q_111000000*R_212[0]+Q_211000000*R_312[0];
double QR_010001000112=Q_010001000*R_112[0]+-1*Q_010101000*R_122[0]+-1*Q_110001000*R_212[0]+Q_110101000*R_222[0];
double QR_010000001112=Q_010000001*R_112[0]+-1*Q_010000101*R_113[0]+-1*Q_110000001*R_212[0]+Q_110000101*R_213[0];
double QR_001010000112=Q_001010000*R_112[0]+-1*Q_001110000*R_122[0]+-1*Q_101010000*R_212[0]+Q_101110000*R_222[0];
double QR_000011000112=Q_000011000*R_112[0]+-1*Q_000111000*R_122[0]+Q_000211000*R_132[0];
double QR_000010001112=Q_000010001*R_112[0]+-1*Q_000010101*R_113[0]+-1*Q_000110001*R_122[0]+Q_000110101*R_123[0];
double QR_001000010112=Q_001000010*R_112[0]+-1*Q_001000110*R_113[0]+-1*Q_101000010*R_212[0]+Q_101000110*R_213[0];
double QR_000001010112=Q_000001010*R_112[0]+-1*Q_000001110*R_113[0]+-1*Q_000101010*R_122[0]+Q_000101110*R_123[0];
double QR_000000011112=Q_000000011*R_112[0]+-1*Q_000000111*R_113[0]+Q_000000211*R_114[0];
double QR_011000000121=Q_011000000*R_121[0]+-1*Q_111000000*R_221[0]+Q_211000000*R_321[0];
double QR_010001000121=Q_010001000*R_121[0]+-1*Q_010101000*R_131[0]+-1*Q_110001000*R_221[0]+Q_110101000*R_231[0];
double QR_010000001121=Q_010000001*R_121[0]+-1*Q_010000101*R_122[0]+-1*Q_110000001*R_221[0]+Q_110000101*R_222[0];
double QR_001010000121=Q_001010000*R_121[0]+-1*Q_001110000*R_131[0]+-1*Q_101010000*R_221[0]+Q_101110000*R_231[0];
double QR_000011000121=Q_000011000*R_121[0]+-1*Q_000111000*R_131[0]+Q_000211000*R_141[0];
double QR_000010001121=Q_000010001*R_121[0]+-1*Q_000010101*R_122[0]+-1*Q_000110001*R_131[0]+Q_000110101*R_132[0];
double QR_001000010121=Q_001000010*R_121[0]+-1*Q_001000110*R_122[0]+-1*Q_101000010*R_221[0]+Q_101000110*R_222[0];
double QR_000001010121=Q_000001010*R_121[0]+-1*Q_000001110*R_122[0]+-1*Q_000101010*R_131[0]+Q_000101110*R_132[0];
double QR_000000011121=Q_000000011*R_121[0]+-1*Q_000000111*R_122[0]+Q_000000211*R_123[0];
double QR_011000000130=Q_011000000*R_130[0]+-1*Q_111000000*R_230[0]+Q_211000000*R_330[0];
double QR_010001000130=Q_010001000*R_130[0]+-1*Q_010101000*R_140[0]+-1*Q_110001000*R_230[0]+Q_110101000*R_240[0];
double QR_010000001130=Q_010000001*R_130[0]+-1*Q_010000101*R_131[0]+-1*Q_110000001*R_230[0]+Q_110000101*R_231[0];
double QR_001010000130=Q_001010000*R_130[0]+-1*Q_001110000*R_140[0]+-1*Q_101010000*R_230[0]+Q_101110000*R_240[0];
double QR_000011000130=Q_000011000*R_130[0]+-1*Q_000111000*R_140[0]+Q_000211000*R_150[0];
double QR_000010001130=Q_000010001*R_130[0]+-1*Q_000010101*R_131[0]+-1*Q_000110001*R_140[0]+Q_000110101*R_141[0];
double QR_001000010130=Q_001000010*R_130[0]+-1*Q_001000110*R_131[0]+-1*Q_101000010*R_230[0]+Q_101000110*R_231[0];
double QR_000001010130=Q_000001010*R_130[0]+-1*Q_000001110*R_131[0]+-1*Q_000101010*R_140[0]+Q_000101110*R_141[0];
double QR_000000011130=Q_000000011*R_130[0]+-1*Q_000000111*R_131[0]+Q_000000211*R_132[0];
double QR_011000000202=Q_011000000*R_202[0]+-1*Q_111000000*R_302[0]+Q_211000000*R_402[0];
double QR_010001000202=Q_010001000*R_202[0]+-1*Q_010101000*R_212[0]+-1*Q_110001000*R_302[0]+Q_110101000*R_312[0];
double QR_010000001202=Q_010000001*R_202[0]+-1*Q_010000101*R_203[0]+-1*Q_110000001*R_302[0]+Q_110000101*R_303[0];
double QR_001010000202=Q_001010000*R_202[0]+-1*Q_001110000*R_212[0]+-1*Q_101010000*R_302[0]+Q_101110000*R_312[0];
double QR_000011000202=Q_000011000*R_202[0]+-1*Q_000111000*R_212[0]+Q_000211000*R_222[0];
double QR_000010001202=Q_000010001*R_202[0]+-1*Q_000010101*R_203[0]+-1*Q_000110001*R_212[0]+Q_000110101*R_213[0];
double QR_001000010202=Q_001000010*R_202[0]+-1*Q_001000110*R_203[0]+-1*Q_101000010*R_302[0]+Q_101000110*R_303[0];
double QR_000001010202=Q_000001010*R_202[0]+-1*Q_000001110*R_203[0]+-1*Q_000101010*R_212[0]+Q_000101110*R_213[0];
double QR_000000011202=Q_000000011*R_202[0]+-1*Q_000000111*R_203[0]+Q_000000211*R_204[0];
double QR_011000000211=Q_011000000*R_211[0]+-1*Q_111000000*R_311[0]+Q_211000000*R_411[0];
double QR_010001000211=Q_010001000*R_211[0]+-1*Q_010101000*R_221[0]+-1*Q_110001000*R_311[0]+Q_110101000*R_321[0];
double QR_010000001211=Q_010000001*R_211[0]+-1*Q_010000101*R_212[0]+-1*Q_110000001*R_311[0]+Q_110000101*R_312[0];
double QR_001010000211=Q_001010000*R_211[0]+-1*Q_001110000*R_221[0]+-1*Q_101010000*R_311[0]+Q_101110000*R_321[0];
double QR_000011000211=Q_000011000*R_211[0]+-1*Q_000111000*R_221[0]+Q_000211000*R_231[0];
double QR_000010001211=Q_000010001*R_211[0]+-1*Q_000010101*R_212[0]+-1*Q_000110001*R_221[0]+Q_000110101*R_222[0];
double QR_001000010211=Q_001000010*R_211[0]+-1*Q_001000110*R_212[0]+-1*Q_101000010*R_311[0]+Q_101000110*R_312[0];
double QR_000001010211=Q_000001010*R_211[0]+-1*Q_000001110*R_212[0]+-1*Q_000101010*R_221[0]+Q_000101110*R_222[0];
double QR_000000011211=Q_000000011*R_211[0]+-1*Q_000000111*R_212[0]+Q_000000211*R_213[0];
double QR_011000000220=Q_011000000*R_220[0]+-1*Q_111000000*R_320[0]+Q_211000000*R_420[0];
double QR_010001000220=Q_010001000*R_220[0]+-1*Q_010101000*R_230[0]+-1*Q_110001000*R_320[0]+Q_110101000*R_330[0];
double QR_010000001220=Q_010000001*R_220[0]+-1*Q_010000101*R_221[0]+-1*Q_110000001*R_320[0]+Q_110000101*R_321[0];
double QR_001010000220=Q_001010000*R_220[0]+-1*Q_001110000*R_230[0]+-1*Q_101010000*R_320[0]+Q_101110000*R_330[0];
double QR_000011000220=Q_000011000*R_220[0]+-1*Q_000111000*R_230[0]+Q_000211000*R_240[0];
double QR_000010001220=Q_000010001*R_220[0]+-1*Q_000010101*R_221[0]+-1*Q_000110001*R_230[0]+Q_000110101*R_231[0];
double QR_001000010220=Q_001000010*R_220[0]+-1*Q_001000110*R_221[0]+-1*Q_101000010*R_320[0]+Q_101000110*R_321[0];
double QR_000001010220=Q_000001010*R_220[0]+-1*Q_000001110*R_221[0]+-1*Q_000101010*R_230[0]+Q_000101110*R_231[0];
double QR_000000011220=Q_000000011*R_220[0]+-1*Q_000000111*R_221[0]+Q_000000211*R_222[0];
double QR_011000000301=Q_011000000*R_301[0]+-1*Q_111000000*R_401[0]+Q_211000000*R_501[0];
double QR_010001000301=Q_010001000*R_301[0]+-1*Q_010101000*R_311[0]+-1*Q_110001000*R_401[0]+Q_110101000*R_411[0];
double QR_010000001301=Q_010000001*R_301[0]+-1*Q_010000101*R_302[0]+-1*Q_110000001*R_401[0]+Q_110000101*R_402[0];
double QR_001010000301=Q_001010000*R_301[0]+-1*Q_001110000*R_311[0]+-1*Q_101010000*R_401[0]+Q_101110000*R_411[0];
double QR_000011000301=Q_000011000*R_301[0]+-1*Q_000111000*R_311[0]+Q_000211000*R_321[0];
double QR_000010001301=Q_000010001*R_301[0]+-1*Q_000010101*R_302[0]+-1*Q_000110001*R_311[0]+Q_000110101*R_312[0];
double QR_001000010301=Q_001000010*R_301[0]+-1*Q_001000110*R_302[0]+-1*Q_101000010*R_401[0]+Q_101000110*R_402[0];
double QR_000001010301=Q_000001010*R_301[0]+-1*Q_000001110*R_302[0]+-1*Q_000101010*R_311[0]+Q_000101110*R_312[0];
double QR_000000011301=Q_000000011*R_301[0]+-1*Q_000000111*R_302[0]+Q_000000211*R_303[0];
double QR_011000000310=Q_011000000*R_310[0]+-1*Q_111000000*R_410[0]+Q_211000000*R_510[0];
double QR_010001000310=Q_010001000*R_310[0]+-1*Q_010101000*R_320[0]+-1*Q_110001000*R_410[0]+Q_110101000*R_420[0];
double QR_010000001310=Q_010000001*R_310[0]+-1*Q_010000101*R_311[0]+-1*Q_110000001*R_410[0]+Q_110000101*R_411[0];
double QR_001010000310=Q_001010000*R_310[0]+-1*Q_001110000*R_320[0]+-1*Q_101010000*R_410[0]+Q_101110000*R_420[0];
double QR_000011000310=Q_000011000*R_310[0]+-1*Q_000111000*R_320[0]+Q_000211000*R_330[0];
double QR_000010001310=Q_000010001*R_310[0]+-1*Q_000010101*R_311[0]+-1*Q_000110001*R_320[0]+Q_000110101*R_321[0];
double QR_001000010310=Q_001000010*R_310[0]+-1*Q_001000110*R_311[0]+-1*Q_101000010*R_410[0]+Q_101000110*R_411[0];
double QR_000001010310=Q_000001010*R_310[0]+-1*Q_000001110*R_311[0]+-1*Q_000101010*R_320[0]+Q_000101110*R_321[0];
double QR_000000011310=Q_000000011*R_310[0]+-1*Q_000000111*R_311[0]+Q_000000211*R_312[0];
double QR_011000000400=Q_011000000*R_400[0]+-1*Q_111000000*R_500[0]+Q_211000000*R_600[0];
double QR_010001000400=Q_010001000*R_400[0]+-1*Q_010101000*R_410[0]+-1*Q_110001000*R_500[0]+Q_110101000*R_510[0];
double QR_010000001400=Q_010000001*R_400[0]+-1*Q_010000101*R_401[0]+-1*Q_110000001*R_500[0]+Q_110000101*R_501[0];
double QR_001010000400=Q_001010000*R_400[0]+-1*Q_001110000*R_410[0]+-1*Q_101010000*R_500[0]+Q_101110000*R_510[0];
double QR_000011000400=Q_000011000*R_400[0]+-1*Q_000111000*R_410[0]+Q_000211000*R_420[0];
double QR_000010001400=Q_000010001*R_400[0]+-1*Q_000010101*R_401[0]+-1*Q_000110001*R_410[0]+Q_000110101*R_411[0];
double QR_001000010400=Q_001000010*R_400[0]+-1*Q_001000110*R_401[0]+-1*Q_101000010*R_500[0]+Q_101000110*R_501[0];
double QR_000001010400=Q_000001010*R_400[0]+-1*Q_000001110*R_401[0]+-1*Q_000101010*R_410[0]+Q_000101110*R_411[0];
double QR_000000011400=Q_000000011*R_400[0]+-1*Q_000000111*R_401[0]+Q_000000211*R_402[0];
// ---------------------------------------------------------------------------
// Bra-side coefficient recurrences.
// Each Pd_abc[] holds the x/y/z components of an expansion coefficient built
// up from the lower-order Pd_001[], Pd_010[] and the scalar aPin1, all of
// which are defined earlier in this kernel (outside this section).
// The digit triple in each name tracks the orders being combined; the
// recurrence pattern resembles McMurchie-Davidson-style Hermite expansion
// coefficients for a Gaussian product -- NOTE(review): inferred from the
// naming scheme of this generated code; confirm against the code generator.
// NOTE(review): aPin1 is presumably 1/(2*zeta) for the bra pair -- confirm.
double Pd_101[3];
double Pd_002[3];
double Pd_102[3];
double Pd_202[3];
double Pd_110[3];
double Pd_011[3];
double Pd_111[3];
double Pd_211[3];
double Pd_012[3];
double Pd_112[3];
double Pd_212[3];
double Pd_312[3];
double Pd_020[3];
double Pd_120[3];
double Pd_220[3];
double Pd_021[3];
double Pd_121[3];
double Pd_221[3];
double Pd_321[3];
double Pd_022[3];
double Pd_122[3];
double Pd_222[3];
double Pd_322[3];
double Pd_422[3];
// Base case: the "101" coefficient is the constant aPin1 in every direction.
for(int i=0;i<3;i++){
Pd_101[i]=aPin1;
}
// "002" family: second order built from Pd_001 and the base Pd_101.
for(int i=0;i<3;i++){
Pd_002[i]=Pd_101[i]+Pd_001[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_102[i]=Pd_001[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_202[i]=aPin1*Pd_101[i];
}
// "110" base case: same constant as Pd_101.
for(int i=0;i<3;i++){
Pd_110[i]=aPin1;
}
// "011" family: mixed first orders from Pd_010 and Pd_001.
for(int i=0;i<3;i++){
Pd_011[i]=Pd_101[i]+Pd_010[i]*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_111[i]=Pd_010[i]*Pd_101[i]+aPin1*Pd_001[i];
}
for(int i=0;i<3;i++){
Pd_211[i]=aPin1*Pd_101[i];
}
// "012" family: raises the Pd_001 order on top of the 011 coefficients.
for(int i=0;i<3;i++){
Pd_012[i]=Pd_111[i]+Pd_001[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_112[i]=2*Pd_211[i]+Pd_001[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_212[i]=Pd_001[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_312[i]=aPin1*Pd_211[i];
}
// "020" family: second order in Pd_010.
for(int i=0;i<3;i++){
Pd_020[i]=Pd_110[i]+Pd_010[i]*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_120[i]=Pd_010[i]*Pd_110[i]+aPin1*Pd_010[i];
}
for(int i=0;i<3;i++){
Pd_220[i]=aPin1*Pd_110[i];
}
// "021" family: raises the Pd_010 order on top of the 011 coefficients.
for(int i=0;i<3;i++){
Pd_021[i]=Pd_111[i]+Pd_010[i]*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_121[i]=2*Pd_211[i]+Pd_010[i]*Pd_111[i]+aPin1*Pd_011[i];
}
for(int i=0;i<3;i++){
Pd_221[i]=Pd_010[i]*Pd_211[i]+aPin1*Pd_111[i];
}
for(int i=0;i<3;i++){
Pd_321[i]=aPin1*Pd_211[i];
}
// "022" family (highest order in this section): built from the 012 family,
// with integer prefactors (2, 3) from the recurrence depth.
for(int i=0;i<3;i++){
Pd_022[i]=Pd_112[i]+Pd_010[i]*Pd_012[i];
}
for(int i=0;i<3;i++){
Pd_122[i]=2*Pd_212[i]+Pd_010[i]*Pd_112[i]+aPin1*Pd_012[i];
}
for(int i=0;i<3;i++){
Pd_222[i]=3*Pd_312[i]+Pd_010[i]*Pd_212[i]+aPin1*Pd_112[i];
}
for(int i=0;i<3;i++){
Pd_322[i]=Pd_010[i]*Pd_312[i]+aPin1*Pd_212[i];
}
for(int i=0;i<3;i++){
Pd_422[i]=aPin1*Pd_312[i];
}
double P_022000000=Pd_022[0];
double P_122000000=Pd_122[0];
double P_222000000=Pd_222[0];
double P_322000000=Pd_322[0];
double P_422000000=Pd_422[0];
double P_021001000=Pd_021[0]*Pd_001[1];
double P_021101000=Pd_021[0]*Pd_101[1];
double P_121001000=Pd_121[0]*Pd_001[1];
double P_121101000=Pd_121[0]*Pd_101[1];
double P_221001000=Pd_221[0]*Pd_001[1];
double P_221101000=Pd_221[0]*Pd_101[1];
double P_321001000=Pd_321[0]*Pd_001[1];
double P_321101000=Pd_321[0]*Pd_101[1];
double P_020002000=Pd_020[0]*Pd_002[1];
double P_020102000=Pd_020[0]*Pd_102[1];
double P_020202000=Pd_020[0]*Pd_202[1];
double P_120002000=Pd_120[0]*Pd_002[1];
double P_120102000=Pd_120[0]*Pd_102[1];
double P_120202000=Pd_120[0]*Pd_202[1];
double P_220002000=Pd_220[0]*Pd_002[1];
double P_220102000=Pd_220[0]*Pd_102[1];
double P_220202000=Pd_220[0]*Pd_202[1];
double P_021000001=Pd_021[0]*Pd_001[2];
double P_021000101=Pd_021[0]*Pd_101[2];
double P_121000001=Pd_121[0]*Pd_001[2];
double P_121000101=Pd_121[0]*Pd_101[2];
double P_221000001=Pd_221[0]*Pd_001[2];
double P_221000101=Pd_221[0]*Pd_101[2];
double P_321000001=Pd_321[0]*Pd_001[2];
double P_321000101=Pd_321[0]*Pd_101[2];
double P_020001001=Pd_020[0]*Pd_001[1]*Pd_001[2];
double P_020001101=Pd_020[0]*Pd_001[1]*Pd_101[2];
double P_020101001=Pd_020[0]*Pd_101[1]*Pd_001[2];
double P_020101101=Pd_020[0]*Pd_101[1]*Pd_101[2];
double P_120001001=Pd_120[0]*Pd_001[1]*Pd_001[2];
double P_120001101=Pd_120[0]*Pd_001[1]*Pd_101[2];
double P_120101001=Pd_120[0]*Pd_101[1]*Pd_001[2];
double P_120101101=Pd_120[0]*Pd_101[1]*Pd_101[2];
double P_220001001=Pd_220[0]*Pd_001[1]*Pd_001[2];
double P_220001101=Pd_220[0]*Pd_001[1]*Pd_101[2];
double P_220101001=Pd_220[0]*Pd_101[1]*Pd_001[2];
double P_220101101=Pd_220[0]*Pd_101[1]*Pd_101[2];
double P_020000002=Pd_020[0]*Pd_002[2];
double P_020000102=Pd_020[0]*Pd_102[2];
double P_020000202=Pd_020[0]*Pd_202[2];
double P_120000002=Pd_120[0]*Pd_002[2];
double P_120000102=Pd_120[0]*Pd_102[2];
double P_120000202=Pd_120[0]*Pd_202[2];
double P_220000002=Pd_220[0]*Pd_002[2];
double P_220000102=Pd_220[0]*Pd_102[2];
double P_220000202=Pd_220[0]*Pd_202[2];
double P_012010000=Pd_012[0]*Pd_010[1];
double P_012110000=Pd_012[0]*Pd_110[1];
double P_112010000=Pd_112[0]*Pd_010[1];
double P_112110000=Pd_112[0]*Pd_110[1];
double P_212010000=Pd_212[0]*Pd_010[1];
double P_212110000=Pd_212[0]*Pd_110[1];
double P_312010000=Pd_312[0]*Pd_010[1];
double P_312110000=Pd_312[0]*Pd_110[1];
double P_011011000=Pd_011[0]*Pd_011[1];
double P_011111000=Pd_011[0]*Pd_111[1];
double P_011211000=Pd_011[0]*Pd_211[1];
double P_111011000=Pd_111[0]*Pd_011[1];
double P_111111000=Pd_111[0]*Pd_111[1];
double P_111211000=Pd_111[0]*Pd_211[1];
double P_211011000=Pd_211[0]*Pd_011[1];
double P_211111000=Pd_211[0]*Pd_111[1];
double P_211211000=Pd_211[0]*Pd_211[1];
double P_010012000=Pd_010[0]*Pd_012[1];
double P_010112000=Pd_010[0]*Pd_112[1];
double P_010212000=Pd_010[0]*Pd_212[1];
double P_010312000=Pd_010[0]*Pd_312[1];
double P_110012000=Pd_110[0]*Pd_012[1];
double P_110112000=Pd_110[0]*Pd_112[1];
double P_110212000=Pd_110[0]*Pd_212[1];
double P_110312000=Pd_110[0]*Pd_312[1];
double P_011010001=Pd_011[0]*Pd_010[1]*Pd_001[2];
double P_011010101=Pd_011[0]*Pd_010[1]*Pd_101[2];
double P_011110001=Pd_011[0]*Pd_110[1]*Pd_001[2];
double P_011110101=Pd_011[0]*Pd_110[1]*Pd_101[2];
double P_111010001=Pd_111[0]*Pd_010[1]*Pd_001[2];
double P_111010101=Pd_111[0]*Pd_010[1]*Pd_101[2];
double P_111110001=Pd_111[0]*Pd_110[1]*Pd_001[2];
double P_111110101=Pd_111[0]*Pd_110[1]*Pd_101[2];
double P_211010001=Pd_211[0]*Pd_010[1]*Pd_001[2];
double P_211010101=Pd_211[0]*Pd_010[1]*Pd_101[2];
double P_211110001=Pd_211[0]*Pd_110[1]*Pd_001[2];
double P_211110101=Pd_211[0]*Pd_110[1]*Pd_101[2];
double P_010011001=Pd_010[0]*Pd_011[1]*Pd_001[2];
double P_010011101=Pd_010[0]*Pd_011[1]*Pd_101[2];
double P_010111001=Pd_010[0]*Pd_111[1]*Pd_001[2];
double P_010111101=Pd_010[0]*Pd_111[1]*Pd_101[2];
double P_010211001=Pd_010[0]*Pd_211[1]*Pd_001[2];
double P_010211101=Pd_010[0]*Pd_211[1]*Pd_101[2];
double P_110011001=Pd_110[0]*Pd_011[1]*Pd_001[2];
double P_110011101=Pd_110[0]*Pd_011[1]*Pd_101[2];
double P_110111001=Pd_110[0]*Pd_111[1]*Pd_001[2];
double P_110111101=Pd_110[0]*Pd_111[1]*Pd_101[2];
double P_110211001=Pd_110[0]*Pd_211[1]*Pd_001[2];
double P_110211101=Pd_110[0]*Pd_211[1]*Pd_101[2];
double P_010010002=Pd_010[0]*Pd_010[1]*Pd_002[2];
double P_010010102=Pd_010[0]*Pd_010[1]*Pd_102[2];
double P_010010202=Pd_010[0]*Pd_010[1]*Pd_202[2];
double P_010110002=Pd_010[0]*Pd_110[1]*Pd_002[2];
double P_010110102=Pd_010[0]*Pd_110[1]*Pd_102[2];
double P_010110202=Pd_010[0]*Pd_110[1]*Pd_202[2];
double P_110010002=Pd_110[0]*Pd_010[1]*Pd_002[2];
double P_110010102=Pd_110[0]*Pd_010[1]*Pd_102[2];
double P_110010202=Pd_110[0]*Pd_010[1]*Pd_202[2];
double P_110110002=Pd_110[0]*Pd_110[1]*Pd_002[2];
double P_110110102=Pd_110[0]*Pd_110[1]*Pd_102[2];
double P_110110202=Pd_110[0]*Pd_110[1]*Pd_202[2];
double P_002020000=Pd_002[0]*Pd_020[1];
double P_002120000=Pd_002[0]*Pd_120[1];
double P_002220000=Pd_002[0]*Pd_220[1];
double P_102020000=Pd_102[0]*Pd_020[1];
double P_102120000=Pd_102[0]*Pd_120[1];
double P_102220000=Pd_102[0]*Pd_220[1];
double P_202020000=Pd_202[0]*Pd_020[1];
double P_202120000=Pd_202[0]*Pd_120[1];
double P_202220000=Pd_202[0]*Pd_220[1];
double P_001021000=Pd_001[0]*Pd_021[1];
double P_001121000=Pd_001[0]*Pd_121[1];
double P_001221000=Pd_001[0]*Pd_221[1];
double P_001321000=Pd_001[0]*Pd_321[1];
double P_101021000=Pd_101[0]*Pd_021[1];
double P_101121000=Pd_101[0]*Pd_121[1];
double P_101221000=Pd_101[0]*Pd_221[1];
double P_101321000=Pd_101[0]*Pd_321[1];
double P_000022000=Pd_022[1];
double P_000122000=Pd_122[1];
double P_000222000=Pd_222[1];
double P_000322000=Pd_322[1];
double P_000422000=Pd_422[1];
double P_001020001=Pd_001[0]*Pd_020[1]*Pd_001[2];
double P_001020101=Pd_001[0]*Pd_020[1]*Pd_101[2];
double P_001120001=Pd_001[0]*Pd_120[1]*Pd_001[2];
double P_001120101=Pd_001[0]*Pd_120[1]*Pd_101[2];
double P_001220001=Pd_001[0]*Pd_220[1]*Pd_001[2];
double P_001220101=Pd_001[0]*Pd_220[1]*Pd_101[2];
double P_101020001=Pd_101[0]*Pd_020[1]*Pd_001[2];
double P_101020101=Pd_101[0]*Pd_020[1]*Pd_101[2];
double P_101120001=Pd_101[0]*Pd_120[1]*Pd_001[2];
double P_101120101=Pd_101[0]*Pd_120[1]*Pd_101[2];
double P_101220001=Pd_101[0]*Pd_220[1]*Pd_001[2];
double P_101220101=Pd_101[0]*Pd_220[1]*Pd_101[2];
double P_000021001=Pd_021[1]*Pd_001[2];
double P_000021101=Pd_021[1]*Pd_101[2];
double P_000121001=Pd_121[1]*Pd_001[2];
double P_000121101=Pd_121[1]*Pd_101[2];
double P_000221001=Pd_221[1]*Pd_001[2];
double P_000221101=Pd_221[1]*Pd_101[2];
double P_000321001=Pd_321[1]*Pd_001[2];
double P_000321101=Pd_321[1]*Pd_101[2];
double P_000020002=Pd_020[1]*Pd_002[2];
double P_000020102=Pd_020[1]*Pd_102[2];
double P_000020202=Pd_020[1]*Pd_202[2];
double P_000120002=Pd_120[1]*Pd_002[2];
double P_000120102=Pd_120[1]*Pd_102[2];
double P_000120202=Pd_120[1]*Pd_202[2];
double P_000220002=Pd_220[1]*Pd_002[2];
double P_000220102=Pd_220[1]*Pd_102[2];
double P_000220202=Pd_220[1]*Pd_202[2];
double P_012000010=Pd_012[0]*Pd_010[2];
double P_012000110=Pd_012[0]*Pd_110[2];
double P_112000010=Pd_112[0]*Pd_010[2];
double P_112000110=Pd_112[0]*Pd_110[2];
double P_212000010=Pd_212[0]*Pd_010[2];
double P_212000110=Pd_212[0]*Pd_110[2];
double P_312000010=Pd_312[0]*Pd_010[2];
double P_312000110=Pd_312[0]*Pd_110[2];
double P_011001010=Pd_011[0]*Pd_001[1]*Pd_010[2];
double P_011001110=Pd_011[0]*Pd_001[1]*Pd_110[2];
double P_011101010=Pd_011[0]*Pd_101[1]*Pd_010[2];
double P_011101110=Pd_011[0]*Pd_101[1]*Pd_110[2];
double P_111001010=Pd_111[0]*Pd_001[1]*Pd_010[2];
double P_111001110=Pd_111[0]*Pd_001[1]*Pd_110[2];
double P_111101010=Pd_111[0]*Pd_101[1]*Pd_010[2];
double P_111101110=Pd_111[0]*Pd_101[1]*Pd_110[2];
double P_211001010=Pd_211[0]*Pd_001[1]*Pd_010[2];
double P_211001110=Pd_211[0]*Pd_001[1]*Pd_110[2];
double P_211101010=Pd_211[0]*Pd_101[1]*Pd_010[2];
double P_211101110=Pd_211[0]*Pd_101[1]*Pd_110[2];
double P_010002010=Pd_010[0]*Pd_002[1]*Pd_010[2];
double P_010002110=Pd_010[0]*Pd_002[1]*Pd_110[2];
double P_010102010=Pd_010[0]*Pd_102[1]*Pd_010[2];
double P_010102110=Pd_010[0]*Pd_102[1]*Pd_110[2];
double P_010202010=Pd_010[0]*Pd_202[1]*Pd_010[2];
double P_010202110=Pd_010[0]*Pd_202[1]*Pd_110[2];
double P_110002010=Pd_110[0]*Pd_002[1]*Pd_010[2];
double P_110002110=Pd_110[0]*Pd_002[1]*Pd_110[2];
double P_110102010=Pd_110[0]*Pd_102[1]*Pd_010[2];
double P_110102110=Pd_110[0]*Pd_102[1]*Pd_110[2];
double P_110202010=Pd_110[0]*Pd_202[1]*Pd_010[2];
double P_110202110=Pd_110[0]*Pd_202[1]*Pd_110[2];
double P_011000011=Pd_011[0]*Pd_011[2];
double P_011000111=Pd_011[0]*Pd_111[2];
double P_011000211=Pd_011[0]*Pd_211[2];
double P_111000011=Pd_111[0]*Pd_011[2];
double P_111000111=Pd_111[0]*Pd_111[2];
double P_111000211=Pd_111[0]*Pd_211[2];
double P_211000011=Pd_211[0]*Pd_011[2];
double P_211000111=Pd_211[0]*Pd_111[2];
double P_211000211=Pd_211[0]*Pd_211[2];
double P_010001011=Pd_010[0]*Pd_001[1]*Pd_011[2];
double P_010001111=Pd_010[0]*Pd_001[1]*Pd_111[2];
double P_010001211=Pd_010[0]*Pd_001[1]*Pd_211[2];
double P_010101011=Pd_010[0]*Pd_101[1]*Pd_011[2];
double P_010101111=Pd_010[0]*Pd_101[1]*Pd_111[2];
double P_010101211=Pd_010[0]*Pd_101[1]*Pd_211[2];
double P_110001011=Pd_110[0]*Pd_001[1]*Pd_011[2];
double P_110001111=Pd_110[0]*Pd_001[1]*Pd_111[2];
double P_110001211=Pd_110[0]*Pd_001[1]*Pd_211[2];
double P_110101011=Pd_110[0]*Pd_101[1]*Pd_011[2];
double P_110101111=Pd_110[0]*Pd_101[1]*Pd_111[2];
double P_110101211=Pd_110[0]*Pd_101[1]*Pd_211[2];
double P_010000012=Pd_010[0]*Pd_012[2];
double P_010000112=Pd_010[0]*Pd_112[2];
double P_010000212=Pd_010[0]*Pd_212[2];
double P_010000312=Pd_010[0]*Pd_312[2];
double P_110000012=Pd_110[0]*Pd_012[2];
double P_110000112=Pd_110[0]*Pd_112[2];
double P_110000212=Pd_110[0]*Pd_212[2];
double P_110000312=Pd_110[0]*Pd_312[2];
double P_002010010=Pd_002[0]*Pd_010[1]*Pd_010[2];
double P_002010110=Pd_002[0]*Pd_010[1]*Pd_110[2];
double P_002110010=Pd_002[0]*Pd_110[1]*Pd_010[2];
double P_002110110=Pd_002[0]*Pd_110[1]*Pd_110[2];
double P_102010010=Pd_102[0]*Pd_010[1]*Pd_010[2];
double P_102010110=Pd_102[0]*Pd_010[1]*Pd_110[2];
double P_102110010=Pd_102[0]*Pd_110[1]*Pd_010[2];
double P_102110110=Pd_102[0]*Pd_110[1]*Pd_110[2];
double P_202010010=Pd_202[0]*Pd_010[1]*Pd_010[2];
double P_202010110=Pd_202[0]*Pd_010[1]*Pd_110[2];
double P_202110010=Pd_202[0]*Pd_110[1]*Pd_010[2];
double P_202110110=Pd_202[0]*Pd_110[1]*Pd_110[2];
double P_001011010=Pd_001[0]*Pd_011[1]*Pd_010[2];
double P_001011110=Pd_001[0]*Pd_011[1]*Pd_110[2];
double P_001111010=Pd_001[0]*Pd_111[1]*Pd_010[2];
double P_001111110=Pd_001[0]*Pd_111[1]*Pd_110[2];
double P_001211010=Pd_001[0]*Pd_211[1]*Pd_010[2];
double P_001211110=Pd_001[0]*Pd_211[1]*Pd_110[2];
double P_101011010=Pd_101[0]*Pd_011[1]*Pd_010[2];
double P_101011110=Pd_101[0]*Pd_011[1]*Pd_110[2];
double P_101111010=Pd_101[0]*Pd_111[1]*Pd_010[2];
double P_101111110=Pd_101[0]*Pd_111[1]*Pd_110[2];
double P_101211010=Pd_101[0]*Pd_211[1]*Pd_010[2];
double P_101211110=Pd_101[0]*Pd_211[1]*Pd_110[2];
double P_000012010=Pd_012[1]*Pd_010[2];
double P_000012110=Pd_012[1]*Pd_110[2];
double P_000112010=Pd_112[1]*Pd_010[2];
double P_000112110=Pd_112[1]*Pd_110[2];
double P_000212010=Pd_212[1]*Pd_010[2];
double P_000212110=Pd_212[1]*Pd_110[2];
double P_000312010=Pd_312[1]*Pd_010[2];
double P_000312110=Pd_312[1]*Pd_110[2];
double P_001010011=Pd_001[0]*Pd_010[1]*Pd_011[2];
double P_001010111=Pd_001[0]*Pd_010[1]*Pd_111[2];
double P_001010211=Pd_001[0]*Pd_010[1]*Pd_211[2];
double P_001110011=Pd_001[0]*Pd_110[1]*Pd_011[2];
double P_001110111=Pd_001[0]*Pd_110[1]*Pd_111[2];
double P_001110211=Pd_001[0]*Pd_110[1]*Pd_211[2];
double P_101010011=Pd_101[0]*Pd_010[1]*Pd_011[2];
double P_101010111=Pd_101[0]*Pd_010[1]*Pd_111[2];
double P_101010211=Pd_101[0]*Pd_010[1]*Pd_211[2];
double P_101110011=Pd_101[0]*Pd_110[1]*Pd_011[2];
double P_101110111=Pd_101[0]*Pd_110[1]*Pd_111[2];
double P_101110211=Pd_101[0]*Pd_110[1]*Pd_211[2];
double P_000011011=Pd_011[1]*Pd_011[2];
double P_000011111=Pd_011[1]*Pd_111[2];
double P_000011211=Pd_011[1]*Pd_211[2];
double P_000111011=Pd_111[1]*Pd_011[2];
double P_000111111=Pd_111[1]*Pd_111[2];
double P_000111211=Pd_111[1]*Pd_211[2];
double P_000211011=Pd_211[1]*Pd_011[2];
double P_000211111=Pd_211[1]*Pd_111[2];
double P_000211211=Pd_211[1]*Pd_211[2];
double P_000010012=Pd_010[1]*Pd_012[2];
double P_000010112=Pd_010[1]*Pd_112[2];
double P_000010212=Pd_010[1]*Pd_212[2];
double P_000010312=Pd_010[1]*Pd_312[2];
double P_000110012=Pd_110[1]*Pd_012[2];
double P_000110112=Pd_110[1]*Pd_112[2];
double P_000110212=Pd_110[1]*Pd_212[2];
double P_000110312=Pd_110[1]*Pd_312[2];
double P_002000020=Pd_002[0]*Pd_020[2];
double P_002000120=Pd_002[0]*Pd_120[2];
// --- Auto-generated bra-side coefficient products (do not edit by hand) ---
// Each P_<xgroup><ygroup><zgroup> is a product of per-axis expansion
// coefficients Pd_*[0]=x, Pd_*[1]=y, Pd_*[2]=z.  NOTE(review): the digit
// triplets appear to encode per-axis angular-momentum/derivative orders in the
// generator's own naming scheme — confirm against the code generator before
// relying on the encoding.  An axis whose group is 000 contributes no factor
// (its coefficient is implicitly 1), which is why some products below have
// only one or two Pd_* factors.
// x-order-2 times z-order-(2,0) family: Pd_{0,1,2}02[0] * Pd_{0,1,2}20[2]
double P_002000220=Pd_002[0]*Pd_220[2];
double P_102000020=Pd_102[0]*Pd_020[2];
double P_102000120=Pd_102[0]*Pd_120[2];
double P_102000220=Pd_102[0]*Pd_220[2];
double P_202000020=Pd_202[0]*Pd_020[2];
double P_202000120=Pd_202[0]*Pd_120[2];
double P_202000220=Pd_202[0]*Pd_220[2];
// Three-factor x*y*z products: order (1,1,2) split across all three axes.
double P_001001020=Pd_001[0]*Pd_001[1]*Pd_020[2];
double P_001001120=Pd_001[0]*Pd_001[1]*Pd_120[2];
double P_001001220=Pd_001[0]*Pd_001[1]*Pd_220[2];
double P_001101020=Pd_001[0]*Pd_101[1]*Pd_020[2];
double P_001101120=Pd_001[0]*Pd_101[1]*Pd_120[2];
double P_001101220=Pd_001[0]*Pd_101[1]*Pd_220[2];
double P_101001020=Pd_101[0]*Pd_001[1]*Pd_020[2];
double P_101001120=Pd_101[0]*Pd_001[1]*Pd_120[2];
double P_101001220=Pd_101[0]*Pd_001[1]*Pd_220[2];
double P_101101020=Pd_101[0]*Pd_101[1]*Pd_020[2];
double P_101101120=Pd_101[0]*Pd_101[1]*Pd_120[2];
double P_101101220=Pd_101[0]*Pd_101[1]*Pd_220[2];
// y-order-2 times z-order-2 products (no x factor).
double P_000002020=Pd_002[1]*Pd_020[2];
double P_000002120=Pd_002[1]*Pd_120[2];
double P_000002220=Pd_002[1]*Pd_220[2];
double P_000102020=Pd_102[1]*Pd_020[2];
double P_000102120=Pd_102[1]*Pd_120[2];
double P_000102220=Pd_102[1]*Pd_220[2];
double P_000202020=Pd_202[1]*Pd_020[2];
double P_000202120=Pd_202[1]*Pd_120[2];
double P_000202220=Pd_202[1]*Pd_220[2];
// x-order-1 times z-order-(2,1) products.
double P_001000021=Pd_001[0]*Pd_021[2];
double P_001000121=Pd_001[0]*Pd_121[2];
double P_001000221=Pd_001[0]*Pd_221[2];
double P_001000321=Pd_001[0]*Pd_321[2];
double P_101000021=Pd_101[0]*Pd_021[2];
double P_101000121=Pd_101[0]*Pd_121[2];
double P_101000221=Pd_101[0]*Pd_221[2];
double P_101000321=Pd_101[0]*Pd_321[2];
// y-order-1 times z-order-(2,1) products.
double P_000001021=Pd_001[1]*Pd_021[2];
double P_000001121=Pd_001[1]*Pd_121[2];
double P_000001221=Pd_001[1]*Pd_221[2];
double P_000001321=Pd_001[1]*Pd_321[2];
double P_000101021=Pd_101[1]*Pd_021[2];
double P_000101121=Pd_101[1]*Pd_121[2];
double P_000101221=Pd_101[1]*Pd_221[2];
double P_000101321=Pd_101[1]*Pd_321[2];
// Pure z-axis order-(2,2) coefficients: single factor, x and y contribute 1.
double P_000000022=Pd_022[2];
double P_000000122=Pd_122[2];
double P_000000222=Pd_222[2];
double P_000000322=Pd_322[2];
double P_000000422=Pd_422[2];
// Contract the bra-side products P_{0..4}22000000 (pure x^2/z-free family,
// declared earlier in this kernel) against ket-side intermediate integrals
// QR_*, weighted by density-matrix elements Pmtrx[0..2], accumulating into
// the first three of 18 per-thread answer slots.  NOTE(review): generated
// code — the QR_* suffix digits presumably index the ket angular pattern and
// the trailing digit the auxiliary-integral order; confirm with the generator.
ans_temp[ans_id*18+0]+=Pmtrx[0]*(P_022000000*QR_011000000000+P_122000000*QR_011000000100+P_222000000*QR_011000000200+P_322000000*QR_011000000300+P_422000000*QR_011000000400);
ans_temp[ans_id*18+0]+=Pmtrx[1]*(P_022000000*QR_010001000000+P_122000000*QR_010001000100+P_222000000*QR_010001000200+P_322000000*QR_010001000300+P_422000000*QR_010001000400);
ans_temp[ans_id*18+0]+=Pmtrx[2]*(P_022000000*QR_010000001000+P_122000000*QR_010000001100+P_222000000*QR_010000001200+P_322000000*QR_010000001300+P_422000000*QR_010000001400);
ans_temp[ans_id*18+1]+=Pmtrx[0]*(P_022000000*QR_001010000000+P_122000000*QR_001010000100+P_222000000*QR_001010000200+P_322000000*QR_001010000300+P_422000000*QR_001010000400);
ans_temp[ans_id*18+1]+=Pmtrx[1]*(P_022000000*QR_000011000000+P_122000000*QR_000011000100+P_222000000*QR_000011000200+P_322000000*QR_000011000300+P_422000000*QR_000011000400);
ans_temp[ans_id*18+1]+=Pmtrx[2]*(P_022000000*QR_000010001000+P_122000000*QR_000010001100+P_222000000*QR_000010001200+P_322000000*QR_000010001300+P_422000000*QR_000010001400);
ans_temp[ans_id*18+2]+=Pmtrx[0]*(P_022000000*QR_001000010000+P_122000000*QR_001000010100+P_222000000*QR_001000010200+P_322000000*QR_001000010300+P_422000000*QR_001000010400);
ans_temp[ans_id*18+2]+=Pmtrx[1]*(P_022000000*QR_000001010000+P_122000000*QR_000001010100+P_222000000*QR_000001010200+P_322000000*QR_000001010300+P_422000000*QR_000001010400);
ans_temp[ans_id*18+2]+=Pmtrx[2]*(P_022000000*QR_000000011000+P_122000000*QR_000000011100+P_222000000*QR_000000011200+P_322000000*QR_000000011300+P_422000000*QR_000000011400);
ans_temp[ans_id*18+0]+=Pmtrx[3]*(P_021001000*QR_011000000000+P_021101000*QR_011000000010+P_121001000*QR_011000000100+P_121101000*QR_011000000110+P_221001000*QR_011000000200+P_221101000*QR_011000000210+P_321001000*QR_011000000300+P_321101000*QR_011000000310);
ans_temp[ans_id*18+0]+=Pmtrx[4]*(P_021001000*QR_010001000000+P_021101000*QR_010001000010+P_121001000*QR_010001000100+P_121101000*QR_010001000110+P_221001000*QR_010001000200+P_221101000*QR_010001000210+P_321001000*QR_010001000300+P_321101000*QR_010001000310);
ans_temp[ans_id*18+0]+=Pmtrx[5]*(P_021001000*QR_010000001000+P_021101000*QR_010000001010+P_121001000*QR_010000001100+P_121101000*QR_010000001110+P_221001000*QR_010000001200+P_221101000*QR_010000001210+P_321001000*QR_010000001300+P_321101000*QR_010000001310);
ans_temp[ans_id*18+1]+=Pmtrx[3]*(P_021001000*QR_001010000000+P_021101000*QR_001010000010+P_121001000*QR_001010000100+P_121101000*QR_001010000110+P_221001000*QR_001010000200+P_221101000*QR_001010000210+P_321001000*QR_001010000300+P_321101000*QR_001010000310);
ans_temp[ans_id*18+1]+=Pmtrx[4]*(P_021001000*QR_000011000000+P_021101000*QR_000011000010+P_121001000*QR_000011000100+P_121101000*QR_000011000110+P_221001000*QR_000011000200+P_221101000*QR_000011000210+P_321001000*QR_000011000300+P_321101000*QR_000011000310);
ans_temp[ans_id*18+1]+=Pmtrx[5]*(P_021001000*QR_000010001000+P_021101000*QR_000010001010+P_121001000*QR_000010001100+P_121101000*QR_000010001110+P_221001000*QR_000010001200+P_221101000*QR_000010001210+P_321001000*QR_000010001300+P_321101000*QR_000010001310);
ans_temp[ans_id*18+2]+=Pmtrx[3]*(P_021001000*QR_001000010000+P_021101000*QR_001000010010+P_121001000*QR_001000010100+P_121101000*QR_001000010110+P_221001000*QR_001000010200+P_221101000*QR_001000010210+P_321001000*QR_001000010300+P_321101000*QR_001000010310);
ans_temp[ans_id*18+2]+=Pmtrx[4]*(P_021001000*QR_000001010000+P_021101000*QR_000001010010+P_121001000*QR_000001010100+P_121101000*QR_000001010110+P_221001000*QR_000001010200+P_221101000*QR_000001010210+P_321001000*QR_000001010300+P_321101000*QR_000001010310);
ans_temp[ans_id*18+2]+=Pmtrx[5]*(P_021001000*QR_000000011000+P_021101000*QR_000000011010+P_121001000*QR_000000011100+P_121101000*QR_000000011110+P_221001000*QR_000000011200+P_221101000*QR_000000011210+P_321001000*QR_000000011300+P_321101000*QR_000000011310);
ans_temp[ans_id*18+0]+=Pmtrx[6]*(P_020002000*QR_011000000000+P_020102000*QR_011000000010+P_020202000*QR_011000000020+P_120002000*QR_011000000100+P_120102000*QR_011000000110+P_120202000*QR_011000000120+P_220002000*QR_011000000200+P_220102000*QR_011000000210+P_220202000*QR_011000000220);
ans_temp[ans_id*18+0]+=Pmtrx[7]*(P_020002000*QR_010001000000+P_020102000*QR_010001000010+P_020202000*QR_010001000020+P_120002000*QR_010001000100+P_120102000*QR_010001000110+P_120202000*QR_010001000120+P_220002000*QR_010001000200+P_220102000*QR_010001000210+P_220202000*QR_010001000220);
ans_temp[ans_id*18+0]+=Pmtrx[8]*(P_020002000*QR_010000001000+P_020102000*QR_010000001010+P_020202000*QR_010000001020+P_120002000*QR_010000001100+P_120102000*QR_010000001110+P_120202000*QR_010000001120+P_220002000*QR_010000001200+P_220102000*QR_010000001210+P_220202000*QR_010000001220);
ans_temp[ans_id*18+1]+=Pmtrx[6]*(P_020002000*QR_001010000000+P_020102000*QR_001010000010+P_020202000*QR_001010000020+P_120002000*QR_001010000100+P_120102000*QR_001010000110+P_120202000*QR_001010000120+P_220002000*QR_001010000200+P_220102000*QR_001010000210+P_220202000*QR_001010000220);
ans_temp[ans_id*18+1]+=Pmtrx[7]*(P_020002000*QR_000011000000+P_020102000*QR_000011000010+P_020202000*QR_000011000020+P_120002000*QR_000011000100+P_120102000*QR_000011000110+P_120202000*QR_000011000120+P_220002000*QR_000011000200+P_220102000*QR_000011000210+P_220202000*QR_000011000220);
ans_temp[ans_id*18+1]+=Pmtrx[8]*(P_020002000*QR_000010001000+P_020102000*QR_000010001010+P_020202000*QR_000010001020+P_120002000*QR_000010001100+P_120102000*QR_000010001110+P_120202000*QR_000010001120+P_220002000*QR_000010001200+P_220102000*QR_000010001210+P_220202000*QR_000010001220);
ans_temp[ans_id*18+2]+=Pmtrx[6]*(P_020002000*QR_001000010000+P_020102000*QR_001000010010+P_020202000*QR_001000010020+P_120002000*QR_001000010100+P_120102000*QR_001000010110+P_120202000*QR_001000010120+P_220002000*QR_001000010200+P_220102000*QR_001000010210+P_220202000*QR_001000010220);
ans_temp[ans_id*18+2]+=Pmtrx[7]*(P_020002000*QR_000001010000+P_020102000*QR_000001010010+P_020202000*QR_000001010020+P_120002000*QR_000001010100+P_120102000*QR_000001010110+P_120202000*QR_000001010120+P_220002000*QR_000001010200+P_220102000*QR_000001010210+P_220202000*QR_000001010220);
ans_temp[ans_id*18+2]+=Pmtrx[8]*(P_020002000*QR_000000011000+P_020102000*QR_000000011010+P_020202000*QR_000000011020+P_120002000*QR_000000011100+P_120102000*QR_000000011110+P_120202000*QR_000000011120+P_220002000*QR_000000011200+P_220102000*QR_000000011210+P_220202000*QR_000000011220);
ans_temp[ans_id*18+0]+=Pmtrx[9]*(P_021000001*QR_011000000000+P_021000101*QR_011000000001+P_121000001*QR_011000000100+P_121000101*QR_011000000101+P_221000001*QR_011000000200+P_221000101*QR_011000000201+P_321000001*QR_011000000300+P_321000101*QR_011000000301);
ans_temp[ans_id*18+0]+=Pmtrx[10]*(P_021000001*QR_010001000000+P_021000101*QR_010001000001+P_121000001*QR_010001000100+P_121000101*QR_010001000101+P_221000001*QR_010001000200+P_221000101*QR_010001000201+P_321000001*QR_010001000300+P_321000101*QR_010001000301);
ans_temp[ans_id*18+0]+=Pmtrx[11]*(P_021000001*QR_010000001000+P_021000101*QR_010000001001+P_121000001*QR_010000001100+P_121000101*QR_010000001101+P_221000001*QR_010000001200+P_221000101*QR_010000001201+P_321000001*QR_010000001300+P_321000101*QR_010000001301);
ans_temp[ans_id*18+1]+=Pmtrx[9]*(P_021000001*QR_001010000000+P_021000101*QR_001010000001+P_121000001*QR_001010000100+P_121000101*QR_001010000101+P_221000001*QR_001010000200+P_221000101*QR_001010000201+P_321000001*QR_001010000300+P_321000101*QR_001010000301);
ans_temp[ans_id*18+1]+=Pmtrx[10]*(P_021000001*QR_000011000000+P_021000101*QR_000011000001+P_121000001*QR_000011000100+P_121000101*QR_000011000101+P_221000001*QR_000011000200+P_221000101*QR_000011000201+P_321000001*QR_000011000300+P_321000101*QR_000011000301);
ans_temp[ans_id*18+1]+=Pmtrx[11]*(P_021000001*QR_000010001000+P_021000101*QR_000010001001+P_121000001*QR_000010001100+P_121000101*QR_000010001101+P_221000001*QR_000010001200+P_221000101*QR_000010001201+P_321000001*QR_000010001300+P_321000101*QR_000010001301);
ans_temp[ans_id*18+2]+=Pmtrx[9]*(P_021000001*QR_001000010000+P_021000101*QR_001000010001+P_121000001*QR_001000010100+P_121000101*QR_001000010101+P_221000001*QR_001000010200+P_221000101*QR_001000010201+P_321000001*QR_001000010300+P_321000101*QR_001000010301);
ans_temp[ans_id*18+2]+=Pmtrx[10]*(P_021000001*QR_000001010000+P_021000101*QR_000001010001+P_121000001*QR_000001010100+P_121000101*QR_000001010101+P_221000001*QR_000001010200+P_221000101*QR_000001010201+P_321000001*QR_000001010300+P_321000101*QR_000001010301);
ans_temp[ans_id*18+2]+=Pmtrx[11]*(P_021000001*QR_000000011000+P_021000101*QR_000000011001+P_121000001*QR_000000011100+P_121000101*QR_000000011101+P_221000001*QR_000000011200+P_221000101*QR_000000011201+P_321000001*QR_000000011300+P_321000101*QR_000000011301);
ans_temp[ans_id*18+0]+=Pmtrx[12]*(P_020001001*QR_011000000000+P_020001101*QR_011000000001+P_020101001*QR_011000000010+P_020101101*QR_011000000011+P_120001001*QR_011000000100+P_120001101*QR_011000000101+P_120101001*QR_011000000110+P_120101101*QR_011000000111+P_220001001*QR_011000000200+P_220001101*QR_011000000201+P_220101001*QR_011000000210+P_220101101*QR_011000000211);
ans_temp[ans_id*18+0]+=Pmtrx[13]*(P_020001001*QR_010001000000+P_020001101*QR_010001000001+P_020101001*QR_010001000010+P_020101101*QR_010001000011+P_120001001*QR_010001000100+P_120001101*QR_010001000101+P_120101001*QR_010001000110+P_120101101*QR_010001000111+P_220001001*QR_010001000200+P_220001101*QR_010001000201+P_220101001*QR_010001000210+P_220101101*QR_010001000211);
ans_temp[ans_id*18+0]+=Pmtrx[14]*(P_020001001*QR_010000001000+P_020001101*QR_010000001001+P_020101001*QR_010000001010+P_020101101*QR_010000001011+P_120001001*QR_010000001100+P_120001101*QR_010000001101+P_120101001*QR_010000001110+P_120101101*QR_010000001111+P_220001001*QR_010000001200+P_220001101*QR_010000001201+P_220101001*QR_010000001210+P_220101101*QR_010000001211);
ans_temp[ans_id*18+1]+=Pmtrx[12]*(P_020001001*QR_001010000000+P_020001101*QR_001010000001+P_020101001*QR_001010000010+P_020101101*QR_001010000011+P_120001001*QR_001010000100+P_120001101*QR_001010000101+P_120101001*QR_001010000110+P_120101101*QR_001010000111+P_220001001*QR_001010000200+P_220001101*QR_001010000201+P_220101001*QR_001010000210+P_220101101*QR_001010000211);
ans_temp[ans_id*18+1]+=Pmtrx[13]*(P_020001001*QR_000011000000+P_020001101*QR_000011000001+P_020101001*QR_000011000010+P_020101101*QR_000011000011+P_120001001*QR_000011000100+P_120001101*QR_000011000101+P_120101001*QR_000011000110+P_120101101*QR_000011000111+P_220001001*QR_000011000200+P_220001101*QR_000011000201+P_220101001*QR_000011000210+P_220101101*QR_000011000211);
ans_temp[ans_id*18+1]+=Pmtrx[14]*(P_020001001*QR_000010001000+P_020001101*QR_000010001001+P_020101001*QR_000010001010+P_020101101*QR_000010001011+P_120001001*QR_000010001100+P_120001101*QR_000010001101+P_120101001*QR_000010001110+P_120101101*QR_000010001111+P_220001001*QR_000010001200+P_220001101*QR_000010001201+P_220101001*QR_000010001210+P_220101101*QR_000010001211);
ans_temp[ans_id*18+2]+=Pmtrx[12]*(P_020001001*QR_001000010000+P_020001101*QR_001000010001+P_020101001*QR_001000010010+P_020101101*QR_001000010011+P_120001001*QR_001000010100+P_120001101*QR_001000010101+P_120101001*QR_001000010110+P_120101101*QR_001000010111+P_220001001*QR_001000010200+P_220001101*QR_001000010201+P_220101001*QR_001000010210+P_220101101*QR_001000010211);
ans_temp[ans_id*18+2]+=Pmtrx[13]*(P_020001001*QR_000001010000+P_020001101*QR_000001010001+P_020101001*QR_000001010010+P_020101101*QR_000001010011+P_120001001*QR_000001010100+P_120001101*QR_000001010101+P_120101001*QR_000001010110+P_120101101*QR_000001010111+P_220001001*QR_000001010200+P_220001101*QR_000001010201+P_220101001*QR_000001010210+P_220101101*QR_000001010211);
ans_temp[ans_id*18+2]+=Pmtrx[14]*(P_020001001*QR_000000011000+P_020001101*QR_000000011001+P_020101001*QR_000000011010+P_020101101*QR_000000011011+P_120001001*QR_000000011100+P_120001101*QR_000000011101+P_120101001*QR_000000011110+P_120101101*QR_000000011111+P_220001001*QR_000000011200+P_220001101*QR_000000011201+P_220101001*QR_000000011210+P_220101101*QR_000000011211);
ans_temp[ans_id*18+0]+=Pmtrx[15]*(P_020000002*QR_011000000000+P_020000102*QR_011000000001+P_020000202*QR_011000000002+P_120000002*QR_011000000100+P_120000102*QR_011000000101+P_120000202*QR_011000000102+P_220000002*QR_011000000200+P_220000102*QR_011000000201+P_220000202*QR_011000000202);
ans_temp[ans_id*18+0]+=Pmtrx[16]*(P_020000002*QR_010001000000+P_020000102*QR_010001000001+P_020000202*QR_010001000002+P_120000002*QR_010001000100+P_120000102*QR_010001000101+P_120000202*QR_010001000102+P_220000002*QR_010001000200+P_220000102*QR_010001000201+P_220000202*QR_010001000202);
ans_temp[ans_id*18+0]+=Pmtrx[17]*(P_020000002*QR_010000001000+P_020000102*QR_010000001001+P_020000202*QR_010000001002+P_120000002*QR_010000001100+P_120000102*QR_010000001101+P_120000202*QR_010000001102+P_220000002*QR_010000001200+P_220000102*QR_010000001201+P_220000202*QR_010000001202);
ans_temp[ans_id*18+1]+=Pmtrx[15]*(P_020000002*QR_001010000000+P_020000102*QR_001010000001+P_020000202*QR_001010000002+P_120000002*QR_001010000100+P_120000102*QR_001010000101+P_120000202*QR_001010000102+P_220000002*QR_001010000200+P_220000102*QR_001010000201+P_220000202*QR_001010000202);
ans_temp[ans_id*18+1]+=Pmtrx[16]*(P_020000002*QR_000011000000+P_020000102*QR_000011000001+P_020000202*QR_000011000002+P_120000002*QR_000011000100+P_120000102*QR_000011000101+P_120000202*QR_000011000102+P_220000002*QR_000011000200+P_220000102*QR_000011000201+P_220000202*QR_000011000202);
ans_temp[ans_id*18+1]+=Pmtrx[17]*(P_020000002*QR_000010001000+P_020000102*QR_000010001001+P_020000202*QR_000010001002+P_120000002*QR_000010001100+P_120000102*QR_000010001101+P_120000202*QR_000010001102+P_220000002*QR_000010001200+P_220000102*QR_000010001201+P_220000202*QR_000010001202);
ans_temp[ans_id*18+2]+=Pmtrx[15]*(P_020000002*QR_001000010000+P_020000102*QR_001000010001+P_020000202*QR_001000010002+P_120000002*QR_001000010100+P_120000102*QR_001000010101+P_120000202*QR_001000010102+P_220000002*QR_001000010200+P_220000102*QR_001000010201+P_220000202*QR_001000010202);
ans_temp[ans_id*18+2]+=Pmtrx[16]*(P_020000002*QR_000001010000+P_020000102*QR_000001010001+P_020000202*QR_000001010002+P_120000002*QR_000001010100+P_120000102*QR_000001010101+P_120000202*QR_000001010102+P_220000002*QR_000001010200+P_220000102*QR_000001010201+P_220000202*QR_000001010202);
ans_temp[ans_id*18+2]+=Pmtrx[17]*(P_020000002*QR_000000011000+P_020000102*QR_000000011001+P_020000202*QR_000000011002+P_120000002*QR_000000011100+P_120000102*QR_000000011101+P_120000202*QR_000000011102+P_220000002*QR_000000011200+P_220000102*QR_000000011201+P_220000202*QR_000000011202);
ans_temp[ans_id*18+3]+=Pmtrx[0]*(P_012010000*QR_011000000000+P_012110000*QR_011000000010+P_112010000*QR_011000000100+P_112110000*QR_011000000110+P_212010000*QR_011000000200+P_212110000*QR_011000000210+P_312010000*QR_011000000300+P_312110000*QR_011000000310);
ans_temp[ans_id*18+3]+=Pmtrx[1]*(P_012010000*QR_010001000000+P_012110000*QR_010001000010+P_112010000*QR_010001000100+P_112110000*QR_010001000110+P_212010000*QR_010001000200+P_212110000*QR_010001000210+P_312010000*QR_010001000300+P_312110000*QR_010001000310);
ans_temp[ans_id*18+3]+=Pmtrx[2]*(P_012010000*QR_010000001000+P_012110000*QR_010000001010+P_112010000*QR_010000001100+P_112110000*QR_010000001110+P_212010000*QR_010000001200+P_212110000*QR_010000001210+P_312010000*QR_010000001300+P_312110000*QR_010000001310);
ans_temp[ans_id*18+4]+=Pmtrx[0]*(P_012010000*QR_001010000000+P_012110000*QR_001010000010+P_112010000*QR_001010000100+P_112110000*QR_001010000110+P_212010000*QR_001010000200+P_212110000*QR_001010000210+P_312010000*QR_001010000300+P_312110000*QR_001010000310);
ans_temp[ans_id*18+4]+=Pmtrx[1]*(P_012010000*QR_000011000000+P_012110000*QR_000011000010+P_112010000*QR_000011000100+P_112110000*QR_000011000110+P_212010000*QR_000011000200+P_212110000*QR_000011000210+P_312010000*QR_000011000300+P_312110000*QR_000011000310);
ans_temp[ans_id*18+4]+=Pmtrx[2]*(P_012010000*QR_000010001000+P_012110000*QR_000010001010+P_112010000*QR_000010001100+P_112110000*QR_000010001110+P_212010000*QR_000010001200+P_212110000*QR_000010001210+P_312010000*QR_000010001300+P_312110000*QR_000010001310);
ans_temp[ans_id*18+5]+=Pmtrx[0]*(P_012010000*QR_001000010000+P_012110000*QR_001000010010+P_112010000*QR_001000010100+P_112110000*QR_001000010110+P_212010000*QR_001000010200+P_212110000*QR_001000010210+P_312010000*QR_001000010300+P_312110000*QR_001000010310);
ans_temp[ans_id*18+5]+=Pmtrx[1]*(P_012010000*QR_000001010000+P_012110000*QR_000001010010+P_112010000*QR_000001010100+P_112110000*QR_000001010110+P_212010000*QR_000001010200+P_212110000*QR_000001010210+P_312010000*QR_000001010300+P_312110000*QR_000001010310);
ans_temp[ans_id*18+5]+=Pmtrx[2]*(P_012010000*QR_000000011000+P_012110000*QR_000000011010+P_112010000*QR_000000011100+P_112110000*QR_000000011110+P_212010000*QR_000000011200+P_212110000*QR_000000011210+P_312010000*QR_000000011300+P_312110000*QR_000000011310);
ans_temp[ans_id*18+3]+=Pmtrx[3]*(P_011011000*QR_011000000000+P_011111000*QR_011000000010+P_011211000*QR_011000000020+P_111011000*QR_011000000100+P_111111000*QR_011000000110+P_111211000*QR_011000000120+P_211011000*QR_011000000200+P_211111000*QR_011000000210+P_211211000*QR_011000000220);
ans_temp[ans_id*18+3]+=Pmtrx[4]*(P_011011000*QR_010001000000+P_011111000*QR_010001000010+P_011211000*QR_010001000020+P_111011000*QR_010001000100+P_111111000*QR_010001000110+P_111211000*QR_010001000120+P_211011000*QR_010001000200+P_211111000*QR_010001000210+P_211211000*QR_010001000220);
ans_temp[ans_id*18+3]+=Pmtrx[5]*(P_011011000*QR_010000001000+P_011111000*QR_010000001010+P_011211000*QR_010000001020+P_111011000*QR_010000001100+P_111111000*QR_010000001110+P_111211000*QR_010000001120+P_211011000*QR_010000001200+P_211111000*QR_010000001210+P_211211000*QR_010000001220);
ans_temp[ans_id*18+4]+=Pmtrx[3]*(P_011011000*QR_001010000000+P_011111000*QR_001010000010+P_011211000*QR_001010000020+P_111011000*QR_001010000100+P_111111000*QR_001010000110+P_111211000*QR_001010000120+P_211011000*QR_001010000200+P_211111000*QR_001010000210+P_211211000*QR_001010000220);
ans_temp[ans_id*18+4]+=Pmtrx[4]*(P_011011000*QR_000011000000+P_011111000*QR_000011000010+P_011211000*QR_000011000020+P_111011000*QR_000011000100+P_111111000*QR_000011000110+P_111211000*QR_000011000120+P_211011000*QR_000011000200+P_211111000*QR_000011000210+P_211211000*QR_000011000220);
ans_temp[ans_id*18+4]+=Pmtrx[5]*(P_011011000*QR_000010001000+P_011111000*QR_000010001010+P_011211000*QR_000010001020+P_111011000*QR_000010001100+P_111111000*QR_000010001110+P_111211000*QR_000010001120+P_211011000*QR_000010001200+P_211111000*QR_000010001210+P_211211000*QR_000010001220);
ans_temp[ans_id*18+5]+=Pmtrx[3]*(P_011011000*QR_001000010000+P_011111000*QR_001000010010+P_011211000*QR_001000010020+P_111011000*QR_001000010100+P_111111000*QR_001000010110+P_111211000*QR_001000010120+P_211011000*QR_001000010200+P_211111000*QR_001000010210+P_211211000*QR_001000010220);
ans_temp[ans_id*18+5]+=Pmtrx[4]*(P_011011000*QR_000001010000+P_011111000*QR_000001010010+P_011211000*QR_000001010020+P_111011000*QR_000001010100+P_111111000*QR_000001010110+P_111211000*QR_000001010120+P_211011000*QR_000001010200+P_211111000*QR_000001010210+P_211211000*QR_000001010220);
ans_temp[ans_id*18+5]+=Pmtrx[5]*(P_011011000*QR_000000011000+P_011111000*QR_000000011010+P_011211000*QR_000000011020+P_111011000*QR_000000011100+P_111111000*QR_000000011110+P_111211000*QR_000000011120+P_211011000*QR_000000011200+P_211111000*QR_000000011210+P_211211000*QR_000000011220);
ans_temp[ans_id*18+3]+=Pmtrx[6]*(P_010012000*QR_011000000000+P_010112000*QR_011000000010+P_010212000*QR_011000000020+P_010312000*QR_011000000030+P_110012000*QR_011000000100+P_110112000*QR_011000000110+P_110212000*QR_011000000120+P_110312000*QR_011000000130);
ans_temp[ans_id*18+3]+=Pmtrx[7]*(P_010012000*QR_010001000000+P_010112000*QR_010001000010+P_010212000*QR_010001000020+P_010312000*QR_010001000030+P_110012000*QR_010001000100+P_110112000*QR_010001000110+P_110212000*QR_010001000120+P_110312000*QR_010001000130);
ans_temp[ans_id*18+3]+=Pmtrx[8]*(P_010012000*QR_010000001000+P_010112000*QR_010000001010+P_010212000*QR_010000001020+P_010312000*QR_010000001030+P_110012000*QR_010000001100+P_110112000*QR_010000001110+P_110212000*QR_010000001120+P_110312000*QR_010000001130);
ans_temp[ans_id*18+4]+=Pmtrx[6]*(P_010012000*QR_001010000000+P_010112000*QR_001010000010+P_010212000*QR_001010000020+P_010312000*QR_001010000030+P_110012000*QR_001010000100+P_110112000*QR_001010000110+P_110212000*QR_001010000120+P_110312000*QR_001010000130);
ans_temp[ans_id*18+4]+=Pmtrx[7]*(P_010012000*QR_000011000000+P_010112000*QR_000011000010+P_010212000*QR_000011000020+P_010312000*QR_000011000030+P_110012000*QR_000011000100+P_110112000*QR_000011000110+P_110212000*QR_000011000120+P_110312000*QR_000011000130);
ans_temp[ans_id*18+4]+=Pmtrx[8]*(P_010012000*QR_000010001000+P_010112000*QR_000010001010+P_010212000*QR_000010001020+P_010312000*QR_000010001030+P_110012000*QR_000010001100+P_110112000*QR_000010001110+P_110212000*QR_000010001120+P_110312000*QR_000010001130);
ans_temp[ans_id*18+5]+=Pmtrx[6]*(P_010012000*QR_001000010000+P_010112000*QR_001000010010+P_010212000*QR_001000010020+P_010312000*QR_001000010030+P_110012000*QR_001000010100+P_110112000*QR_001000010110+P_110212000*QR_001000010120+P_110312000*QR_001000010130);
ans_temp[ans_id*18+5]+=Pmtrx[7]*(P_010012000*QR_000001010000+P_010112000*QR_000001010010+P_010212000*QR_000001010020+P_010312000*QR_000001010030+P_110012000*QR_000001010100+P_110112000*QR_000001010110+P_110212000*QR_000001010120+P_110312000*QR_000001010130);
ans_temp[ans_id*18+5]+=Pmtrx[8]*(P_010012000*QR_000000011000+P_010112000*QR_000000011010+P_010212000*QR_000000011020+P_010312000*QR_000000011030+P_110012000*QR_000000011100+P_110112000*QR_000000011110+P_110212000*QR_000000011120+P_110312000*QR_000000011130);
ans_temp[ans_id*18+3]+=Pmtrx[9]*(P_011010001*QR_011000000000+P_011010101*QR_011000000001+P_011110001*QR_011000000010+P_011110101*QR_011000000011+P_111010001*QR_011000000100+P_111010101*QR_011000000101+P_111110001*QR_011000000110+P_111110101*QR_011000000111+P_211010001*QR_011000000200+P_211010101*QR_011000000201+P_211110001*QR_011000000210+P_211110101*QR_011000000211);
ans_temp[ans_id*18+3]+=Pmtrx[10]*(P_011010001*QR_010001000000+P_011010101*QR_010001000001+P_011110001*QR_010001000010+P_011110101*QR_010001000011+P_111010001*QR_010001000100+P_111010101*QR_010001000101+P_111110001*QR_010001000110+P_111110101*QR_010001000111+P_211010001*QR_010001000200+P_211010101*QR_010001000201+P_211110001*QR_010001000210+P_211110101*QR_010001000211);
ans_temp[ans_id*18+3]+=Pmtrx[11]*(P_011010001*QR_010000001000+P_011010101*QR_010000001001+P_011110001*QR_010000001010+P_011110101*QR_010000001011+P_111010001*QR_010000001100+P_111010101*QR_010000001101+P_111110001*QR_010000001110+P_111110101*QR_010000001111+P_211010001*QR_010000001200+P_211010101*QR_010000001201+P_211110001*QR_010000001210+P_211110101*QR_010000001211);
ans_temp[ans_id*18+4]+=Pmtrx[9]*(P_011010001*QR_001010000000+P_011010101*QR_001010000001+P_011110001*QR_001010000010+P_011110101*QR_001010000011+P_111010001*QR_001010000100+P_111010101*QR_001010000101+P_111110001*QR_001010000110+P_111110101*QR_001010000111+P_211010001*QR_001010000200+P_211010101*QR_001010000201+P_211110001*QR_001010000210+P_211110101*QR_001010000211);
ans_temp[ans_id*18+4]+=Pmtrx[10]*(P_011010001*QR_000011000000+P_011010101*QR_000011000001+P_011110001*QR_000011000010+P_011110101*QR_000011000011+P_111010001*QR_000011000100+P_111010101*QR_000011000101+P_111110001*QR_000011000110+P_111110101*QR_000011000111+P_211010001*QR_000011000200+P_211010101*QR_000011000201+P_211110001*QR_000011000210+P_211110101*QR_000011000211);
ans_temp[ans_id*18+4]+=Pmtrx[11]*(P_011010001*QR_000010001000+P_011010101*QR_000010001001+P_011110001*QR_000010001010+P_011110101*QR_000010001011+P_111010001*QR_000010001100+P_111010101*QR_000010001101+P_111110001*QR_000010001110+P_111110101*QR_000010001111+P_211010001*QR_000010001200+P_211010101*QR_000010001201+P_211110001*QR_000010001210+P_211110101*QR_000010001211);
ans_temp[ans_id*18+5]+=Pmtrx[9]*(P_011010001*QR_001000010000+P_011010101*QR_001000010001+P_011110001*QR_001000010010+P_011110101*QR_001000010011+P_111010001*QR_001000010100+P_111010101*QR_001000010101+P_111110001*QR_001000010110+P_111110101*QR_001000010111+P_211010001*QR_001000010200+P_211010101*QR_001000010201+P_211110001*QR_001000010210+P_211110101*QR_001000010211);
ans_temp[ans_id*18+5]+=Pmtrx[10]*(P_011010001*QR_000001010000+P_011010101*QR_000001010001+P_011110001*QR_000001010010+P_011110101*QR_000001010011+P_111010001*QR_000001010100+P_111010101*QR_000001010101+P_111110001*QR_000001010110+P_111110101*QR_000001010111+P_211010001*QR_000001010200+P_211010101*QR_000001010201+P_211110001*QR_000001010210+P_211110101*QR_000001010211);
ans_temp[ans_id*18+5]+=Pmtrx[11]*(P_011010001*QR_000000011000+P_011010101*QR_000000011001+P_011110001*QR_000000011010+P_011110101*QR_000000011011+P_111010001*QR_000000011100+P_111010101*QR_000000011101+P_111110001*QR_000000011110+P_111110101*QR_000000011111+P_211010001*QR_000000011200+P_211010101*QR_000000011201+P_211110001*QR_000000011210+P_211110101*QR_000000011211);
ans_temp[ans_id*18+3]+=Pmtrx[12]*(P_010011001*QR_011000000000+P_010011101*QR_011000000001+P_010111001*QR_011000000010+P_010111101*QR_011000000011+P_010211001*QR_011000000020+P_010211101*QR_011000000021+P_110011001*QR_011000000100+P_110011101*QR_011000000101+P_110111001*QR_011000000110+P_110111101*QR_011000000111+P_110211001*QR_011000000120+P_110211101*QR_011000000121);
ans_temp[ans_id*18+3]+=Pmtrx[13]*(P_010011001*QR_010001000000+P_010011101*QR_010001000001+P_010111001*QR_010001000010+P_010111101*QR_010001000011+P_010211001*QR_010001000020+P_010211101*QR_010001000021+P_110011001*QR_010001000100+P_110011101*QR_010001000101+P_110111001*QR_010001000110+P_110111101*QR_010001000111+P_110211001*QR_010001000120+P_110211101*QR_010001000121);
ans_temp[ans_id*18+3]+=Pmtrx[14]*(P_010011001*QR_010000001000+P_010011101*QR_010000001001+P_010111001*QR_010000001010+P_010111101*QR_010000001011+P_010211001*QR_010000001020+P_010211101*QR_010000001021+P_110011001*QR_010000001100+P_110011101*QR_010000001101+P_110111001*QR_010000001110+P_110111101*QR_010000001111+P_110211001*QR_010000001120+P_110211101*QR_010000001121);
ans_temp[ans_id*18+4]+=Pmtrx[12]*(P_010011001*QR_001010000000+P_010011101*QR_001010000001+P_010111001*QR_001010000010+P_010111101*QR_001010000011+P_010211001*QR_001010000020+P_010211101*QR_001010000021+P_110011001*QR_001010000100+P_110011101*QR_001010000101+P_110111001*QR_001010000110+P_110111101*QR_001010000111+P_110211001*QR_001010000120+P_110211101*QR_001010000121);
ans_temp[ans_id*18+4]+=Pmtrx[13]*(P_010011001*QR_000011000000+P_010011101*QR_000011000001+P_010111001*QR_000011000010+P_010111101*QR_000011000011+P_010211001*QR_000011000020+P_010211101*QR_000011000021+P_110011001*QR_000011000100+P_110011101*QR_000011000101+P_110111001*QR_000011000110+P_110111101*QR_000011000111+P_110211001*QR_000011000120+P_110211101*QR_000011000121);
ans_temp[ans_id*18+4]+=Pmtrx[14]*(P_010011001*QR_000010001000+P_010011101*QR_000010001001+P_010111001*QR_000010001010+P_010111101*QR_000010001011+P_010211001*QR_000010001020+P_010211101*QR_000010001021+P_110011001*QR_000010001100+P_110011101*QR_000010001101+P_110111001*QR_000010001110+P_110111101*QR_000010001111+P_110211001*QR_000010001120+P_110211101*QR_000010001121);
ans_temp[ans_id*18+5]+=Pmtrx[12]*(P_010011001*QR_001000010000+P_010011101*QR_001000010001+P_010111001*QR_001000010010+P_010111101*QR_001000010011+P_010211001*QR_001000010020+P_010211101*QR_001000010021+P_110011001*QR_001000010100+P_110011101*QR_001000010101+P_110111001*QR_001000010110+P_110111101*QR_001000010111+P_110211001*QR_001000010120+P_110211101*QR_001000010121);
ans_temp[ans_id*18+5]+=Pmtrx[13]*(P_010011001*QR_000001010000+P_010011101*QR_000001010001+P_010111001*QR_000001010010+P_010111101*QR_000001010011+P_010211001*QR_000001010020+P_010211101*QR_000001010021+P_110011001*QR_000001010100+P_110011101*QR_000001010101+P_110111001*QR_000001010110+P_110111101*QR_000001010111+P_110211001*QR_000001010120+P_110211101*QR_000001010121);
ans_temp[ans_id*18+5]+=Pmtrx[14]*(P_010011001*QR_000000011000+P_010011101*QR_000000011001+P_010111001*QR_000000011010+P_010111101*QR_000000011011+P_010211001*QR_000000011020+P_010211101*QR_000000011021+P_110011001*QR_000000011100+P_110011101*QR_000000011101+P_110111001*QR_000000011110+P_110111101*QR_000000011111+P_110211001*QR_000000011120+P_110211101*QR_000000011121);
ans_temp[ans_id*18+3]+=Pmtrx[15]*(P_010010002*QR_011000000000+P_010010102*QR_011000000001+P_010010202*QR_011000000002+P_010110002*QR_011000000010+P_010110102*QR_011000000011+P_010110202*QR_011000000012+P_110010002*QR_011000000100+P_110010102*QR_011000000101+P_110010202*QR_011000000102+P_110110002*QR_011000000110+P_110110102*QR_011000000111+P_110110202*QR_011000000112);
ans_temp[ans_id*18+3]+=Pmtrx[16]*(P_010010002*QR_010001000000+P_010010102*QR_010001000001+P_010010202*QR_010001000002+P_010110002*QR_010001000010+P_010110102*QR_010001000011+P_010110202*QR_010001000012+P_110010002*QR_010001000100+P_110010102*QR_010001000101+P_110010202*QR_010001000102+P_110110002*QR_010001000110+P_110110102*QR_010001000111+P_110110202*QR_010001000112);
ans_temp[ans_id*18+3]+=Pmtrx[17]*(P_010010002*QR_010000001000+P_010010102*QR_010000001001+P_010010202*QR_010000001002+P_010110002*QR_010000001010+P_010110102*QR_010000001011+P_010110202*QR_010000001012+P_110010002*QR_010000001100+P_110010102*QR_010000001101+P_110010202*QR_010000001102+P_110110002*QR_010000001110+P_110110102*QR_010000001111+P_110110202*QR_010000001112);
ans_temp[ans_id*18+4]+=Pmtrx[15]*(P_010010002*QR_001010000000+P_010010102*QR_001010000001+P_010010202*QR_001010000002+P_010110002*QR_001010000010+P_010110102*QR_001010000011+P_010110202*QR_001010000012+P_110010002*QR_001010000100+P_110010102*QR_001010000101+P_110010202*QR_001010000102+P_110110002*QR_001010000110+P_110110102*QR_001010000111+P_110110202*QR_001010000112);
ans_temp[ans_id*18+4]+=Pmtrx[16]*(P_010010002*QR_000011000000+P_010010102*QR_000011000001+P_010010202*QR_000011000002+P_010110002*QR_000011000010+P_010110102*QR_000011000011+P_010110202*QR_000011000012+P_110010002*QR_000011000100+P_110010102*QR_000011000101+P_110010202*QR_000011000102+P_110110002*QR_000011000110+P_110110102*QR_000011000111+P_110110202*QR_000011000112);
ans_temp[ans_id*18+4]+=Pmtrx[17]*(P_010010002*QR_000010001000+P_010010102*QR_000010001001+P_010010202*QR_000010001002+P_010110002*QR_000010001010+P_010110102*QR_000010001011+P_010110202*QR_000010001012+P_110010002*QR_000010001100+P_110010102*QR_000010001101+P_110010202*QR_000010001102+P_110110002*QR_000010001110+P_110110102*QR_000010001111+P_110110202*QR_000010001112);
ans_temp[ans_id*18+5]+=Pmtrx[15]*(P_010010002*QR_001000010000+P_010010102*QR_001000010001+P_010010202*QR_001000010002+P_010110002*QR_001000010010+P_010110102*QR_001000010011+P_010110202*QR_001000010012+P_110010002*QR_001000010100+P_110010102*QR_001000010101+P_110010202*QR_001000010102+P_110110002*QR_001000010110+P_110110102*QR_001000010111+P_110110202*QR_001000010112);
ans_temp[ans_id*18+5]+=Pmtrx[16]*(P_010010002*QR_000001010000+P_010010102*QR_000001010001+P_010010202*QR_000001010002+P_010110002*QR_000001010010+P_010110102*QR_000001010011+P_010110202*QR_000001010012+P_110010002*QR_000001010100+P_110010102*QR_000001010101+P_110010202*QR_000001010102+P_110110002*QR_000001010110+P_110110102*QR_000001010111+P_110110202*QR_000001010112);
ans_temp[ans_id*18+5]+=Pmtrx[17]*(P_010010002*QR_000000011000+P_010010102*QR_000000011001+P_010010202*QR_000000011002+P_010110002*QR_000000011010+P_010110102*QR_000000011011+P_010110202*QR_000000011012+P_110010002*QR_000000011100+P_110010102*QR_000000011101+P_110010202*QR_000000011102+P_110110002*QR_000000011110+P_110110102*QR_000000011111+P_110110202*QR_000000011112);
ans_temp[ans_id*18+6]+=Pmtrx[0]*(P_002020000*QR_011000000000+P_002120000*QR_011000000010+P_002220000*QR_011000000020+P_102020000*QR_011000000100+P_102120000*QR_011000000110+P_102220000*QR_011000000120+P_202020000*QR_011000000200+P_202120000*QR_011000000210+P_202220000*QR_011000000220);
ans_temp[ans_id*18+6]+=Pmtrx[1]*(P_002020000*QR_010001000000+P_002120000*QR_010001000010+P_002220000*QR_010001000020+P_102020000*QR_010001000100+P_102120000*QR_010001000110+P_102220000*QR_010001000120+P_202020000*QR_010001000200+P_202120000*QR_010001000210+P_202220000*QR_010001000220);
ans_temp[ans_id*18+6]+=Pmtrx[2]*(P_002020000*QR_010000001000+P_002120000*QR_010000001010+P_002220000*QR_010000001020+P_102020000*QR_010000001100+P_102120000*QR_010000001110+P_102220000*QR_010000001120+P_202020000*QR_010000001200+P_202120000*QR_010000001210+P_202220000*QR_010000001220);
ans_temp[ans_id*18+7]+=Pmtrx[0]*(P_002020000*QR_001010000000+P_002120000*QR_001010000010+P_002220000*QR_001010000020+P_102020000*QR_001010000100+P_102120000*QR_001010000110+P_102220000*QR_001010000120+P_202020000*QR_001010000200+P_202120000*QR_001010000210+P_202220000*QR_001010000220);
ans_temp[ans_id*18+7]+=Pmtrx[1]*(P_002020000*QR_000011000000+P_002120000*QR_000011000010+P_002220000*QR_000011000020+P_102020000*QR_000011000100+P_102120000*QR_000011000110+P_102220000*QR_000011000120+P_202020000*QR_000011000200+P_202120000*QR_000011000210+P_202220000*QR_000011000220);
ans_temp[ans_id*18+7]+=Pmtrx[2]*(P_002020000*QR_000010001000+P_002120000*QR_000010001010+P_002220000*QR_000010001020+P_102020000*QR_000010001100+P_102120000*QR_000010001110+P_102220000*QR_000010001120+P_202020000*QR_000010001200+P_202120000*QR_000010001210+P_202220000*QR_000010001220);
ans_temp[ans_id*18+8]+=Pmtrx[0]*(P_002020000*QR_001000010000+P_002120000*QR_001000010010+P_002220000*QR_001000010020+P_102020000*QR_001000010100+P_102120000*QR_001000010110+P_102220000*QR_001000010120+P_202020000*QR_001000010200+P_202120000*QR_001000010210+P_202220000*QR_001000010220);
ans_temp[ans_id*18+8]+=Pmtrx[1]*(P_002020000*QR_000001010000+P_002120000*QR_000001010010+P_002220000*QR_000001010020+P_102020000*QR_000001010100+P_102120000*QR_000001010110+P_102220000*QR_000001010120+P_202020000*QR_000001010200+P_202120000*QR_000001010210+P_202220000*QR_000001010220);
ans_temp[ans_id*18+8]+=Pmtrx[2]*(P_002020000*QR_000000011000+P_002120000*QR_000000011010+P_002220000*QR_000000011020+P_102020000*QR_000000011100+P_102120000*QR_000000011110+P_102220000*QR_000000011120+P_202020000*QR_000000011200+P_202120000*QR_000000011210+P_202220000*QR_000000011220);
ans_temp[ans_id*18+6]+=Pmtrx[3]*(P_001021000*QR_011000000000+P_001121000*QR_011000000010+P_001221000*QR_011000000020+P_001321000*QR_011000000030+P_101021000*QR_011000000100+P_101121000*QR_011000000110+P_101221000*QR_011000000120+P_101321000*QR_011000000130);
ans_temp[ans_id*18+6]+=Pmtrx[4]*(P_001021000*QR_010001000000+P_001121000*QR_010001000010+P_001221000*QR_010001000020+P_001321000*QR_010001000030+P_101021000*QR_010001000100+P_101121000*QR_010001000110+P_101221000*QR_010001000120+P_101321000*QR_010001000130);
ans_temp[ans_id*18+6]+=Pmtrx[5]*(P_001021000*QR_010000001000+P_001121000*QR_010000001010+P_001221000*QR_010000001020+P_001321000*QR_010000001030+P_101021000*QR_010000001100+P_101121000*QR_010000001110+P_101221000*QR_010000001120+P_101321000*QR_010000001130);
ans_temp[ans_id*18+7]+=Pmtrx[3]*(P_001021000*QR_001010000000+P_001121000*QR_001010000010+P_001221000*QR_001010000020+P_001321000*QR_001010000030+P_101021000*QR_001010000100+P_101121000*QR_001010000110+P_101221000*QR_001010000120+P_101321000*QR_001010000130);
ans_temp[ans_id*18+7]+=Pmtrx[4]*(P_001021000*QR_000011000000+P_001121000*QR_000011000010+P_001221000*QR_000011000020+P_001321000*QR_000011000030+P_101021000*QR_000011000100+P_101121000*QR_000011000110+P_101221000*QR_000011000120+P_101321000*QR_000011000130);
ans_temp[ans_id*18+7]+=Pmtrx[5]*(P_001021000*QR_000010001000+P_001121000*QR_000010001010+P_001221000*QR_000010001020+P_001321000*QR_000010001030+P_101021000*QR_000010001100+P_101121000*QR_000010001110+P_101221000*QR_000010001120+P_101321000*QR_000010001130);
ans_temp[ans_id*18+8]+=Pmtrx[3]*(P_001021000*QR_001000010000+P_001121000*QR_001000010010+P_001221000*QR_001000010020+P_001321000*QR_001000010030+P_101021000*QR_001000010100+P_101121000*QR_001000010110+P_101221000*QR_001000010120+P_101321000*QR_001000010130);
ans_temp[ans_id*18+8]+=Pmtrx[4]*(P_001021000*QR_000001010000+P_001121000*QR_000001010010+P_001221000*QR_000001010020+P_001321000*QR_000001010030+P_101021000*QR_000001010100+P_101121000*QR_000001010110+P_101221000*QR_000001010120+P_101321000*QR_000001010130);
ans_temp[ans_id*18+8]+=Pmtrx[5]*(P_001021000*QR_000000011000+P_001121000*QR_000000011010+P_001221000*QR_000000011020+P_001321000*QR_000000011030+P_101021000*QR_000000011100+P_101121000*QR_000000011110+P_101221000*QR_000000011120+P_101321000*QR_000000011130);
ans_temp[ans_id*18+6]+=Pmtrx[6]*(P_000022000*QR_011000000000+P_000122000*QR_011000000010+P_000222000*QR_011000000020+P_000322000*QR_011000000030+P_000422000*QR_011000000040);
ans_temp[ans_id*18+6]+=Pmtrx[7]*(P_000022000*QR_010001000000+P_000122000*QR_010001000010+P_000222000*QR_010001000020+P_000322000*QR_010001000030+P_000422000*QR_010001000040);
ans_temp[ans_id*18+6]+=Pmtrx[8]*(P_000022000*QR_010000001000+P_000122000*QR_010000001010+P_000222000*QR_010000001020+P_000322000*QR_010000001030+P_000422000*QR_010000001040);
ans_temp[ans_id*18+7]+=Pmtrx[6]*(P_000022000*QR_001010000000+P_000122000*QR_001010000010+P_000222000*QR_001010000020+P_000322000*QR_001010000030+P_000422000*QR_001010000040);
ans_temp[ans_id*18+7]+=Pmtrx[7]*(P_000022000*QR_000011000000+P_000122000*QR_000011000010+P_000222000*QR_000011000020+P_000322000*QR_000011000030+P_000422000*QR_000011000040);
ans_temp[ans_id*18+7]+=Pmtrx[8]*(P_000022000*QR_000010001000+P_000122000*QR_000010001010+P_000222000*QR_000010001020+P_000322000*QR_000010001030+P_000422000*QR_000010001040);
ans_temp[ans_id*18+8]+=Pmtrx[6]*(P_000022000*QR_001000010000+P_000122000*QR_001000010010+P_000222000*QR_001000010020+P_000322000*QR_001000010030+P_000422000*QR_001000010040);
ans_temp[ans_id*18+8]+=Pmtrx[7]*(P_000022000*QR_000001010000+P_000122000*QR_000001010010+P_000222000*QR_000001010020+P_000322000*QR_000001010030+P_000422000*QR_000001010040);
ans_temp[ans_id*18+8]+=Pmtrx[8]*(P_000022000*QR_000000011000+P_000122000*QR_000000011010+P_000222000*QR_000000011020+P_000322000*QR_000000011030+P_000422000*QR_000000011040);
ans_temp[ans_id*18+6]+=Pmtrx[9]*(P_001020001*QR_011000000000+P_001020101*QR_011000000001+P_001120001*QR_011000000010+P_001120101*QR_011000000011+P_001220001*QR_011000000020+P_001220101*QR_011000000021+P_101020001*QR_011000000100+P_101020101*QR_011000000101+P_101120001*QR_011000000110+P_101120101*QR_011000000111+P_101220001*QR_011000000120+P_101220101*QR_011000000121);
ans_temp[ans_id*18+6]+=Pmtrx[10]*(P_001020001*QR_010001000000+P_001020101*QR_010001000001+P_001120001*QR_010001000010+P_001120101*QR_010001000011+P_001220001*QR_010001000020+P_001220101*QR_010001000021+P_101020001*QR_010001000100+P_101020101*QR_010001000101+P_101120001*QR_010001000110+P_101120101*QR_010001000111+P_101220001*QR_010001000120+P_101220101*QR_010001000121);
ans_temp[ans_id*18+6]+=Pmtrx[11]*(P_001020001*QR_010000001000+P_001020101*QR_010000001001+P_001120001*QR_010000001010+P_001120101*QR_010000001011+P_001220001*QR_010000001020+P_001220101*QR_010000001021+P_101020001*QR_010000001100+P_101020101*QR_010000001101+P_101120001*QR_010000001110+P_101120101*QR_010000001111+P_101220001*QR_010000001120+P_101220101*QR_010000001121);
ans_temp[ans_id*18+7]+=Pmtrx[9]*(P_001020001*QR_001010000000+P_001020101*QR_001010000001+P_001120001*QR_001010000010+P_001120101*QR_001010000011+P_001220001*QR_001010000020+P_001220101*QR_001010000021+P_101020001*QR_001010000100+P_101020101*QR_001010000101+P_101120001*QR_001010000110+P_101120101*QR_001010000111+P_101220001*QR_001010000120+P_101220101*QR_001010000121);
ans_temp[ans_id*18+7]+=Pmtrx[10]*(P_001020001*QR_000011000000+P_001020101*QR_000011000001+P_001120001*QR_000011000010+P_001120101*QR_000011000011+P_001220001*QR_000011000020+P_001220101*QR_000011000021+P_101020001*QR_000011000100+P_101020101*QR_000011000101+P_101120001*QR_000011000110+P_101120101*QR_000011000111+P_101220001*QR_000011000120+P_101220101*QR_000011000121);
ans_temp[ans_id*18+7]+=Pmtrx[11]*(P_001020001*QR_000010001000+P_001020101*QR_000010001001+P_001120001*QR_000010001010+P_001120101*QR_000010001011+P_001220001*QR_000010001020+P_001220101*QR_000010001021+P_101020001*QR_000010001100+P_101020101*QR_000010001101+P_101120001*QR_000010001110+P_101120101*QR_000010001111+P_101220001*QR_000010001120+P_101220101*QR_000010001121);
ans_temp[ans_id*18+8]+=Pmtrx[9]*(P_001020001*QR_001000010000+P_001020101*QR_001000010001+P_001120001*QR_001000010010+P_001120101*QR_001000010011+P_001220001*QR_001000010020+P_001220101*QR_001000010021+P_101020001*QR_001000010100+P_101020101*QR_001000010101+P_101120001*QR_001000010110+P_101120101*QR_001000010111+P_101220001*QR_001000010120+P_101220101*QR_001000010121);
ans_temp[ans_id*18+8]+=Pmtrx[10]*(P_001020001*QR_000001010000+P_001020101*QR_000001010001+P_001120001*QR_000001010010+P_001120101*QR_000001010011+P_001220001*QR_000001010020+P_001220101*QR_000001010021+P_101020001*QR_000001010100+P_101020101*QR_000001010101+P_101120001*QR_000001010110+P_101120101*QR_000001010111+P_101220001*QR_000001010120+P_101220101*QR_000001010121);
ans_temp[ans_id*18+8]+=Pmtrx[11]*(P_001020001*QR_000000011000+P_001020101*QR_000000011001+P_001120001*QR_000000011010+P_001120101*QR_000000011011+P_001220001*QR_000000011020+P_001220101*QR_000000011021+P_101020001*QR_000000011100+P_101020101*QR_000000011101+P_101120001*QR_000000011110+P_101120101*QR_000000011111+P_101220001*QR_000000011120+P_101220101*QR_000000011121);
ans_temp[ans_id*18+6]+=Pmtrx[12]*(P_000021001*QR_011000000000+P_000021101*QR_011000000001+P_000121001*QR_011000000010+P_000121101*QR_011000000011+P_000221001*QR_011000000020+P_000221101*QR_011000000021+P_000321001*QR_011000000030+P_000321101*QR_011000000031);
ans_temp[ans_id*18+6]+=Pmtrx[13]*(P_000021001*QR_010001000000+P_000021101*QR_010001000001+P_000121001*QR_010001000010+P_000121101*QR_010001000011+P_000221001*QR_010001000020+P_000221101*QR_010001000021+P_000321001*QR_010001000030+P_000321101*QR_010001000031);
ans_temp[ans_id*18+6]+=Pmtrx[14]*(P_000021001*QR_010000001000+P_000021101*QR_010000001001+P_000121001*QR_010000001010+P_000121101*QR_010000001011+P_000221001*QR_010000001020+P_000221101*QR_010000001021+P_000321001*QR_010000001030+P_000321101*QR_010000001031);
ans_temp[ans_id*18+7]+=Pmtrx[12]*(P_000021001*QR_001010000000+P_000021101*QR_001010000001+P_000121001*QR_001010000010+P_000121101*QR_001010000011+P_000221001*QR_001010000020+P_000221101*QR_001010000021+P_000321001*QR_001010000030+P_000321101*QR_001010000031);
ans_temp[ans_id*18+7]+=Pmtrx[13]*(P_000021001*QR_000011000000+P_000021101*QR_000011000001+P_000121001*QR_000011000010+P_000121101*QR_000011000011+P_000221001*QR_000011000020+P_000221101*QR_000011000021+P_000321001*QR_000011000030+P_000321101*QR_000011000031);
ans_temp[ans_id*18+7]+=Pmtrx[14]*(P_000021001*QR_000010001000+P_000021101*QR_000010001001+P_000121001*QR_000010001010+P_000121101*QR_000010001011+P_000221001*QR_000010001020+P_000221101*QR_000010001021+P_000321001*QR_000010001030+P_000321101*QR_000010001031);
ans_temp[ans_id*18+8]+=Pmtrx[12]*(P_000021001*QR_001000010000+P_000021101*QR_001000010001+P_000121001*QR_001000010010+P_000121101*QR_001000010011+P_000221001*QR_001000010020+P_000221101*QR_001000010021+P_000321001*QR_001000010030+P_000321101*QR_001000010031);
ans_temp[ans_id*18+8]+=Pmtrx[13]*(P_000021001*QR_000001010000+P_000021101*QR_000001010001+P_000121001*QR_000001010010+P_000121101*QR_000001010011+P_000221001*QR_000001010020+P_000221101*QR_000001010021+P_000321001*QR_000001010030+P_000321101*QR_000001010031);
ans_temp[ans_id*18+8]+=Pmtrx[14]*(P_000021001*QR_000000011000+P_000021101*QR_000000011001+P_000121001*QR_000000011010+P_000121101*QR_000000011011+P_000221001*QR_000000011020+P_000221101*QR_000000011021+P_000321001*QR_000000011030+P_000321101*QR_000000011031);
ans_temp[ans_id*18+6]+=Pmtrx[15]*(P_000020002*QR_011000000000+P_000020102*QR_011000000001+P_000020202*QR_011000000002+P_000120002*QR_011000000010+P_000120102*QR_011000000011+P_000120202*QR_011000000012+P_000220002*QR_011000000020+P_000220102*QR_011000000021+P_000220202*QR_011000000022);
ans_temp[ans_id*18+6]+=Pmtrx[16]*(P_000020002*QR_010001000000+P_000020102*QR_010001000001+P_000020202*QR_010001000002+P_000120002*QR_010001000010+P_000120102*QR_010001000011+P_000120202*QR_010001000012+P_000220002*QR_010001000020+P_000220102*QR_010001000021+P_000220202*QR_010001000022);
ans_temp[ans_id*18+6]+=Pmtrx[17]*(P_000020002*QR_010000001000+P_000020102*QR_010000001001+P_000020202*QR_010000001002+P_000120002*QR_010000001010+P_000120102*QR_010000001011+P_000120202*QR_010000001012+P_000220002*QR_010000001020+P_000220102*QR_010000001021+P_000220202*QR_010000001022);
ans_temp[ans_id*18+7]+=Pmtrx[15]*(P_000020002*QR_001010000000+P_000020102*QR_001010000001+P_000020202*QR_001010000002+P_000120002*QR_001010000010+P_000120102*QR_001010000011+P_000120202*QR_001010000012+P_000220002*QR_001010000020+P_000220102*QR_001010000021+P_000220202*QR_001010000022);
ans_temp[ans_id*18+7]+=Pmtrx[16]*(P_000020002*QR_000011000000+P_000020102*QR_000011000001+P_000020202*QR_000011000002+P_000120002*QR_000011000010+P_000120102*QR_000011000011+P_000120202*QR_000011000012+P_000220002*QR_000011000020+P_000220102*QR_000011000021+P_000220202*QR_000011000022);
ans_temp[ans_id*18+7]+=Pmtrx[17]*(P_000020002*QR_000010001000+P_000020102*QR_000010001001+P_000020202*QR_000010001002+P_000120002*QR_000010001010+P_000120102*QR_000010001011+P_000120202*QR_000010001012+P_000220002*QR_000010001020+P_000220102*QR_000010001021+P_000220202*QR_000010001022);
ans_temp[ans_id*18+8]+=Pmtrx[15]*(P_000020002*QR_001000010000+P_000020102*QR_001000010001+P_000020202*QR_001000010002+P_000120002*QR_001000010010+P_000120102*QR_001000010011+P_000120202*QR_001000010012+P_000220002*QR_001000010020+P_000220102*QR_001000010021+P_000220202*QR_001000010022);
ans_temp[ans_id*18+8]+=Pmtrx[16]*(P_000020002*QR_000001010000+P_000020102*QR_000001010001+P_000020202*QR_000001010002+P_000120002*QR_000001010010+P_000120102*QR_000001010011+P_000120202*QR_000001010012+P_000220002*QR_000001010020+P_000220102*QR_000001010021+P_000220202*QR_000001010022);
ans_temp[ans_id*18+8]+=Pmtrx[17]*(P_000020002*QR_000000011000+P_000020102*QR_000000011001+P_000020202*QR_000000011002+P_000120002*QR_000000011010+P_000120102*QR_000000011011+P_000120202*QR_000000011012+P_000220002*QR_000000011020+P_000220102*QR_000000011021+P_000220202*QR_000000011022);
ans_temp[ans_id*18+9]+=Pmtrx[0]*(P_012000010*QR_011000000000+P_012000110*QR_011000000001+P_112000010*QR_011000000100+P_112000110*QR_011000000101+P_212000010*QR_011000000200+P_212000110*QR_011000000201+P_312000010*QR_011000000300+P_312000110*QR_011000000301);
ans_temp[ans_id*18+9]+=Pmtrx[1]*(P_012000010*QR_010001000000+P_012000110*QR_010001000001+P_112000010*QR_010001000100+P_112000110*QR_010001000101+P_212000010*QR_010001000200+P_212000110*QR_010001000201+P_312000010*QR_010001000300+P_312000110*QR_010001000301);
ans_temp[ans_id*18+9]+=Pmtrx[2]*(P_012000010*QR_010000001000+P_012000110*QR_010000001001+P_112000010*QR_010000001100+P_112000110*QR_010000001101+P_212000010*QR_010000001200+P_212000110*QR_010000001201+P_312000010*QR_010000001300+P_312000110*QR_010000001301);
ans_temp[ans_id*18+10]+=Pmtrx[0]*(P_012000010*QR_001010000000+P_012000110*QR_001010000001+P_112000010*QR_001010000100+P_112000110*QR_001010000101+P_212000010*QR_001010000200+P_212000110*QR_001010000201+P_312000010*QR_001010000300+P_312000110*QR_001010000301);
ans_temp[ans_id*18+10]+=Pmtrx[1]*(P_012000010*QR_000011000000+P_012000110*QR_000011000001+P_112000010*QR_000011000100+P_112000110*QR_000011000101+P_212000010*QR_000011000200+P_212000110*QR_000011000201+P_312000010*QR_000011000300+P_312000110*QR_000011000301);
ans_temp[ans_id*18+10]+=Pmtrx[2]*(P_012000010*QR_000010001000+P_012000110*QR_000010001001+P_112000010*QR_000010001100+P_112000110*QR_000010001101+P_212000010*QR_000010001200+P_212000110*QR_000010001201+P_312000010*QR_000010001300+P_312000110*QR_000010001301);
ans_temp[ans_id*18+11]+=Pmtrx[0]*(P_012000010*QR_001000010000+P_012000110*QR_001000010001+P_112000010*QR_001000010100+P_112000110*QR_001000010101+P_212000010*QR_001000010200+P_212000110*QR_001000010201+P_312000010*QR_001000010300+P_312000110*QR_001000010301);
ans_temp[ans_id*18+11]+=Pmtrx[1]*(P_012000010*QR_000001010000+P_012000110*QR_000001010001+P_112000010*QR_000001010100+P_112000110*QR_000001010101+P_212000010*QR_000001010200+P_212000110*QR_000001010201+P_312000010*QR_000001010300+P_312000110*QR_000001010301);
ans_temp[ans_id*18+11]+=Pmtrx[2]*(P_012000010*QR_000000011000+P_012000110*QR_000000011001+P_112000010*QR_000000011100+P_112000110*QR_000000011101+P_212000010*QR_000000011200+P_212000110*QR_000000011201+P_312000010*QR_000000011300+P_312000110*QR_000000011301);
ans_temp[ans_id*18+9]+=Pmtrx[3]*(P_011001010*QR_011000000000+P_011001110*QR_011000000001+P_011101010*QR_011000000010+P_011101110*QR_011000000011+P_111001010*QR_011000000100+P_111001110*QR_011000000101+P_111101010*QR_011000000110+P_111101110*QR_011000000111+P_211001010*QR_011000000200+P_211001110*QR_011000000201+P_211101010*QR_011000000210+P_211101110*QR_011000000211);
ans_temp[ans_id*18+9]+=Pmtrx[4]*(P_011001010*QR_010001000000+P_011001110*QR_010001000001+P_011101010*QR_010001000010+P_011101110*QR_010001000011+P_111001010*QR_010001000100+P_111001110*QR_010001000101+P_111101010*QR_010001000110+P_111101110*QR_010001000111+P_211001010*QR_010001000200+P_211001110*QR_010001000201+P_211101010*QR_010001000210+P_211101110*QR_010001000211);
ans_temp[ans_id*18+9]+=Pmtrx[5]*(P_011001010*QR_010000001000+P_011001110*QR_010000001001+P_011101010*QR_010000001010+P_011101110*QR_010000001011+P_111001010*QR_010000001100+P_111001110*QR_010000001101+P_111101010*QR_010000001110+P_111101110*QR_010000001111+P_211001010*QR_010000001200+P_211001110*QR_010000001201+P_211101010*QR_010000001210+P_211101110*QR_010000001211);
ans_temp[ans_id*18+10]+=Pmtrx[3]*(P_011001010*QR_001010000000+P_011001110*QR_001010000001+P_011101010*QR_001010000010+P_011101110*QR_001010000011+P_111001010*QR_001010000100+P_111001110*QR_001010000101+P_111101010*QR_001010000110+P_111101110*QR_001010000111+P_211001010*QR_001010000200+P_211001110*QR_001010000201+P_211101010*QR_001010000210+P_211101110*QR_001010000211);
ans_temp[ans_id*18+10]+=Pmtrx[4]*(P_011001010*QR_000011000000+P_011001110*QR_000011000001+P_011101010*QR_000011000010+P_011101110*QR_000011000011+P_111001010*QR_000011000100+P_111001110*QR_000011000101+P_111101010*QR_000011000110+P_111101110*QR_000011000111+P_211001010*QR_000011000200+P_211001110*QR_000011000201+P_211101010*QR_000011000210+P_211101110*QR_000011000211);
ans_temp[ans_id*18+10]+=Pmtrx[5]*(P_011001010*QR_000010001000+P_011001110*QR_000010001001+P_011101010*QR_000010001010+P_011101110*QR_000010001011+P_111001010*QR_000010001100+P_111001110*QR_000010001101+P_111101010*QR_000010001110+P_111101110*QR_000010001111+P_211001010*QR_000010001200+P_211001110*QR_000010001201+P_211101010*QR_000010001210+P_211101110*QR_000010001211);
ans_temp[ans_id*18+11]+=Pmtrx[3]*(P_011001010*QR_001000010000+P_011001110*QR_001000010001+P_011101010*QR_001000010010+P_011101110*QR_001000010011+P_111001010*QR_001000010100+P_111001110*QR_001000010101+P_111101010*QR_001000010110+P_111101110*QR_001000010111+P_211001010*QR_001000010200+P_211001110*QR_001000010201+P_211101010*QR_001000010210+P_211101110*QR_001000010211);
ans_temp[ans_id*18+11]+=Pmtrx[4]*(P_011001010*QR_000001010000+P_011001110*QR_000001010001+P_011101010*QR_000001010010+P_011101110*QR_000001010011+P_111001010*QR_000001010100+P_111001110*QR_000001010101+P_111101010*QR_000001010110+P_111101110*QR_000001010111+P_211001010*QR_000001010200+P_211001110*QR_000001010201+P_211101010*QR_000001010210+P_211101110*QR_000001010211);
ans_temp[ans_id*18+11]+=Pmtrx[5]*(P_011001010*QR_000000011000+P_011001110*QR_000000011001+P_011101010*QR_000000011010+P_011101110*QR_000000011011+P_111001010*QR_000000011100+P_111001110*QR_000000011101+P_111101010*QR_000000011110+P_111101110*QR_000000011111+P_211001010*QR_000000011200+P_211001110*QR_000000011201+P_211101010*QR_000000011210+P_211101110*QR_000000011211);
ans_temp[ans_id*18+9]+=Pmtrx[6]*(P_010002010*QR_011000000000+P_010002110*QR_011000000001+P_010102010*QR_011000000010+P_010102110*QR_011000000011+P_010202010*QR_011000000020+P_010202110*QR_011000000021+P_110002010*QR_011000000100+P_110002110*QR_011000000101+P_110102010*QR_011000000110+P_110102110*QR_011000000111+P_110202010*QR_011000000120+P_110202110*QR_011000000121);
ans_temp[ans_id*18+9]+=Pmtrx[7]*(P_010002010*QR_010001000000+P_010002110*QR_010001000001+P_010102010*QR_010001000010+P_010102110*QR_010001000011+P_010202010*QR_010001000020+P_010202110*QR_010001000021+P_110002010*QR_010001000100+P_110002110*QR_010001000101+P_110102010*QR_010001000110+P_110102110*QR_010001000111+P_110202010*QR_010001000120+P_110202110*QR_010001000121);
ans_temp[ans_id*18+9]+=Pmtrx[8]*(P_010002010*QR_010000001000+P_010002110*QR_010000001001+P_010102010*QR_010000001010+P_010102110*QR_010000001011+P_010202010*QR_010000001020+P_010202110*QR_010000001021+P_110002010*QR_010000001100+P_110002110*QR_010000001101+P_110102010*QR_010000001110+P_110102110*QR_010000001111+P_110202010*QR_010000001120+P_110202110*QR_010000001121);
ans_temp[ans_id*18+10]+=Pmtrx[6]*(P_010002010*QR_001010000000+P_010002110*QR_001010000001+P_010102010*QR_001010000010+P_010102110*QR_001010000011+P_010202010*QR_001010000020+P_010202110*QR_001010000021+P_110002010*QR_001010000100+P_110002110*QR_001010000101+P_110102010*QR_001010000110+P_110102110*QR_001010000111+P_110202010*QR_001010000120+P_110202110*QR_001010000121);
ans_temp[ans_id*18+10]+=Pmtrx[7]*(P_010002010*QR_000011000000+P_010002110*QR_000011000001+P_010102010*QR_000011000010+P_010102110*QR_000011000011+P_010202010*QR_000011000020+P_010202110*QR_000011000021+P_110002010*QR_000011000100+P_110002110*QR_000011000101+P_110102010*QR_000011000110+P_110102110*QR_000011000111+P_110202010*QR_000011000120+P_110202110*QR_000011000121);
ans_temp[ans_id*18+10]+=Pmtrx[8]*(P_010002010*QR_000010001000+P_010002110*QR_000010001001+P_010102010*QR_000010001010+P_010102110*QR_000010001011+P_010202010*QR_000010001020+P_010202110*QR_000010001021+P_110002010*QR_000010001100+P_110002110*QR_000010001101+P_110102010*QR_000010001110+P_110102110*QR_000010001111+P_110202010*QR_000010001120+P_110202110*QR_000010001121);
ans_temp[ans_id*18+11]+=Pmtrx[6]*(P_010002010*QR_001000010000+P_010002110*QR_001000010001+P_010102010*QR_001000010010+P_010102110*QR_001000010011+P_010202010*QR_001000010020+P_010202110*QR_001000010021+P_110002010*QR_001000010100+P_110002110*QR_001000010101+P_110102010*QR_001000010110+P_110102110*QR_001000010111+P_110202010*QR_001000010120+P_110202110*QR_001000010121);
ans_temp[ans_id*18+11]+=Pmtrx[7]*(P_010002010*QR_000001010000+P_010002110*QR_000001010001+P_010102010*QR_000001010010+P_010102110*QR_000001010011+P_010202010*QR_000001010020+P_010202110*QR_000001010021+P_110002010*QR_000001010100+P_110002110*QR_000001010101+P_110102010*QR_000001010110+P_110102110*QR_000001010111+P_110202010*QR_000001010120+P_110202110*QR_000001010121);
ans_temp[ans_id*18+11]+=Pmtrx[8]*(P_010002010*QR_000000011000+P_010002110*QR_000000011001+P_010102010*QR_000000011010+P_010102110*QR_000000011011+P_010202010*QR_000000011020+P_010202110*QR_000000011021+P_110002010*QR_000000011100+P_110002110*QR_000000011101+P_110102010*QR_000000011110+P_110102110*QR_000000011111+P_110202010*QR_000000011120+P_110202110*QR_000000011121);
ans_temp[ans_id*18+9]+=Pmtrx[9]*(P_011000011*QR_011000000000+P_011000111*QR_011000000001+P_011000211*QR_011000000002+P_111000011*QR_011000000100+P_111000111*QR_011000000101+P_111000211*QR_011000000102+P_211000011*QR_011000000200+P_211000111*QR_011000000201+P_211000211*QR_011000000202);
ans_temp[ans_id*18+9]+=Pmtrx[10]*(P_011000011*QR_010001000000+P_011000111*QR_010001000001+P_011000211*QR_010001000002+P_111000011*QR_010001000100+P_111000111*QR_010001000101+P_111000211*QR_010001000102+P_211000011*QR_010001000200+P_211000111*QR_010001000201+P_211000211*QR_010001000202);
ans_temp[ans_id*18+9]+=Pmtrx[11]*(P_011000011*QR_010000001000+P_011000111*QR_010000001001+P_011000211*QR_010000001002+P_111000011*QR_010000001100+P_111000111*QR_010000001101+P_111000211*QR_010000001102+P_211000011*QR_010000001200+P_211000111*QR_010000001201+P_211000211*QR_010000001202);
ans_temp[ans_id*18+10]+=Pmtrx[9]*(P_011000011*QR_001010000000+P_011000111*QR_001010000001+P_011000211*QR_001010000002+P_111000011*QR_001010000100+P_111000111*QR_001010000101+P_111000211*QR_001010000102+P_211000011*QR_001010000200+P_211000111*QR_001010000201+P_211000211*QR_001010000202);
ans_temp[ans_id*18+10]+=Pmtrx[10]*(P_011000011*QR_000011000000+P_011000111*QR_000011000001+P_011000211*QR_000011000002+P_111000011*QR_000011000100+P_111000111*QR_000011000101+P_111000211*QR_000011000102+P_211000011*QR_000011000200+P_211000111*QR_000011000201+P_211000211*QR_000011000202);
ans_temp[ans_id*18+10]+=Pmtrx[11]*(P_011000011*QR_000010001000+P_011000111*QR_000010001001+P_011000211*QR_000010001002+P_111000011*QR_000010001100+P_111000111*QR_000010001101+P_111000211*QR_000010001102+P_211000011*QR_000010001200+P_211000111*QR_000010001201+P_211000211*QR_000010001202);
ans_temp[ans_id*18+11]+=Pmtrx[9]*(P_011000011*QR_001000010000+P_011000111*QR_001000010001+P_011000211*QR_001000010002+P_111000011*QR_001000010100+P_111000111*QR_001000010101+P_111000211*QR_001000010102+P_211000011*QR_001000010200+P_211000111*QR_001000010201+P_211000211*QR_001000010202);
ans_temp[ans_id*18+11]+=Pmtrx[10]*(P_011000011*QR_000001010000+P_011000111*QR_000001010001+P_011000211*QR_000001010002+P_111000011*QR_000001010100+P_111000111*QR_000001010101+P_111000211*QR_000001010102+P_211000011*QR_000001010200+P_211000111*QR_000001010201+P_211000211*QR_000001010202);
ans_temp[ans_id*18+11]+=Pmtrx[11]*(P_011000011*QR_000000011000+P_011000111*QR_000000011001+P_011000211*QR_000000011002+P_111000011*QR_000000011100+P_111000111*QR_000000011101+P_111000211*QR_000000011102+P_211000011*QR_000000011200+P_211000111*QR_000000011201+P_211000211*QR_000000011202);
ans_temp[ans_id*18+9]+=Pmtrx[12]*(P_010001011*QR_011000000000+P_010001111*QR_011000000001+P_010001211*QR_011000000002+P_010101011*QR_011000000010+P_010101111*QR_011000000011+P_010101211*QR_011000000012+P_110001011*QR_011000000100+P_110001111*QR_011000000101+P_110001211*QR_011000000102+P_110101011*QR_011000000110+P_110101111*QR_011000000111+P_110101211*QR_011000000112);
ans_temp[ans_id*18+9]+=Pmtrx[13]*(P_010001011*QR_010001000000+P_010001111*QR_010001000001+P_010001211*QR_010001000002+P_010101011*QR_010001000010+P_010101111*QR_010001000011+P_010101211*QR_010001000012+P_110001011*QR_010001000100+P_110001111*QR_010001000101+P_110001211*QR_010001000102+P_110101011*QR_010001000110+P_110101111*QR_010001000111+P_110101211*QR_010001000112);
ans_temp[ans_id*18+9]+=Pmtrx[14]*(P_010001011*QR_010000001000+P_010001111*QR_010000001001+P_010001211*QR_010000001002+P_010101011*QR_010000001010+P_010101111*QR_010000001011+P_010101211*QR_010000001012+P_110001011*QR_010000001100+P_110001111*QR_010000001101+P_110001211*QR_010000001102+P_110101011*QR_010000001110+P_110101111*QR_010000001111+P_110101211*QR_010000001112);
ans_temp[ans_id*18+10]+=Pmtrx[12]*(P_010001011*QR_001010000000+P_010001111*QR_001010000001+P_010001211*QR_001010000002+P_010101011*QR_001010000010+P_010101111*QR_001010000011+P_010101211*QR_001010000012+P_110001011*QR_001010000100+P_110001111*QR_001010000101+P_110001211*QR_001010000102+P_110101011*QR_001010000110+P_110101111*QR_001010000111+P_110101211*QR_001010000112);
ans_temp[ans_id*18+10]+=Pmtrx[13]*(P_010001011*QR_000011000000+P_010001111*QR_000011000001+P_010001211*QR_000011000002+P_010101011*QR_000011000010+P_010101111*QR_000011000011+P_010101211*QR_000011000012+P_110001011*QR_000011000100+P_110001111*QR_000011000101+P_110001211*QR_000011000102+P_110101011*QR_000011000110+P_110101111*QR_000011000111+P_110101211*QR_000011000112);
ans_temp[ans_id*18+10]+=Pmtrx[14]*(P_010001011*QR_000010001000+P_010001111*QR_000010001001+P_010001211*QR_000010001002+P_010101011*QR_000010001010+P_010101111*QR_000010001011+P_010101211*QR_000010001012+P_110001011*QR_000010001100+P_110001111*QR_000010001101+P_110001211*QR_000010001102+P_110101011*QR_000010001110+P_110101111*QR_000010001111+P_110101211*QR_000010001112);
ans_temp[ans_id*18+11]+=Pmtrx[12]*(P_010001011*QR_001000010000+P_010001111*QR_001000010001+P_010001211*QR_001000010002+P_010101011*QR_001000010010+P_010101111*QR_001000010011+P_010101211*QR_001000010012+P_110001011*QR_001000010100+P_110001111*QR_001000010101+P_110001211*QR_001000010102+P_110101011*QR_001000010110+P_110101111*QR_001000010111+P_110101211*QR_001000010112);
ans_temp[ans_id*18+11]+=Pmtrx[13]*(P_010001011*QR_000001010000+P_010001111*QR_000001010001+P_010001211*QR_000001010002+P_010101011*QR_000001010010+P_010101111*QR_000001010011+P_010101211*QR_000001010012+P_110001011*QR_000001010100+P_110001111*QR_000001010101+P_110001211*QR_000001010102+P_110101011*QR_000001010110+P_110101111*QR_000001010111+P_110101211*QR_000001010112);
ans_temp[ans_id*18+11]+=Pmtrx[14]*(P_010001011*QR_000000011000+P_010001111*QR_000000011001+P_010001211*QR_000000011002+P_010101011*QR_000000011010+P_010101111*QR_000000011011+P_010101211*QR_000000011012+P_110001011*QR_000000011100+P_110001111*QR_000000011101+P_110001211*QR_000000011102+P_110101011*QR_000000011110+P_110101111*QR_000000011111+P_110101211*QR_000000011112);
ans_temp[ans_id*18+9]+=Pmtrx[15]*(P_010000012*QR_011000000000+P_010000112*QR_011000000001+P_010000212*QR_011000000002+P_010000312*QR_011000000003+P_110000012*QR_011000000100+P_110000112*QR_011000000101+P_110000212*QR_011000000102+P_110000312*QR_011000000103);
ans_temp[ans_id*18+9]+=Pmtrx[16]*(P_010000012*QR_010001000000+P_010000112*QR_010001000001+P_010000212*QR_010001000002+P_010000312*QR_010001000003+P_110000012*QR_010001000100+P_110000112*QR_010001000101+P_110000212*QR_010001000102+P_110000312*QR_010001000103);
ans_temp[ans_id*18+9]+=Pmtrx[17]*(P_010000012*QR_010000001000+P_010000112*QR_010000001001+P_010000212*QR_010000001002+P_010000312*QR_010000001003+P_110000012*QR_010000001100+P_110000112*QR_010000001101+P_110000212*QR_010000001102+P_110000312*QR_010000001103);
ans_temp[ans_id*18+10]+=Pmtrx[15]*(P_010000012*QR_001010000000+P_010000112*QR_001010000001+P_010000212*QR_001010000002+P_010000312*QR_001010000003+P_110000012*QR_001010000100+P_110000112*QR_001010000101+P_110000212*QR_001010000102+P_110000312*QR_001010000103);
ans_temp[ans_id*18+10]+=Pmtrx[16]*(P_010000012*QR_000011000000+P_010000112*QR_000011000001+P_010000212*QR_000011000002+P_010000312*QR_000011000003+P_110000012*QR_000011000100+P_110000112*QR_000011000101+P_110000212*QR_000011000102+P_110000312*QR_000011000103);
ans_temp[ans_id*18+10]+=Pmtrx[17]*(P_010000012*QR_000010001000+P_010000112*QR_000010001001+P_010000212*QR_000010001002+P_010000312*QR_000010001003+P_110000012*QR_000010001100+P_110000112*QR_000010001101+P_110000212*QR_000010001102+P_110000312*QR_000010001103);
ans_temp[ans_id*18+11]+=Pmtrx[15]*(P_010000012*QR_001000010000+P_010000112*QR_001000010001+P_010000212*QR_001000010002+P_010000312*QR_001000010003+P_110000012*QR_001000010100+P_110000112*QR_001000010101+P_110000212*QR_001000010102+P_110000312*QR_001000010103);
ans_temp[ans_id*18+11]+=Pmtrx[16]*(P_010000012*QR_000001010000+P_010000112*QR_000001010001+P_010000212*QR_000001010002+P_010000312*QR_000001010003+P_110000012*QR_000001010100+P_110000112*QR_000001010101+P_110000212*QR_000001010102+P_110000312*QR_000001010103);
ans_temp[ans_id*18+11]+=Pmtrx[17]*(P_010000012*QR_000000011000+P_010000112*QR_000000011001+P_010000212*QR_000000011002+P_010000312*QR_000000011003+P_110000012*QR_000000011100+P_110000112*QR_000000011101+P_110000212*QR_000000011102+P_110000312*QR_000000011103);
ans_temp[ans_id*18+12]+=Pmtrx[0]*(P_002010010*QR_011000000000+P_002010110*QR_011000000001+P_002110010*QR_011000000010+P_002110110*QR_011000000011+P_102010010*QR_011000000100+P_102010110*QR_011000000101+P_102110010*QR_011000000110+P_102110110*QR_011000000111+P_202010010*QR_011000000200+P_202010110*QR_011000000201+P_202110010*QR_011000000210+P_202110110*QR_011000000211);
ans_temp[ans_id*18+12]+=Pmtrx[1]*(P_002010010*QR_010001000000+P_002010110*QR_010001000001+P_002110010*QR_010001000010+P_002110110*QR_010001000011+P_102010010*QR_010001000100+P_102010110*QR_010001000101+P_102110010*QR_010001000110+P_102110110*QR_010001000111+P_202010010*QR_010001000200+P_202010110*QR_010001000201+P_202110010*QR_010001000210+P_202110110*QR_010001000211);
ans_temp[ans_id*18+12]+=Pmtrx[2]*(P_002010010*QR_010000001000+P_002010110*QR_010000001001+P_002110010*QR_010000001010+P_002110110*QR_010000001011+P_102010010*QR_010000001100+P_102010110*QR_010000001101+P_102110010*QR_010000001110+P_102110110*QR_010000001111+P_202010010*QR_010000001200+P_202010110*QR_010000001201+P_202110010*QR_010000001210+P_202110110*QR_010000001211);
ans_temp[ans_id*18+13]+=Pmtrx[0]*(P_002010010*QR_001010000000+P_002010110*QR_001010000001+P_002110010*QR_001010000010+P_002110110*QR_001010000011+P_102010010*QR_001010000100+P_102010110*QR_001010000101+P_102110010*QR_001010000110+P_102110110*QR_001010000111+P_202010010*QR_001010000200+P_202010110*QR_001010000201+P_202110010*QR_001010000210+P_202110110*QR_001010000211);
ans_temp[ans_id*18+13]+=Pmtrx[1]*(P_002010010*QR_000011000000+P_002010110*QR_000011000001+P_002110010*QR_000011000010+P_002110110*QR_000011000011+P_102010010*QR_000011000100+P_102010110*QR_000011000101+P_102110010*QR_000011000110+P_102110110*QR_000011000111+P_202010010*QR_000011000200+P_202010110*QR_000011000201+P_202110010*QR_000011000210+P_202110110*QR_000011000211);
ans_temp[ans_id*18+13]+=Pmtrx[2]*(P_002010010*QR_000010001000+P_002010110*QR_000010001001+P_002110010*QR_000010001010+P_002110110*QR_000010001011+P_102010010*QR_000010001100+P_102010110*QR_000010001101+P_102110010*QR_000010001110+P_102110110*QR_000010001111+P_202010010*QR_000010001200+P_202010110*QR_000010001201+P_202110010*QR_000010001210+P_202110110*QR_000010001211);
ans_temp[ans_id*18+14]+=Pmtrx[0]*(P_002010010*QR_001000010000+P_002010110*QR_001000010001+P_002110010*QR_001000010010+P_002110110*QR_001000010011+P_102010010*QR_001000010100+P_102010110*QR_001000010101+P_102110010*QR_001000010110+P_102110110*QR_001000010111+P_202010010*QR_001000010200+P_202010110*QR_001000010201+P_202110010*QR_001000010210+P_202110110*QR_001000010211);
ans_temp[ans_id*18+14]+=Pmtrx[1]*(P_002010010*QR_000001010000+P_002010110*QR_000001010001+P_002110010*QR_000001010010+P_002110110*QR_000001010011+P_102010010*QR_000001010100+P_102010110*QR_000001010101+P_102110010*QR_000001010110+P_102110110*QR_000001010111+P_202010010*QR_000001010200+P_202010110*QR_000001010201+P_202110010*QR_000001010210+P_202110110*QR_000001010211);
ans_temp[ans_id*18+14]+=Pmtrx[2]*(P_002010010*QR_000000011000+P_002010110*QR_000000011001+P_002110010*QR_000000011010+P_002110110*QR_000000011011+P_102010010*QR_000000011100+P_102010110*QR_000000011101+P_102110010*QR_000000011110+P_102110110*QR_000000011111+P_202010010*QR_000000011200+P_202010110*QR_000000011201+P_202110010*QR_000000011210+P_202110110*QR_000000011211);
ans_temp[ans_id*18+12]+=Pmtrx[3]*(P_001011010*QR_011000000000+P_001011110*QR_011000000001+P_001111010*QR_011000000010+P_001111110*QR_011000000011+P_001211010*QR_011000000020+P_001211110*QR_011000000021+P_101011010*QR_011000000100+P_101011110*QR_011000000101+P_101111010*QR_011000000110+P_101111110*QR_011000000111+P_101211010*QR_011000000120+P_101211110*QR_011000000121);
ans_temp[ans_id*18+12]+=Pmtrx[4]*(P_001011010*QR_010001000000+P_001011110*QR_010001000001+P_001111010*QR_010001000010+P_001111110*QR_010001000011+P_001211010*QR_010001000020+P_001211110*QR_010001000021+P_101011010*QR_010001000100+P_101011110*QR_010001000101+P_101111010*QR_010001000110+P_101111110*QR_010001000111+P_101211010*QR_010001000120+P_101211110*QR_010001000121);
ans_temp[ans_id*18+12]+=Pmtrx[5]*(P_001011010*QR_010000001000+P_001011110*QR_010000001001+P_001111010*QR_010000001010+P_001111110*QR_010000001011+P_001211010*QR_010000001020+P_001211110*QR_010000001021+P_101011010*QR_010000001100+P_101011110*QR_010000001101+P_101111010*QR_010000001110+P_101111110*QR_010000001111+P_101211010*QR_010000001120+P_101211110*QR_010000001121);
ans_temp[ans_id*18+13]+=Pmtrx[3]*(P_001011010*QR_001010000000+P_001011110*QR_001010000001+P_001111010*QR_001010000010+P_001111110*QR_001010000011+P_001211010*QR_001010000020+P_001211110*QR_001010000021+P_101011010*QR_001010000100+P_101011110*QR_001010000101+P_101111010*QR_001010000110+P_101111110*QR_001010000111+P_101211010*QR_001010000120+P_101211110*QR_001010000121);
ans_temp[ans_id*18+13]+=Pmtrx[4]*(P_001011010*QR_000011000000+P_001011110*QR_000011000001+P_001111010*QR_000011000010+P_001111110*QR_000011000011+P_001211010*QR_000011000020+P_001211110*QR_000011000021+P_101011010*QR_000011000100+P_101011110*QR_000011000101+P_101111010*QR_000011000110+P_101111110*QR_000011000111+P_101211010*QR_000011000120+P_101211110*QR_000011000121);
ans_temp[ans_id*18+13]+=Pmtrx[5]*(P_001011010*QR_000010001000+P_001011110*QR_000010001001+P_001111010*QR_000010001010+P_001111110*QR_000010001011+P_001211010*QR_000010001020+P_001211110*QR_000010001021+P_101011010*QR_000010001100+P_101011110*QR_000010001101+P_101111010*QR_000010001110+P_101111110*QR_000010001111+P_101211010*QR_000010001120+P_101211110*QR_000010001121);
ans_temp[ans_id*18+14]+=Pmtrx[3]*(P_001011010*QR_001000010000+P_001011110*QR_001000010001+P_001111010*QR_001000010010+P_001111110*QR_001000010011+P_001211010*QR_001000010020+P_001211110*QR_001000010021+P_101011010*QR_001000010100+P_101011110*QR_001000010101+P_101111010*QR_001000010110+P_101111110*QR_001000010111+P_101211010*QR_001000010120+P_101211110*QR_001000010121);
ans_temp[ans_id*18+14]+=Pmtrx[4]*(P_001011010*QR_000001010000+P_001011110*QR_000001010001+P_001111010*QR_000001010010+P_001111110*QR_000001010011+P_001211010*QR_000001010020+P_001211110*QR_000001010021+P_101011010*QR_000001010100+P_101011110*QR_000001010101+P_101111010*QR_000001010110+P_101111110*QR_000001010111+P_101211010*QR_000001010120+P_101211110*QR_000001010121);
ans_temp[ans_id*18+14]+=Pmtrx[5]*(P_001011010*QR_000000011000+P_001011110*QR_000000011001+P_001111010*QR_000000011010+P_001111110*QR_000000011011+P_001211010*QR_000000011020+P_001211110*QR_000000011021+P_101011010*QR_000000011100+P_101011110*QR_000000011101+P_101111010*QR_000000011110+P_101111110*QR_000000011111+P_101211010*QR_000000011120+P_101211110*QR_000000011121);
ans_temp[ans_id*18+12]+=Pmtrx[6]*(P_000012010*QR_011000000000+P_000012110*QR_011000000001+P_000112010*QR_011000000010+P_000112110*QR_011000000011+P_000212010*QR_011000000020+P_000212110*QR_011000000021+P_000312010*QR_011000000030+P_000312110*QR_011000000031);
ans_temp[ans_id*18+12]+=Pmtrx[7]*(P_000012010*QR_010001000000+P_000012110*QR_010001000001+P_000112010*QR_010001000010+P_000112110*QR_010001000011+P_000212010*QR_010001000020+P_000212110*QR_010001000021+P_000312010*QR_010001000030+P_000312110*QR_010001000031);
ans_temp[ans_id*18+12]+=Pmtrx[8]*(P_000012010*QR_010000001000+P_000012110*QR_010000001001+P_000112010*QR_010000001010+P_000112110*QR_010000001011+P_000212010*QR_010000001020+P_000212110*QR_010000001021+P_000312010*QR_010000001030+P_000312110*QR_010000001031);
ans_temp[ans_id*18+13]+=Pmtrx[6]*(P_000012010*QR_001010000000+P_000012110*QR_001010000001+P_000112010*QR_001010000010+P_000112110*QR_001010000011+P_000212010*QR_001010000020+P_000212110*QR_001010000021+P_000312010*QR_001010000030+P_000312110*QR_001010000031);
ans_temp[ans_id*18+13]+=Pmtrx[7]*(P_000012010*QR_000011000000+P_000012110*QR_000011000001+P_000112010*QR_000011000010+P_000112110*QR_000011000011+P_000212010*QR_000011000020+P_000212110*QR_000011000021+P_000312010*QR_000011000030+P_000312110*QR_000011000031);
ans_temp[ans_id*18+13]+=Pmtrx[8]*(P_000012010*QR_000010001000+P_000012110*QR_000010001001+P_000112010*QR_000010001010+P_000112110*QR_000010001011+P_000212010*QR_000010001020+P_000212110*QR_000010001021+P_000312010*QR_000010001030+P_000312110*QR_000010001031);
ans_temp[ans_id*18+14]+=Pmtrx[6]*(P_000012010*QR_001000010000+P_000012110*QR_001000010001+P_000112010*QR_001000010010+P_000112110*QR_001000010011+P_000212010*QR_001000010020+P_000212110*QR_001000010021+P_000312010*QR_001000010030+P_000312110*QR_001000010031);
ans_temp[ans_id*18+14]+=Pmtrx[7]*(P_000012010*QR_000001010000+P_000012110*QR_000001010001+P_000112010*QR_000001010010+P_000112110*QR_000001010011+P_000212010*QR_000001010020+P_000212110*QR_000001010021+P_000312010*QR_000001010030+P_000312110*QR_000001010031);
ans_temp[ans_id*18+14]+=Pmtrx[8]*(P_000012010*QR_000000011000+P_000012110*QR_000000011001+P_000112010*QR_000000011010+P_000112110*QR_000000011011+P_000212010*QR_000000011020+P_000212110*QR_000000011021+P_000312010*QR_000000011030+P_000312110*QR_000000011031);
ans_temp[ans_id*18+12]+=Pmtrx[9]*(P_001010011*QR_011000000000+P_001010111*QR_011000000001+P_001010211*QR_011000000002+P_001110011*QR_011000000010+P_001110111*QR_011000000011+P_001110211*QR_011000000012+P_101010011*QR_011000000100+P_101010111*QR_011000000101+P_101010211*QR_011000000102+P_101110011*QR_011000000110+P_101110111*QR_011000000111+P_101110211*QR_011000000112);
ans_temp[ans_id*18+12]+=Pmtrx[10]*(P_001010011*QR_010001000000+P_001010111*QR_010001000001+P_001010211*QR_010001000002+P_001110011*QR_010001000010+P_001110111*QR_010001000011+P_001110211*QR_010001000012+P_101010011*QR_010001000100+P_101010111*QR_010001000101+P_101010211*QR_010001000102+P_101110011*QR_010001000110+P_101110111*QR_010001000111+P_101110211*QR_010001000112);
ans_temp[ans_id*18+12]+=Pmtrx[11]*(P_001010011*QR_010000001000+P_001010111*QR_010000001001+P_001010211*QR_010000001002+P_001110011*QR_010000001010+P_001110111*QR_010000001011+P_001110211*QR_010000001012+P_101010011*QR_010000001100+P_101010111*QR_010000001101+P_101010211*QR_010000001102+P_101110011*QR_010000001110+P_101110111*QR_010000001111+P_101110211*QR_010000001112);
ans_temp[ans_id*18+13]+=Pmtrx[9]*(P_001010011*QR_001010000000+P_001010111*QR_001010000001+P_001010211*QR_001010000002+P_001110011*QR_001010000010+P_001110111*QR_001010000011+P_001110211*QR_001010000012+P_101010011*QR_001010000100+P_101010111*QR_001010000101+P_101010211*QR_001010000102+P_101110011*QR_001010000110+P_101110111*QR_001010000111+P_101110211*QR_001010000112);
ans_temp[ans_id*18+13]+=Pmtrx[10]*(P_001010011*QR_000011000000+P_001010111*QR_000011000001+P_001010211*QR_000011000002+P_001110011*QR_000011000010+P_001110111*QR_000011000011+P_001110211*QR_000011000012+P_101010011*QR_000011000100+P_101010111*QR_000011000101+P_101010211*QR_000011000102+P_101110011*QR_000011000110+P_101110111*QR_000011000111+P_101110211*QR_000011000112);
ans_temp[ans_id*18+13]+=Pmtrx[11]*(P_001010011*QR_000010001000+P_001010111*QR_000010001001+P_001010211*QR_000010001002+P_001110011*QR_000010001010+P_001110111*QR_000010001011+P_001110211*QR_000010001012+P_101010011*QR_000010001100+P_101010111*QR_000010001101+P_101010211*QR_000010001102+P_101110011*QR_000010001110+P_101110111*QR_000010001111+P_101110211*QR_000010001112);
ans_temp[ans_id*18+14]+=Pmtrx[9]*(P_001010011*QR_001000010000+P_001010111*QR_001000010001+P_001010211*QR_001000010002+P_001110011*QR_001000010010+P_001110111*QR_001000010011+P_001110211*QR_001000010012+P_101010011*QR_001000010100+P_101010111*QR_001000010101+P_101010211*QR_001000010102+P_101110011*QR_001000010110+P_101110111*QR_001000010111+P_101110211*QR_001000010112);
ans_temp[ans_id*18+14]+=Pmtrx[10]*(P_001010011*QR_000001010000+P_001010111*QR_000001010001+P_001010211*QR_000001010002+P_001110011*QR_000001010010+P_001110111*QR_000001010011+P_001110211*QR_000001010012+P_101010011*QR_000001010100+P_101010111*QR_000001010101+P_101010211*QR_000001010102+P_101110011*QR_000001010110+P_101110111*QR_000001010111+P_101110211*QR_000001010112);
ans_temp[ans_id*18+14]+=Pmtrx[11]*(P_001010011*QR_000000011000+P_001010111*QR_000000011001+P_001010211*QR_000000011002+P_001110011*QR_000000011010+P_001110111*QR_000000011011+P_001110211*QR_000000011012+P_101010011*QR_000000011100+P_101010111*QR_000000011101+P_101010211*QR_000000011102+P_101110011*QR_000000011110+P_101110111*QR_000000011111+P_101110211*QR_000000011112);
ans_temp[ans_id*18+12]+=Pmtrx[12]*(P_000011011*QR_011000000000+P_000011111*QR_011000000001+P_000011211*QR_011000000002+P_000111011*QR_011000000010+P_000111111*QR_011000000011+P_000111211*QR_011000000012+P_000211011*QR_011000000020+P_000211111*QR_011000000021+P_000211211*QR_011000000022);
ans_temp[ans_id*18+12]+=Pmtrx[13]*(P_000011011*QR_010001000000+P_000011111*QR_010001000001+P_000011211*QR_010001000002+P_000111011*QR_010001000010+P_000111111*QR_010001000011+P_000111211*QR_010001000012+P_000211011*QR_010001000020+P_000211111*QR_010001000021+P_000211211*QR_010001000022);
ans_temp[ans_id*18+12]+=Pmtrx[14]*(P_000011011*QR_010000001000+P_000011111*QR_010000001001+P_000011211*QR_010000001002+P_000111011*QR_010000001010+P_000111111*QR_010000001011+P_000111211*QR_010000001012+P_000211011*QR_010000001020+P_000211111*QR_010000001021+P_000211211*QR_010000001022);
ans_temp[ans_id*18+13]+=Pmtrx[12]*(P_000011011*QR_001010000000+P_000011111*QR_001010000001+P_000011211*QR_001010000002+P_000111011*QR_001010000010+P_000111111*QR_001010000011+P_000111211*QR_001010000012+P_000211011*QR_001010000020+P_000211111*QR_001010000021+P_000211211*QR_001010000022);
ans_temp[ans_id*18+13]+=Pmtrx[13]*(P_000011011*QR_000011000000+P_000011111*QR_000011000001+P_000011211*QR_000011000002+P_000111011*QR_000011000010+P_000111111*QR_000011000011+P_000111211*QR_000011000012+P_000211011*QR_000011000020+P_000211111*QR_000011000021+P_000211211*QR_000011000022);
ans_temp[ans_id*18+13]+=Pmtrx[14]*(P_000011011*QR_000010001000+P_000011111*QR_000010001001+P_000011211*QR_000010001002+P_000111011*QR_000010001010+P_000111111*QR_000010001011+P_000111211*QR_000010001012+P_000211011*QR_000010001020+P_000211111*QR_000010001021+P_000211211*QR_000010001022);
ans_temp[ans_id*18+14]+=Pmtrx[12]*(P_000011011*QR_001000010000+P_000011111*QR_001000010001+P_000011211*QR_001000010002+P_000111011*QR_001000010010+P_000111111*QR_001000010011+P_000111211*QR_001000010012+P_000211011*QR_001000010020+P_000211111*QR_001000010021+P_000211211*QR_001000010022);
ans_temp[ans_id*18+14]+=Pmtrx[13]*(P_000011011*QR_000001010000+P_000011111*QR_000001010001+P_000011211*QR_000001010002+P_000111011*QR_000001010010+P_000111111*QR_000001010011+P_000111211*QR_000001010012+P_000211011*QR_000001010020+P_000211111*QR_000001010021+P_000211211*QR_000001010022);
ans_temp[ans_id*18+14]+=Pmtrx[14]*(P_000011011*QR_000000011000+P_000011111*QR_000000011001+P_000011211*QR_000000011002+P_000111011*QR_000000011010+P_000111111*QR_000000011011+P_000111211*QR_000000011012+P_000211011*QR_000000011020+P_000211111*QR_000000011021+P_000211211*QR_000000011022);
ans_temp[ans_id*18+12]+=Pmtrx[15]*(P_000010012*QR_011000000000+P_000010112*QR_011000000001+P_000010212*QR_011000000002+P_000010312*QR_011000000003+P_000110012*QR_011000000010+P_000110112*QR_011000000011+P_000110212*QR_011000000012+P_000110312*QR_011000000013);
ans_temp[ans_id*18+12]+=Pmtrx[16]*(P_000010012*QR_010001000000+P_000010112*QR_010001000001+P_000010212*QR_010001000002+P_000010312*QR_010001000003+P_000110012*QR_010001000010+P_000110112*QR_010001000011+P_000110212*QR_010001000012+P_000110312*QR_010001000013);
ans_temp[ans_id*18+12]+=Pmtrx[17]*(P_000010012*QR_010000001000+P_000010112*QR_010000001001+P_000010212*QR_010000001002+P_000010312*QR_010000001003+P_000110012*QR_010000001010+P_000110112*QR_010000001011+P_000110212*QR_010000001012+P_000110312*QR_010000001013);
ans_temp[ans_id*18+13]+=Pmtrx[15]*(P_000010012*QR_001010000000+P_000010112*QR_001010000001+P_000010212*QR_001010000002+P_000010312*QR_001010000003+P_000110012*QR_001010000010+P_000110112*QR_001010000011+P_000110212*QR_001010000012+P_000110312*QR_001010000013);
ans_temp[ans_id*18+13]+=Pmtrx[16]*(P_000010012*QR_000011000000+P_000010112*QR_000011000001+P_000010212*QR_000011000002+P_000010312*QR_000011000003+P_000110012*QR_000011000010+P_000110112*QR_000011000011+P_000110212*QR_000011000012+P_000110312*QR_000011000013);
ans_temp[ans_id*18+13]+=Pmtrx[17]*(P_000010012*QR_000010001000+P_000010112*QR_000010001001+P_000010212*QR_000010001002+P_000010312*QR_000010001003+P_000110012*QR_000010001010+P_000110112*QR_000010001011+P_000110212*QR_000010001012+P_000110312*QR_000010001013);
ans_temp[ans_id*18+14]+=Pmtrx[15]*(P_000010012*QR_001000010000+P_000010112*QR_001000010001+P_000010212*QR_001000010002+P_000010312*QR_001000010003+P_000110012*QR_001000010010+P_000110112*QR_001000010011+P_000110212*QR_001000010012+P_000110312*QR_001000010013);
ans_temp[ans_id*18+14]+=Pmtrx[16]*(P_000010012*QR_000001010000+P_000010112*QR_000001010001+P_000010212*QR_000001010002+P_000010312*QR_000001010003+P_000110012*QR_000001010010+P_000110112*QR_000001010011+P_000110212*QR_000001010012+P_000110312*QR_000001010013);
ans_temp[ans_id*18+14]+=Pmtrx[17]*(P_000010012*QR_000000011000+P_000010112*QR_000000011001+P_000010212*QR_000000011002+P_000010312*QR_000000011003+P_000110012*QR_000000011010+P_000110112*QR_000000011011+P_000110212*QR_000000011012+P_000110312*QR_000000011013);
ans_temp[ans_id*18+15]+=Pmtrx[0]*(P_002000020*QR_011000000000+P_002000120*QR_011000000001+P_002000220*QR_011000000002+P_102000020*QR_011000000100+P_102000120*QR_011000000101+P_102000220*QR_011000000102+P_202000020*QR_011000000200+P_202000120*QR_011000000201+P_202000220*QR_011000000202);
ans_temp[ans_id*18+15]+=Pmtrx[1]*(P_002000020*QR_010001000000+P_002000120*QR_010001000001+P_002000220*QR_010001000002+P_102000020*QR_010001000100+P_102000120*QR_010001000101+P_102000220*QR_010001000102+P_202000020*QR_010001000200+P_202000120*QR_010001000201+P_202000220*QR_010001000202);
ans_temp[ans_id*18+15]+=Pmtrx[2]*(P_002000020*QR_010000001000+P_002000120*QR_010000001001+P_002000220*QR_010000001002+P_102000020*QR_010000001100+P_102000120*QR_010000001101+P_102000220*QR_010000001102+P_202000020*QR_010000001200+P_202000120*QR_010000001201+P_202000220*QR_010000001202);
ans_temp[ans_id*18+16]+=Pmtrx[0]*(P_002000020*QR_001010000000+P_002000120*QR_001010000001+P_002000220*QR_001010000002+P_102000020*QR_001010000100+P_102000120*QR_001010000101+P_102000220*QR_001010000102+P_202000020*QR_001010000200+P_202000120*QR_001010000201+P_202000220*QR_001010000202);
ans_temp[ans_id*18+16]+=Pmtrx[1]*(P_002000020*QR_000011000000+P_002000120*QR_000011000001+P_002000220*QR_000011000002+P_102000020*QR_000011000100+P_102000120*QR_000011000101+P_102000220*QR_000011000102+P_202000020*QR_000011000200+P_202000120*QR_000011000201+P_202000220*QR_000011000202);
ans_temp[ans_id*18+16]+=Pmtrx[2]*(P_002000020*QR_000010001000+P_002000120*QR_000010001001+P_002000220*QR_000010001002+P_102000020*QR_000010001100+P_102000120*QR_000010001101+P_102000220*QR_000010001102+P_202000020*QR_000010001200+P_202000120*QR_000010001201+P_202000220*QR_000010001202);
ans_temp[ans_id*18+17]+=Pmtrx[0]*(P_002000020*QR_001000010000+P_002000120*QR_001000010001+P_002000220*QR_001000010002+P_102000020*QR_001000010100+P_102000120*QR_001000010101+P_102000220*QR_001000010102+P_202000020*QR_001000010200+P_202000120*QR_001000010201+P_202000220*QR_001000010202);
ans_temp[ans_id*18+17]+=Pmtrx[1]*(P_002000020*QR_000001010000+P_002000120*QR_000001010001+P_002000220*QR_000001010002+P_102000020*QR_000001010100+P_102000120*QR_000001010101+P_102000220*QR_000001010102+P_202000020*QR_000001010200+P_202000120*QR_000001010201+P_202000220*QR_000001010202);
ans_temp[ans_id*18+17]+=Pmtrx[2]*(P_002000020*QR_000000011000+P_002000120*QR_000000011001+P_002000220*QR_000000011002+P_102000020*QR_000000011100+P_102000120*QR_000000011101+P_102000220*QR_000000011102+P_202000020*QR_000000011200+P_202000120*QR_000000011201+P_202000220*QR_000000011202);
ans_temp[ans_id*18+15]+=Pmtrx[3]*(P_001001020*QR_011000000000+P_001001120*QR_011000000001+P_001001220*QR_011000000002+P_001101020*QR_011000000010+P_001101120*QR_011000000011+P_001101220*QR_011000000012+P_101001020*QR_011000000100+P_101001120*QR_011000000101+P_101001220*QR_011000000102+P_101101020*QR_011000000110+P_101101120*QR_011000000111+P_101101220*QR_011000000112);
ans_temp[ans_id*18+15]+=Pmtrx[4]*(P_001001020*QR_010001000000+P_001001120*QR_010001000001+P_001001220*QR_010001000002+P_001101020*QR_010001000010+P_001101120*QR_010001000011+P_001101220*QR_010001000012+P_101001020*QR_010001000100+P_101001120*QR_010001000101+P_101001220*QR_010001000102+P_101101020*QR_010001000110+P_101101120*QR_010001000111+P_101101220*QR_010001000112);
ans_temp[ans_id*18+15]+=Pmtrx[5]*(P_001001020*QR_010000001000+P_001001120*QR_010000001001+P_001001220*QR_010000001002+P_001101020*QR_010000001010+P_001101120*QR_010000001011+P_001101220*QR_010000001012+P_101001020*QR_010000001100+P_101001120*QR_010000001101+P_101001220*QR_010000001102+P_101101020*QR_010000001110+P_101101120*QR_010000001111+P_101101220*QR_010000001112);
ans_temp[ans_id*18+16]+=Pmtrx[3]*(P_001001020*QR_001010000000+P_001001120*QR_001010000001+P_001001220*QR_001010000002+P_001101020*QR_001010000010+P_001101120*QR_001010000011+P_001101220*QR_001010000012+P_101001020*QR_001010000100+P_101001120*QR_001010000101+P_101001220*QR_001010000102+P_101101020*QR_001010000110+P_101101120*QR_001010000111+P_101101220*QR_001010000112);
ans_temp[ans_id*18+16]+=Pmtrx[4]*(P_001001020*QR_000011000000+P_001001120*QR_000011000001+P_001001220*QR_000011000002+P_001101020*QR_000011000010+P_001101120*QR_000011000011+P_001101220*QR_000011000012+P_101001020*QR_000011000100+P_101001120*QR_000011000101+P_101001220*QR_000011000102+P_101101020*QR_000011000110+P_101101120*QR_000011000111+P_101101220*QR_000011000112);
ans_temp[ans_id*18+16]+=Pmtrx[5]*(P_001001020*QR_000010001000+P_001001120*QR_000010001001+P_001001220*QR_000010001002+P_001101020*QR_000010001010+P_001101120*QR_000010001011+P_001101220*QR_000010001012+P_101001020*QR_000010001100+P_101001120*QR_000010001101+P_101001220*QR_000010001102+P_101101020*QR_000010001110+P_101101120*QR_000010001111+P_101101220*QR_000010001112);
ans_temp[ans_id*18+17]+=Pmtrx[3]*(P_001001020*QR_001000010000+P_001001120*QR_001000010001+P_001001220*QR_001000010002+P_001101020*QR_001000010010+P_001101120*QR_001000010011+P_001101220*QR_001000010012+P_101001020*QR_001000010100+P_101001120*QR_001000010101+P_101001220*QR_001000010102+P_101101020*QR_001000010110+P_101101120*QR_001000010111+P_101101220*QR_001000010112);
ans_temp[ans_id*18+17]+=Pmtrx[4]*(P_001001020*QR_000001010000+P_001001120*QR_000001010001+P_001001220*QR_000001010002+P_001101020*QR_000001010010+P_001101120*QR_000001010011+P_001101220*QR_000001010012+P_101001020*QR_000001010100+P_101001120*QR_000001010101+P_101001220*QR_000001010102+P_101101020*QR_000001010110+P_101101120*QR_000001010111+P_101101220*QR_000001010112);
ans_temp[ans_id*18+17]+=Pmtrx[5]*(P_001001020*QR_000000011000+P_001001120*QR_000000011001+P_001001220*QR_000000011002+P_001101020*QR_000000011010+P_001101120*QR_000000011011+P_001101220*QR_000000011012+P_101001020*QR_000000011100+P_101001120*QR_000000011101+P_101001220*QR_000000011102+P_101101020*QR_000000011110+P_101101120*QR_000000011111+P_101101220*QR_000000011112);
ans_temp[ans_id*18+15]+=Pmtrx[6]*(P_000002020*QR_011000000000+P_000002120*QR_011000000001+P_000002220*QR_011000000002+P_000102020*QR_011000000010+P_000102120*QR_011000000011+P_000102220*QR_011000000012+P_000202020*QR_011000000020+P_000202120*QR_011000000021+P_000202220*QR_011000000022);
ans_temp[ans_id*18+15]+=Pmtrx[7]*(P_000002020*QR_010001000000+P_000002120*QR_010001000001+P_000002220*QR_010001000002+P_000102020*QR_010001000010+P_000102120*QR_010001000011+P_000102220*QR_010001000012+P_000202020*QR_010001000020+P_000202120*QR_010001000021+P_000202220*QR_010001000022);
ans_temp[ans_id*18+15]+=Pmtrx[8]*(P_000002020*QR_010000001000+P_000002120*QR_010000001001+P_000002220*QR_010000001002+P_000102020*QR_010000001010+P_000102120*QR_010000001011+P_000102220*QR_010000001012+P_000202020*QR_010000001020+P_000202120*QR_010000001021+P_000202220*QR_010000001022);
ans_temp[ans_id*18+16]+=Pmtrx[6]*(P_000002020*QR_001010000000+P_000002120*QR_001010000001+P_000002220*QR_001010000002+P_000102020*QR_001010000010+P_000102120*QR_001010000011+P_000102220*QR_001010000012+P_000202020*QR_001010000020+P_000202120*QR_001010000021+P_000202220*QR_001010000022);
ans_temp[ans_id*18+16]+=Pmtrx[7]*(P_000002020*QR_000011000000+P_000002120*QR_000011000001+P_000002220*QR_000011000002+P_000102020*QR_000011000010+P_000102120*QR_000011000011+P_000102220*QR_000011000012+P_000202020*QR_000011000020+P_000202120*QR_000011000021+P_000202220*QR_000011000022);
ans_temp[ans_id*18+16]+=Pmtrx[8]*(P_000002020*QR_000010001000+P_000002120*QR_000010001001+P_000002220*QR_000010001002+P_000102020*QR_000010001010+P_000102120*QR_000010001011+P_000102220*QR_000010001012+P_000202020*QR_000010001020+P_000202120*QR_000010001021+P_000202220*QR_000010001022);
ans_temp[ans_id*18+17]+=Pmtrx[6]*(P_000002020*QR_001000010000+P_000002120*QR_001000010001+P_000002220*QR_001000010002+P_000102020*QR_001000010010+P_000102120*QR_001000010011+P_000102220*QR_001000010012+P_000202020*QR_001000010020+P_000202120*QR_001000010021+P_000202220*QR_001000010022);
ans_temp[ans_id*18+17]+=Pmtrx[7]*(P_000002020*QR_000001010000+P_000002120*QR_000001010001+P_000002220*QR_000001010002+P_000102020*QR_000001010010+P_000102120*QR_000001010011+P_000102220*QR_000001010012+P_000202020*QR_000001010020+P_000202120*QR_000001010021+P_000202220*QR_000001010022);
ans_temp[ans_id*18+17]+=Pmtrx[8]*(P_000002020*QR_000000011000+P_000002120*QR_000000011001+P_000002220*QR_000000011002+P_000102020*QR_000000011010+P_000102120*QR_000000011011+P_000102220*QR_000000011012+P_000202020*QR_000000011020+P_000202120*QR_000000011021+P_000202220*QR_000000011022);
ans_temp[ans_id*18+15]+=Pmtrx[9]*(P_001000021*QR_011000000000+P_001000121*QR_011000000001+P_001000221*QR_011000000002+P_001000321*QR_011000000003+P_101000021*QR_011000000100+P_101000121*QR_011000000101+P_101000221*QR_011000000102+P_101000321*QR_011000000103);
ans_temp[ans_id*18+15]+=Pmtrx[10]*(P_001000021*QR_010001000000+P_001000121*QR_010001000001+P_001000221*QR_010001000002+P_001000321*QR_010001000003+P_101000021*QR_010001000100+P_101000121*QR_010001000101+P_101000221*QR_010001000102+P_101000321*QR_010001000103);
ans_temp[ans_id*18+15]+=Pmtrx[11]*(P_001000021*QR_010000001000+P_001000121*QR_010000001001+P_001000221*QR_010000001002+P_001000321*QR_010000001003+P_101000021*QR_010000001100+P_101000121*QR_010000001101+P_101000221*QR_010000001102+P_101000321*QR_010000001103);
ans_temp[ans_id*18+16]+=Pmtrx[9]*(P_001000021*QR_001010000000+P_001000121*QR_001010000001+P_001000221*QR_001010000002+P_001000321*QR_001010000003+P_101000021*QR_001010000100+P_101000121*QR_001010000101+P_101000221*QR_001010000102+P_101000321*QR_001010000103);
ans_temp[ans_id*18+16]+=Pmtrx[10]*(P_001000021*QR_000011000000+P_001000121*QR_000011000001+P_001000221*QR_000011000002+P_001000321*QR_000011000003+P_101000021*QR_000011000100+P_101000121*QR_000011000101+P_101000221*QR_000011000102+P_101000321*QR_000011000103);
ans_temp[ans_id*18+16]+=Pmtrx[11]*(P_001000021*QR_000010001000+P_001000121*QR_000010001001+P_001000221*QR_000010001002+P_001000321*QR_000010001003+P_101000021*QR_000010001100+P_101000121*QR_000010001101+P_101000221*QR_000010001102+P_101000321*QR_000010001103);
ans_temp[ans_id*18+17]+=Pmtrx[9]*(P_001000021*QR_001000010000+P_001000121*QR_001000010001+P_001000221*QR_001000010002+P_001000321*QR_001000010003+P_101000021*QR_001000010100+P_101000121*QR_001000010101+P_101000221*QR_001000010102+P_101000321*QR_001000010103);
ans_temp[ans_id*18+17]+=Pmtrx[10]*(P_001000021*QR_000001010000+P_001000121*QR_000001010001+P_001000221*QR_000001010002+P_001000321*QR_000001010003+P_101000021*QR_000001010100+P_101000121*QR_000001010101+P_101000221*QR_000001010102+P_101000321*QR_000001010103);
ans_temp[ans_id*18+17]+=Pmtrx[11]*(P_001000021*QR_000000011000+P_001000121*QR_000000011001+P_001000221*QR_000000011002+P_001000321*QR_000000011003+P_101000021*QR_000000011100+P_101000121*QR_000000011101+P_101000221*QR_000000011102+P_101000321*QR_000000011103);
ans_temp[ans_id*18+15]+=Pmtrx[12]*(P_000001021*QR_011000000000+P_000001121*QR_011000000001+P_000001221*QR_011000000002+P_000001321*QR_011000000003+P_000101021*QR_011000000010+P_000101121*QR_011000000011+P_000101221*QR_011000000012+P_000101321*QR_011000000013);
ans_temp[ans_id*18+15]+=Pmtrx[13]*(P_000001021*QR_010001000000+P_000001121*QR_010001000001+P_000001221*QR_010001000002+P_000001321*QR_010001000003+P_000101021*QR_010001000010+P_000101121*QR_010001000011+P_000101221*QR_010001000012+P_000101321*QR_010001000013);
ans_temp[ans_id*18+15]+=Pmtrx[14]*(P_000001021*QR_010000001000+P_000001121*QR_010000001001+P_000001221*QR_010000001002+P_000001321*QR_010000001003+P_000101021*QR_010000001010+P_000101121*QR_010000001011+P_000101221*QR_010000001012+P_000101321*QR_010000001013);
ans_temp[ans_id*18+16]+=Pmtrx[12]*(P_000001021*QR_001010000000+P_000001121*QR_001010000001+P_000001221*QR_001010000002+P_000001321*QR_001010000003+P_000101021*QR_001010000010+P_000101121*QR_001010000011+P_000101221*QR_001010000012+P_000101321*QR_001010000013);
ans_temp[ans_id*18+16]+=Pmtrx[13]*(P_000001021*QR_000011000000+P_000001121*QR_000011000001+P_000001221*QR_000011000002+P_000001321*QR_000011000003+P_000101021*QR_000011000010+P_000101121*QR_000011000011+P_000101221*QR_000011000012+P_000101321*QR_000011000013);
ans_temp[ans_id*18+16]+=Pmtrx[14]*(P_000001021*QR_000010001000+P_000001121*QR_000010001001+P_000001221*QR_000010001002+P_000001321*QR_000010001003+P_000101021*QR_000010001010+P_000101121*QR_000010001011+P_000101221*QR_000010001012+P_000101321*QR_000010001013);
ans_temp[ans_id*18+17]+=Pmtrx[12]*(P_000001021*QR_001000010000+P_000001121*QR_001000010001+P_000001221*QR_001000010002+P_000001321*QR_001000010003+P_000101021*QR_001000010010+P_000101121*QR_001000010011+P_000101221*QR_001000010012+P_000101321*QR_001000010013);
ans_temp[ans_id*18+17]+=Pmtrx[13]*(P_000001021*QR_000001010000+P_000001121*QR_000001010001+P_000001221*QR_000001010002+P_000001321*QR_000001010003+P_000101021*QR_000001010010+P_000101121*QR_000001010011+P_000101221*QR_000001010012+P_000101321*QR_000001010013);
ans_temp[ans_id*18+17]+=Pmtrx[14]*(P_000001021*QR_000000011000+P_000001121*QR_000000011001+P_000001221*QR_000000011002+P_000001321*QR_000000011003+P_000101021*QR_000000011010+P_000101121*QR_000000011011+P_000101221*QR_000000011012+P_000101321*QR_000000011013);
ans_temp[ans_id*18+15]+=Pmtrx[15]*(P_000000022*QR_011000000000+P_000000122*QR_011000000001+P_000000222*QR_011000000002+P_000000322*QR_011000000003+P_000000422*QR_011000000004);
ans_temp[ans_id*18+15]+=Pmtrx[16]*(P_000000022*QR_010001000000+P_000000122*QR_010001000001+P_000000222*QR_010001000002+P_000000322*QR_010001000003+P_000000422*QR_010001000004);
ans_temp[ans_id*18+15]+=Pmtrx[17]*(P_000000022*QR_010000001000+P_000000122*QR_010000001001+P_000000222*QR_010000001002+P_000000322*QR_010000001003+P_000000422*QR_010000001004);
ans_temp[ans_id*18+16]+=Pmtrx[15]*(P_000000022*QR_001010000000+P_000000122*QR_001010000001+P_000000222*QR_001010000002+P_000000322*QR_001010000003+P_000000422*QR_001010000004);
ans_temp[ans_id*18+16]+=Pmtrx[16]*(P_000000022*QR_000011000000+P_000000122*QR_000011000001+P_000000222*QR_000011000002+P_000000322*QR_000011000003+P_000000422*QR_000011000004);
ans_temp[ans_id*18+16]+=Pmtrx[17]*(P_000000022*QR_000010001000+P_000000122*QR_000010001001+P_000000222*QR_000010001002+P_000000322*QR_000010001003+P_000000422*QR_000010001004);
ans_temp[ans_id*18+17]+=Pmtrx[15]*(P_000000022*QR_001000010000+P_000000122*QR_001000010001+P_000000222*QR_001000010002+P_000000322*QR_001000010003+P_000000422*QR_001000010004);
ans_temp[ans_id*18+17]+=Pmtrx[16]*(P_000000022*QR_000001010000+P_000000122*QR_000001010001+P_000000222*QR_000001010002+P_000000322*QR_000001010003+P_000000422*QR_000001010004);
ans_temp[ans_id*18+17]+=Pmtrx[17]*(P_000000022*QR_000000011000+P_000000122*QR_000000011001+P_000000222*QR_000000011002+P_000000322*QR_000000011003+P_000000422*QR_000000011004);
}
}
__syncthreads();
int num_thread=NTHREAD/2;
while (num_thread!=0){
__syncthreads();
if(tId_x<num_thread){
for(int ians=0;ians<18;ians++){
ans_temp[tId_x*18+ians]+=ans_temp[(tId_x+num_thread)*18+ians];
}
}
num_thread/=2;
}
if(tId_x==0){
for(int ians=0;ians<18;ians++){
ans[(i_contrc_bra*contrc_ket_num+j_contrc_ket)*18+ians]=ans_temp[(tId_x)*18+ians];
}
}
}
}
}
|
6c70680bd73e922e953e387320ccce6c8ff1c472.hip | // !!! This is a file automatically generated by hipify!!!
#include "SandParticle.h"
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include "Affine.h"
#include <atomic>
#include <vector>
const int width = 100;
const int height = 10;
const int gridWidth = 800;
const int gridHeight = 800;
const dim3 threadsPerBlock(16, 16);
const int blockSize = 256;
__constant__ float gap = 0.01f;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic for a failed HIP API call and optionally abort.
// Intended to be called through the gpuErrchk(...) macro so `file`/`line`
// point at the call site.
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
    if (code != hipSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort) exit(code);  // process exit code is the HIP error value
    }
}
#define cudaCheck(x) { hipError_t err = x; if (err != hipSuccess) { printf("Cuda error: %d in %s at %s:%d\n", err, #x, __FILE__, __LINE__); assert(0); } }
// Initialise every grid cell: world position, self index, empty state.
// One thread per cell; cells are row-major with gridWidth cells per row.
__global__ void SetGrid(ParticleGrid* grids)
{
    const int cellIdx = blockIdx.x * blockDim.x + threadIdx.x;
    ParticleGrid& cell = grids[cellIdx];
    const int col = cellIdx % gridWidth;
    // NOTE(review): the row is computed with gridHeight, not gridWidth; the two
    // constants happen to be equal (800) so the result is the same -- confirm intent.
    const int row = cellIdx / gridHeight;
    cell.gridPos.x = -5 + (col * gap);
    cell.gridPos.y = row * gap;
    cell.index = cellIdx;
    cell.status = Empty;
    cell.predictedMoveInWaterParticleIndex = -1;  // no pending water move
    cell.landIndex = -1;                          // no land stored here
}
// Fill one row of spawner slots from the grid: thread t writes spawner slot
// (i * width + t) from grid cell (startIndex + t + i * gridWidth).
// Expected launch: one block of `width` threads per row `i` (see Init).
__global__ void SetSpawner(ParticleGrid* grid, SpawnerPos* spawnerPos, int startIndex, int width, int i)
{
    int gridIndex = startIndex + threadIdx.x + (i * gridWidth);
    ParticleGrid& currGrid = grid[gridIndex];
    int iIndex = i * width;  // base spawner slot for this row
    //currGrid.status = FilledWithLand;
    spawnerPos[iIndex + threadIdx.x].pos = currGrid.gridPos;
    spawnerPos[iIndex + threadIdx.x].currGridIndex = currGrid.index;
}
// Mark a horizontal strip of cells as land and record their world positions.
// Thread t handles grid cell (landStartIndex + t) and writes land slot
// (i * landWidth + t).  Expected launch: one block of landWidth threads per
// strip `i` (see SetLands).
// NOTE(review): does not set the cell's landIndex back-reference -- only
// LoadLand does; confirm DeleteLand is never run over these strips.
__global__ void SetLand(ParticleGrid* grid, Land* land, int landStartIndex, int landWidth, int i)
{
    int index = landStartIndex + threadIdx.x;
    // (removed an unused local copy of landStartIndex -- dead code)
    ParticleGrid& currGrid = grid[index];
    int iIndex = i * landWidth;  // base land slot for this strip
    currGrid.status = FilledWithLand;
    land[iIndex + threadIdx.x].landPos = currGrid.gridPos;
}
// Bootstrap/debug helper: lay out a width x height rectangle of sand
// particles starting at grid cell (row 500, column 400); one thread per
// particle.
// NOTE(review): the occupied cells are not marked FilledWithSand here --
// the only call site (in Init) is commented out; confirm before reviving it.
__global__ void CheckGridPos(ParticleSand* particles, ParticleGrid* grids)
{
    const int index = threadIdx.x + (blockIdx.x * blockDim.x);
    const int startIndex = gridWidth * 500 + 400;
    const int widthVal = index % width;    // column within the rectangle
    const int heightVal = index / width;   // row within the rectangle
    const int heightInGrid = heightVal * gridWidth;
    particles[index].currGrid = &grids[startIndex + heightInGrid + widthVal];
    particles[index].pos = grids[startIndex + heightInGrid + widthVal].gridPos;
}
// Shift every spawner slot by `dir` grid cells (e.g. +/-1 horizontally,
// +/-gridWidth vertically) and refresh its cached world position.
// No bounds check: the caller must keep the spawner on the grid.
__global__ void SpawnerMove(ParticleGrid* grids, SpawnerPos* spawnerPos, int dir)
{
    const int slot = blockIdx.x * blockDim.x + threadIdx.x;
    SpawnerPos& spawner = spawnerPos[slot];
    const int destGridIndex = spawner.currGridIndex + dir;
    spawner.currGridIndex = destGridIndex;
    spawner.pos = grids[destGridIndex].gridPos;
}
// Spawn one sand particle per spawner slot.  `lastIndex` is the number of
// particles already alive; new ones are appended after it.
// Expected launch: a single block with one thread per spawner slot.
__global__ void Spawn(ParticleSand* particle, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    int index = lastIndex + threadIdx.x;
    ParticleSand& part = particle[index];
    SpawnerPos& spawner = spawners[threadIdx.x];
    ParticleGrid& gridInfo = grid[spawner.currGridIndex];
    part.pos = spawner.pos;
    part.currGrid = &grid[spawner.currGridIndex];
    gridInfo.status = FilledWithSand;  // claim the cell immediately
}
// Spawn one land cell per spawner slot, appending land entries after
// `lastIndex`.
// NOTE(review): the land index includes blockIdx but the spawner index is
// threadIdx.x only, so launching with more than one block would reuse the
// same spawner slots; current call site (AddLandsInSpawnerPos) uses <<<1, 64>>>.
__global__ void SpawnLand(Land* lands, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    int index = lastIndex + threadIdx.x + (blockIdx.x * blockDim.x);
    Land& land = lands[index];
    SpawnerPos& spawner = spawners[threadIdx.x];
    ParticleGrid& gridInfo = grid[spawner.currGridIndex];
    land.landPos = spawner.pos;
    land.currGridIndex = spawner.currGridIndex;
    gridInfo.status = FilledWithLand;
}
// Spawn one water particle per spawner slot, appending after `lastIndex`.
// NOTE(review): like SpawnLand, the spawner index is threadIdx.x only, so
// this assumes a single-block launch (AddWatersInSpawnerPos uses <<<1, blockSize>>>).
__global__ void SpawnWater(ParticleWater* waters, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    int index = lastIndex + threadIdx.x + (blockIdx.x * blockDim.x);
    ParticleWater& water = waters[index];
    SpawnerPos& spawner = spawners[threadIdx.x];
    ParticleGrid& gridInfo = grid[spawner.currGridIndex];
    water.pos = spawner.pos;
    water.currGridIndex = spawner.currGridIndex;
    gridInfo.status = FilledWithWater;
}
// Second pass of the water update: commit one winning prediction per cell.
// MoveDownWater records the *last* contending particle index on each
// destination cell; only that particle moves, everyone else stays put (their
// predictMoveInGridIndex is left untouched for the next frame).
__global__ void MoveWaterFromPredictedPosition(ParticleWater* particles, ParticleGrid* grids, int particleNum)
{
    const int tid = threadIdx.x + (blockIdx.x * blockDim.x);
    if (tid >= particleNum)
    {
        printf("over");  // debug trace: launch larger than particle count
        return;
    }
    ParticleWater& self = particles[tid];
    const int target = self.predictMoveInGridIndex;
    if (target == -1)
        return;  // no prediction made this frame
    ParticleGrid& destCell = grids[target];
    ParticleGrid& srcCell = grids[self.currGridIndex];
    // Lose the race?  Then stay where we are.
    if (destCell.predictedMoveInWaterParticleIndex != tid)
        return;
    // We won: occupy the destination and vacate the source.
    self.pos = destCell.gridPos;
    self.currGridIndex = destCell.index;
    self.predictMoveInGridIndex = -1;
    destCell.status = FilledWithWater;
    destCell.predictedMoveInWaterParticleIndex = -1;
    srcCell.status = Empty;
}
// First pass of the water update: each particle predicts the cell it wants to
// move into (straight down, then the two diagonals, then sideways) by writing
// its own index onto the destination cell.  Conflicts are resolved afterwards
// by MoveWaterFromPredictedPosition (last writer wins).
// Grid is row-major with gridWidth cells per row; index - gridWidth is the
// cell below.
// Fixed: removed an unused `__shared__ ParticleGrid gridsInfo[256 * 2000]`
// declaration -- at ~512000 elements it exceeds the per-block shared-memory
// limit by orders of magnitude, which prevents the kernel from launching.
__global__ void MoveDownWater(ParticleWater* particles, ParticleGrid* grids, int particleNum)
{
    const int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= particleNum)
    {
        return;
    }
    ParticleWater& particle = particles[index];
    ParticleGrid& currGrid = grids[particle.currGridIndex];
    const int gridIndex = currGrid.index;
    int gridDownIndex = gridIndex - gridWidth;
    // NOTE(review): "left down" = index - (gridWidth - 1) and "right down" =
    // index - (gridWidth + 1) look mirrored for a row-major grid; behaviour is
    // symmetric, but confirm against the renderer.
    int gridLeftDownIndex = gridIndex - (gridWidth - 1);
    int gridRightDownIndex = gridIndex - (gridWidth + 1);
    int gridLeftIndex = gridIndex - 1;
    int gridRightIndex = gridIndex + 1;
    // Bottom row / side columns: redirect the affected neighbours to the
    // particle's own (occupied) cell so it cannot fall off the grid.
    if (gridIndex < gridWidth)
    {
        gridDownIndex = gridIndex;
        gridLeftDownIndex = gridIndex;
        gridRightDownIndex = gridIndex;
        gridLeftIndex = gridIndex;
        gridRightIndex = gridIndex;
    }
    if (gridIndex % gridWidth == 0)
    {
        gridLeftDownIndex = gridIndex;
        gridLeftIndex = gridIndex;
    }
    if (gridIndex % gridWidth == gridWidth - 1)
    {
        gridRightDownIndex = gridIndex;
        gridRightIndex = gridIndex;
    }
    // Defensive range check against the total cell count (256 * 2000).
    if (gridDownIndex <= 0 || gridDownIndex >= 256 * 2000 ||
        gridLeftDownIndex <= 0 || gridLeftDownIndex >= 256 * 2000 ||
        gridRightDownIndex <= 0 || gridRightDownIndex >= 256 * 2000 ||
        gridLeftIndex <= 0 || gridLeftIndex >= 256 * 2000 ||
        gridRightIndex <= 0 || gridRightIndex >= 256 * 2000)
    {
        return;
    }
    ParticleGrid& downGrid = grids[gridDownIndex];
    ParticleGrid& leftDownGrid = grids[gridLeftDownIndex];
    ParticleGrid& rightDownGrid = grids[gridRightDownIndex];
    ParticleGrid& leftGrid = grids[gridLeftIndex];
    ParticleGrid& rightGrid = grids[gridRightIndex];
    // Record a prediction in priority order: down, left-down, right-down,
    // left, right.  Only prediction fields are written here; statuses are
    // committed by MoveWaterFromPredictedPosition.
    if (downGrid.status == Empty)
    {
        downGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridDownIndex;
    }
    else if (leftDownGrid.status == Empty)
    {
        leftDownGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridLeftDownIndex;
    }
    else if (rightDownGrid.status == Empty)
    {
        rightDownGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridRightDownIndex;
    }
    else if (leftGrid.status == Empty)
    {
        leftGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridLeftIndex;
    }
    else if (rightGrid.status == Empty)
    {
        rightGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridRightIndex;
    }
    else
    {
        particle.predictMoveInGridIndex = -1;  // boxed in: no move this frame
    }
}
// Per-frame sand update: each particle falls one cell -- straight down if
// that cell is empty, otherwise down-left, otherwise down-right.
// Unlike water there is no conflict-resolution pass, so concurrent threads
// race on cell status (last writer wins).
__global__ void MoveDown(ParticleSand* particles, ParticleGrid* grids, int particleNum)
{
    const int tid = threadIdx.x + (blockIdx.x * blockDim.x);
    if (tid >= particleNum)
    {
        return;
    }
    ParticleSand& self = particles[tid];
    ParticleGrid& src = *self.currGrid;
    const int here = src.index;
    int below = here - gridWidth;
    int belowLeft = here - (gridWidth - 1);
    int belowRight = here - (gridWidth + 1);
    // Bottom row: clamp all candidates to the particle's own (occupied) cell.
    if (here < gridWidth)
    {
        below = here;
        belowLeft = here;
        belowRight = here;
    }
    // Left and right columns: no diagonal across the grid seam.
    if (here % gridWidth == 0)
    {
        belowLeft = here;
    }
    if (here % gridWidth == gridWidth - 1)
    {
        belowRight = here;
    }
    // Defensive range check against the total cell count.
    const int cellCount = 256 * 2000;
    if (below <= 0 || below >= cellCount ||
        belowLeft <= 0 || belowLeft >= cellCount ||
        belowRight <= 0 || belowRight >= cellCount)
    {
        return;
    }
    // Try the candidates in priority order; take the first empty one.
    const int candidates[3] = { below, belowLeft, belowRight };
    for (int c = 0; c < 3; ++c)
    {
        ParticleGrid& dst = grids[candidates[c]];
        if (dst.status == Empty)
        {
            self.pos = dst.gridPos;
            self.currGrid = &dst;
            src.status = Empty;
            dst.status = FilledWithSand;
            break;
        }
    }
}
__global__ void DeleteLand(Land* lands, ParticleGrid* grids, SpawnerPos* spawners)
{
int index = threadIdx.x + (blockDim.x * blockIdx.x);
SpawnerPos& spawnerInfo = spawners[index];
ParticleGrid& gridInfo = grids[spawnerInfo.currGridIndex];
if(gridInfo.status == FilledWithLand)
{
Land& landInfo = lands[gridInfo.landIndex];
landInfo.currGridIndex = 0;
landInfo.landPos = grids[landInfo.currGridIndex].gridPos;
gridInfo.status = Empty;
gridInfo.landIndex = 0;
}
}
// Unfinished stub: apparently intended to rebuild per-cell state in a single
// pass (note the dynamic shared-memory declaration); currently a no-op.
__global__ void UpdateGrid(ParticleSand* sands, ParticleWater* waters, ParticleGrid* grids)
{
    int index = threadIdx.x + (blockDim.x * blockIdx.x);
    extern __shared__ ParticleGrid gridsInfo[];  // unused so far
    ParticleGrid& grid = grids[index];
    //gridsInfo
    if(grid.status == Empty)
    {
    }
}
// Host wrapper for DeleteLand.
// NOTE(review): the hard-coded 256 threads must match the spawner slot
// count -- confirm against the caller's spawner allocation.
void DeleteLands(Land* lands, ParticleGrid* grids, SpawnerPos* spawners)
{
    DeleteLand << <1, 256 >> > (lands, grids, spawners);
}
// Advance the simulation by one frame: sand falls, then water predicts and
// commits its moves in two passes.
// Fixed: block counts now use ceiling division.  The previous truncating
// division silently skipped the last (count % blockSize) particles each frame
// and skipped the whole update when a count was below blockSize; the kernels
// already bounds-check against the particle counts, so the extra block is safe.
void SimulationUpdate(int particleNum, int gridNum, int waterNum, ParticleSand* particle, ParticleWater* water, ParticleGrid* grid)
{
    int sandGridCount = (particleNum + blockSize - 1) / blockSize;
    int waterGridCount = (waterNum + blockSize - 1) / blockSize;
    if(sandGridCount >= 1)
    {
        MoveDown << <sandGridCount, blockSize>> > (particle, grid, particleNum);
        gpuErrchk(hipPeekAtLastError());
    }
    if(waterGridCount >= 1)
    {
        // Pass 1: record predictions; pass 2: commit one winner per cell.
        MoveDownWater << <waterGridCount, blockSize >> > (water, grid, waterNum);
        gpuErrchk(hipPeekAtLastError());
        MoveWaterFromPredictedPosition << <waterGridCount, blockSize >> > (water, grid, waterNum);
        gpuErrchk(hipPeekAtLastError());
    }
}
// Host wrapper: spawn blockSize water particles (one per spawner slot),
// appended after `lastIndex`.
void AddWatersInSpawnerPos(ParticleWater* waters, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    SpawnWater << <1, blockSize >> > (waters, grid, spawners, lastIndex);
}
// Unimplemented placeholder for continuous-spawner sand spawning.
void AddSandsInContSpawnerPos(ParticleSand* particle, ParticleGrid* grid, Vector2* spawners, int lastIndex)
{
}
// Host wrapper: spawn blockSize sand particles (one per spawner slot),
// appended after `lastIndex`.
void AddSandsInSpawnerPos(ParticleSand* particle, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    Spawn << <1, blockSize >> > (particle, grid, spawners, lastIndex);
}
// Host wrapper: place 64 land cells from the first 64 spawner slots,
// appended after `lastIndex`.
void AddLandsInSpawnerPos(Land* lands, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    SpawnLand << <1, 64>> > (lands, grid, spawners, lastIndex);
}
// Host wrapper: shift all `spawnerCount` spawner slots by `dir` grid cells
// (single-block launch, one thread per slot).
void MoveSpawner(ParticleGrid* grid, SpawnerPos* spawners, int dir, int spawnerCount)
{
    SpawnerMove << <1, spawnerCount >> > (grid, spawners, dir);
}
// Place one 50-cell-wide land strip per entry of landStartRandomIndices.
// Strips may overlap if indices collide; launches are asynchronous.
void SetLands(std::vector<int>& landStartRandomIndices, ParticleSand* particle, ParticleGrid* grid, Land* land)
{
    // Cleaned up: removed two dead locals (an unused landStartIndex and a
    // landCount that only ever held the literal 1) and made the size_t->int
    // narrowing of the loop index explicit.
    const int landWidth = 50;  // cells per strip; must match the land buffer layout
    size_t vecSize = landStartRandomIndices.size();
    for (size_t i = 0; i < vecSize; ++i)
    {
        SetLand << <1, landWidth >> > (grid, land, landStartRandomIndices[i], landWidth, (int)i);
    }
}
// Restore saved land: land[i] occupies grid cell indices[i] and the cell's
// landIndex back-reference is set (unlike SetLand/SpawnLand).
// `lastIndex` is the number of valid entries; excess threads bail out.
__global__ void LoadLand(Land* lands, ParticleGrid* grids, int* indices, int lastIndex)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= lastIndex)
    {
        printf("over");  // debug trace: launch larger than entry count
        return;
    }
    const int gridIndex = indices[index];
    Land& land = lands[index];
    ParticleGrid& grid = grids[gridIndex];
    land.landPos = grid.gridPos;
    land.currGridIndex = grid.index;
    grid.status = FilledWithLand;
    grid.landIndex = index;
}
// Fill continuous-spawner slots from every second grid cell starting at
// startIndex (stride 2 leaves a gap between adjacent spawn points).
__global__ void SetContiSpawnerPos(SpawnerPos* poses, ParticleGrid* grids, int startIndex)
{
    const int slot = blockIdx.x * blockDim.x + threadIdx.x;
    const int cell = startIndex + 2 * slot;
    poses[slot].pos = grids[cell].gridPos;
    poses[slot].currGridIndex = cell;
}
// Host wrapper for LoadLand: restore `landsNum` land entries from a device
// array of grid indices.
// Fixed two launch-size bugs: the block count now uses ceiling division
// (the old truncating division launched 0 blocks whenever
// landsNum < blockSize), and the fallback guard compares against 1 --
// the previous `< 0` test could never fire for a non-negative count.
void LoadLands(int* landGridIndices, ParticleGrid* grid, Land* land, int landsNum)
{
    int landBlockCount = (landsNum + blockSize - 1) / blockSize;
    if (landBlockCount < 1)
        landBlockCount = 1;
    LoadLand << <landBlockCount, blockSize>> > (land, grid, landGridIndices, landsNum);
    gpuErrchk(hipPeekAtLastError());
}
// One-time setup: initialise the grid, the square block spawner and both
// continuous spawners.
// NOTE(review): the truncating divisions below launch 0 blocks whenever a
// count is smaller than blockSize (256) -- confirm all counts are multiples
// of 256.
void Init(int particleNum, int gridNum, int spawnerNum, int contiSpawnerNum, ParticleSand* particle, ParticleGrid* grid, Land* land, SpawnerPos* spawners
, SpawnerPos* firstContiSpawner, SpawnerPos* secondContiSpawner)
{
    int particleGridCount = particleNum / blockSize;  // only used by the commented-out call below
    int gridCount = gridNum / blockSize;
    int spawnerStartIndex = gridWidth * 300 + 500;
    SetGrid << <gridCount, blockSize >> > (grid);
    //CheckGridPos << <particleGridCount, blockSize >> > (particle, grid);
    int spawnerWidth = sqrt(spawnerNum);  // spawner is a spawnerWidth x spawnerWidth patch
    int spwanerCount = 1;
    for (int i = 0; i < spawnerWidth; ++i)
    {
        SetSpawner << <spwanerCount, spawnerWidth >> > (grid, spawners, spawnerStartIndex, spawnerWidth, i);
    }
    int contiSpawnerGridCount = contiSpawnerNum / blockSize;
    const int firstContiSpawnerStartIndex = gridWidth * 600 + 200;
    const int secondContiSpawnerStartIndex = firstContiSpawnerStartIndex + 1;
    SetContiSpawnerPos << <contiSpawnerGridCount, blockSize >> > (firstContiSpawner, grid, firstContiSpawnerStartIndex);
    SetContiSpawnerPos << <contiSpawnerGridCount, blockSize >> > (secondContiSpawner, grid, secondContiSpawnerStartIndex);
    //gpuErrchk(hipPeekAtLastError());
} | 6c70680bd73e922e953e387320ccce6c8ff1c472.cu | #include "SandParticle.h"
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include "Affine.h"
#include <atomic>
#include <vector>
const int width = 100;
const int height = 10;
const int gridWidth = 800;
const int gridHeight = 800;
const dim3 threadsPerBlock(16, 16);
const int blockSize = 256;
__constant__ float gap = 0.01f;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic for a failed CUDA API call and optionally abort.
// Intended to be called through the gpuErrchk(...) macro so `file`/`line`
// point at the call site.
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);  // process exit code is the CUDA error value
    }
}
#define cudaCheck(x) { cudaError_t err = x; if (err != cudaSuccess) { printf("Cuda error: %d in %s at %s:%d\n", err, #x, __FILE__, __LINE__); assert(0); } }
// Initialise every grid cell: world position, self index, empty state.
// One thread per cell; cells are row-major with gridWidth cells per row.
__global__ void SetGrid(ParticleGrid* grids)
{
    const int cellIdx = blockIdx.x * blockDim.x + threadIdx.x;
    ParticleGrid& cell = grids[cellIdx];
    const int col = cellIdx % gridWidth;
    // NOTE(review): the row is computed with gridHeight, not gridWidth; the two
    // constants happen to be equal (800) so the result is the same -- confirm intent.
    const int row = cellIdx / gridHeight;
    cell.gridPos.x = -5 + (col * gap);
    cell.gridPos.y = row * gap;
    cell.index = cellIdx;
    cell.status = Empty;
    cell.predictedMoveInWaterParticleIndex = -1;  // no pending water move
    cell.landIndex = -1;                          // no land stored here
}
// Fill one row of spawner slots from the grid: thread t writes spawner slot
// (i * width + t) from grid cell (startIndex + t + i * gridWidth).
// Expected launch: one block of `width` threads per row `i` (see Init).
__global__ void SetSpawner(ParticleGrid* grid, SpawnerPos* spawnerPos, int startIndex, int width, int i)
{
    int gridIndex = startIndex + threadIdx.x + (i * gridWidth);
    ParticleGrid& currGrid = grid[gridIndex];
    int iIndex = i * width;  // base spawner slot for this row
    //currGrid.status = FilledWithLand;
    spawnerPos[iIndex + threadIdx.x].pos = currGrid.gridPos;
    spawnerPos[iIndex + threadIdx.x].currGridIndex = currGrid.index;
}
// Mark a horizontal strip of cells as land and record their world positions.
// Thread t handles grid cell (landStartIndex + t) and writes land slot
// (i * landWidth + t).  Expected launch: one block of landWidth threads per
// strip `i` (see SetLands).
// NOTE(review): does not set the cell's landIndex back-reference -- only
// LoadLand does; confirm DeleteLand is never run over these strips.
__global__ void SetLand(ParticleGrid* grid, Land* land, int landStartIndex, int landWidth, int i)
{
    int index = landStartIndex + threadIdx.x;
    // (removed an unused local copy of landStartIndex -- dead code)
    ParticleGrid& currGrid = grid[index];
    int iIndex = i * landWidth;  // base land slot for this strip
    currGrid.status = FilledWithLand;
    land[iIndex + threadIdx.x].landPos = currGrid.gridPos;
}
// Bootstrap/debug helper: lay out a width x height rectangle of sand
// particles starting at grid cell (row 500, column 400); one thread per
// particle.
// NOTE(review): the occupied cells are not marked FilledWithSand here --
// the only call site (in Init) is commented out; confirm before reviving it.
__global__ void CheckGridPos(ParticleSand* particles, ParticleGrid* grids)
{
    const int index = threadIdx.x + (blockIdx.x * blockDim.x);
    const int startIndex = gridWidth * 500 + 400;
    const int widthVal = index % width;    // column within the rectangle
    const int heightVal = index / width;   // row within the rectangle
    const int heightInGrid = heightVal * gridWidth;
    particles[index].currGrid = &grids[startIndex + heightInGrid + widthVal];
    particles[index].pos = grids[startIndex + heightInGrid + widthVal].gridPos;
}
// Shift every spawner slot by `dir` grid cells (e.g. +/-1 horizontally,
// +/-gridWidth vertically) and refresh its cached world position.
// No bounds check: the caller must keep the spawner on the grid.
__global__ void SpawnerMove(ParticleGrid* grids, SpawnerPos* spawnerPos, int dir)
{
    const int slot = blockIdx.x * blockDim.x + threadIdx.x;
    SpawnerPos& spawner = spawnerPos[slot];
    const int destGridIndex = spawner.currGridIndex + dir;
    spawner.currGridIndex = destGridIndex;
    spawner.pos = grids[destGridIndex].gridPos;
}
// Spawn one sand particle per spawner slot.  `lastIndex` is the number of
// particles already alive; new ones are appended after it.
// Expected launch: a single block with one thread per spawner slot.
__global__ void Spawn(ParticleSand* particle, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    int index = lastIndex + threadIdx.x;
    ParticleSand& part = particle[index];
    SpawnerPos& spawner = spawners[threadIdx.x];
    ParticleGrid& gridInfo = grid[spawner.currGridIndex];
    part.pos = spawner.pos;
    part.currGrid = &grid[spawner.currGridIndex];
    gridInfo.status = FilledWithSand;  // claim the cell immediately
}
// Spawn one land cell per spawner slot, appending land entries after
// `lastIndex`.
// NOTE(review): the land index includes blockIdx but the spawner index is
// threadIdx.x only, so launching with more than one block would reuse the
// same spawner slots; current call site (AddLandsInSpawnerPos) uses <<<1, 64>>>.
__global__ void SpawnLand(Land* lands, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    int index = lastIndex + threadIdx.x + (blockIdx.x * blockDim.x);
    Land& land = lands[index];
    SpawnerPos& spawner = spawners[threadIdx.x];
    ParticleGrid& gridInfo = grid[spawner.currGridIndex];
    land.landPos = spawner.pos;
    land.currGridIndex = spawner.currGridIndex;
    gridInfo.status = FilledWithLand;
}
// Spawn one water particle per spawner slot, appending after `lastIndex`.
// NOTE(review): like SpawnLand, the spawner index is threadIdx.x only, so
// this assumes a single-block launch (AddWatersInSpawnerPos uses <<<1, blockSize>>>).
__global__ void SpawnWater(ParticleWater* waters, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    int index = lastIndex + threadIdx.x + (blockIdx.x * blockDim.x);
    ParticleWater& water = waters[index];
    SpawnerPos& spawner = spawners[threadIdx.x];
    ParticleGrid& gridInfo = grid[spawner.currGridIndex];
    water.pos = spawner.pos;
    water.currGridIndex = spawner.currGridIndex;
    gridInfo.status = FilledWithWater;
}
// Second pass of the water update: commit one winning prediction per cell.
// MoveDownWater records the *last* contending particle index on each
// destination cell; only that particle moves, everyone else stays put (their
// predictMoveInGridIndex is left untouched for the next frame).
__global__ void MoveWaterFromPredictedPosition(ParticleWater* particles, ParticleGrid* grids, int particleNum)
{
    const int tid = threadIdx.x + (blockIdx.x * blockDim.x);
    if (tid >= particleNum)
    {
        printf("over");  // debug trace: launch larger than particle count
        return;
    }
    ParticleWater& self = particles[tid];
    const int target = self.predictMoveInGridIndex;
    if (target == -1)
        return;  // no prediction made this frame
    ParticleGrid& destCell = grids[target];
    ParticleGrid& srcCell = grids[self.currGridIndex];
    // Lose the race?  Then stay where we are.
    if (destCell.predictedMoveInWaterParticleIndex != tid)
        return;
    // We won: occupy the destination and vacate the source.
    self.pos = destCell.gridPos;
    self.currGridIndex = destCell.index;
    self.predictMoveInGridIndex = -1;
    destCell.status = FilledWithWater;
    destCell.predictedMoveInWaterParticleIndex = -1;
    srcCell.status = Empty;
}
// First pass of the water update: each particle predicts the cell it wants to
// move into (straight down, then the two diagonals, then sideways) by writing
// its own index onto the destination cell.  Conflicts are resolved afterwards
// by MoveWaterFromPredictedPosition (last writer wins).
// Grid is row-major with gridWidth cells per row; index - gridWidth is the
// cell below.
// Fixed: removed an unused `__shared__ ParticleGrid gridsInfo[256 * 2000]`
// declaration -- at ~512000 elements it exceeds the per-block shared-memory
// limit by orders of magnitude, which prevents the kernel from launching.
__global__ void MoveDownWater(ParticleWater* particles, ParticleGrid* grids, int particleNum)
{
    const int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= particleNum)
    {
        return;
    }
    ParticleWater& particle = particles[index];
    ParticleGrid& currGrid = grids[particle.currGridIndex];
    const int gridIndex = currGrid.index;
    int gridDownIndex = gridIndex - gridWidth;
    // NOTE(review): "left down" = index - (gridWidth - 1) and "right down" =
    // index - (gridWidth + 1) look mirrored for a row-major grid; behaviour is
    // symmetric, but confirm against the renderer.
    int gridLeftDownIndex = gridIndex - (gridWidth - 1);
    int gridRightDownIndex = gridIndex - (gridWidth + 1);
    int gridLeftIndex = gridIndex - 1;
    int gridRightIndex = gridIndex + 1;
    // Bottom row / side columns: redirect the affected neighbours to the
    // particle's own (occupied) cell so it cannot fall off the grid.
    if (gridIndex < gridWidth)
    {
        gridDownIndex = gridIndex;
        gridLeftDownIndex = gridIndex;
        gridRightDownIndex = gridIndex;
        gridLeftIndex = gridIndex;
        gridRightIndex = gridIndex;
    }
    if (gridIndex % gridWidth == 0)
    {
        gridLeftDownIndex = gridIndex;
        gridLeftIndex = gridIndex;
    }
    if (gridIndex % gridWidth == gridWidth - 1)
    {
        gridRightDownIndex = gridIndex;
        gridRightIndex = gridIndex;
    }
    // Defensive range check against the total cell count (256 * 2000).
    if (gridDownIndex <= 0 || gridDownIndex >= 256 * 2000 ||
        gridLeftDownIndex <= 0 || gridLeftDownIndex >= 256 * 2000 ||
        gridRightDownIndex <= 0 || gridRightDownIndex >= 256 * 2000 ||
        gridLeftIndex <= 0 || gridLeftIndex >= 256 * 2000 ||
        gridRightIndex <= 0 || gridRightIndex >= 256 * 2000)
    {
        return;
    }
    ParticleGrid& downGrid = grids[gridDownIndex];
    ParticleGrid& leftDownGrid = grids[gridLeftDownIndex];
    ParticleGrid& rightDownGrid = grids[gridRightDownIndex];
    ParticleGrid& leftGrid = grids[gridLeftIndex];
    ParticleGrid& rightGrid = grids[gridRightIndex];
    // Record a prediction in priority order: down, left-down, right-down,
    // left, right.  Only prediction fields are written here; statuses are
    // committed by MoveWaterFromPredictedPosition.
    if (downGrid.status == Empty)
    {
        downGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridDownIndex;
    }
    else if (leftDownGrid.status == Empty)
    {
        leftDownGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridLeftDownIndex;
    }
    else if (rightDownGrid.status == Empty)
    {
        rightDownGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridRightDownIndex;
    }
    else if (leftGrid.status == Empty)
    {
        leftGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridLeftIndex;
    }
    else if (rightGrid.status == Empty)
    {
        rightGrid.predictedMoveInWaterParticleIndex = index;
        particle.predictMoveInGridIndex = gridRightIndex;
    }
    else
    {
        particle.predictMoveInGridIndex = -1;  // boxed in: no move this frame
    }
}
// Per-frame sand update: each particle falls one cell -- straight down if
// that cell is empty, otherwise down-left, otherwise down-right.
// Unlike water there is no conflict-resolution pass, so concurrent threads
// race on cell status (last writer wins).
__global__ void MoveDown(ParticleSand* particles, ParticleGrid* grids, int particleNum)
{
    const int tid = threadIdx.x + (blockIdx.x * blockDim.x);
    if (tid >= particleNum)
    {
        return;
    }
    ParticleSand& self = particles[tid];
    ParticleGrid& src = *self.currGrid;
    const int here = src.index;
    int below = here - gridWidth;
    int belowLeft = here - (gridWidth - 1);
    int belowRight = here - (gridWidth + 1);
    // Bottom row: clamp all candidates to the particle's own (occupied) cell.
    if (here < gridWidth)
    {
        below = here;
        belowLeft = here;
        belowRight = here;
    }
    // Left and right columns: no diagonal across the grid seam.
    if (here % gridWidth == 0)
    {
        belowLeft = here;
    }
    if (here % gridWidth == gridWidth - 1)
    {
        belowRight = here;
    }
    // Defensive range check against the total cell count.
    const int cellCount = 256 * 2000;
    if (below <= 0 || below >= cellCount ||
        belowLeft <= 0 || belowLeft >= cellCount ||
        belowRight <= 0 || belowRight >= cellCount)
    {
        return;
    }
    // Try the candidates in priority order; take the first empty one.
    const int candidates[3] = { below, belowLeft, belowRight };
    for (int c = 0; c < 3; ++c)
    {
        ParticleGrid& dst = grids[candidates[c]];
        if (dst.status == Empty)
        {
            self.pos = dst.gridPos;
            self.currGrid = &dst;
            src.status = Empty;
            dst.status = FilledWithSand;
            break;
        }
    }
}
// Clear the land cell under every spawner slot.  The displaced Land entry is
// "parked" on grid cell 0 rather than truly removed.
// NOTE(review): assumes gridInfo.landIndex is valid (>= 0) whenever status is
// FilledWithLand -- only LoadLand sets landIndex; land placed by SetLand /
// SpawnLand leaves it at -1, which would index lands[-1] here.  Confirm the
// callers.
__global__ void DeleteLand(Land* lands, ParticleGrid* grids, SpawnerPos* spawners)
{
    int index = threadIdx.x + (blockDim.x * blockIdx.x);
    SpawnerPos& spawnerInfo = spawners[index];
    ParticleGrid& gridInfo = grids[spawnerInfo.currGridIndex];
    if(gridInfo.status == FilledWithLand)
    {
        Land& landInfo = lands[gridInfo.landIndex];
        landInfo.currGridIndex = 0;  // park the entry on cell 0
        landInfo.landPos = grids[landInfo.currGridIndex].gridPos;
        gridInfo.status = Empty;
        gridInfo.landIndex = 0;
    }
}
// Unfinished stub: apparently intended to rebuild per-cell state in a single
// pass (note the dynamic shared-memory declaration); currently a no-op.
__global__ void UpdateGrid(ParticleSand* sands, ParticleWater* waters, ParticleGrid* grids)
{
    int index = threadIdx.x + (blockDim.x * blockIdx.x);
    extern __shared__ ParticleGrid gridsInfo[];  // unused so far
    ParticleGrid& grid = grids[index];
    //gridsInfo
    if(grid.status == Empty)
    {
    }
}
// Host wrapper for DeleteLand.
// NOTE(review): the hard-coded 256 threads must match the spawner slot
// count -- confirm against the caller's spawner allocation.
void DeleteLands(Land* lands, ParticleGrid* grids, SpawnerPos* spawners)
{
    DeleteLand << <1, 256 >> > (lands, grids, spawners);
}
// Advance the simulation by one frame: sand falls, then water predicts and
// commits its moves in two passes.
// Fixed: block counts now use ceiling division.  The previous truncating
// division silently skipped the last (count % blockSize) particles each frame
// and skipped the whole update when a count was below blockSize; the kernels
// already bounds-check against the particle counts, so the extra block is safe.
void SimulationUpdate(int particleNum, int gridNum, int waterNum, ParticleSand* particle, ParticleWater* water, ParticleGrid* grid)
{
    int sandGridCount = (particleNum + blockSize - 1) / blockSize;
    int waterGridCount = (waterNum + blockSize - 1) / blockSize;
    if(sandGridCount >= 1)
    {
        MoveDown << <sandGridCount, blockSize>> > (particle, grid, particleNum);
        gpuErrchk(cudaPeekAtLastError());
    }
    if(waterGridCount >= 1)
    {
        // Pass 1: record predictions; pass 2: commit one winner per cell.
        MoveDownWater << <waterGridCount, blockSize >> > (water, grid, waterNum);
        gpuErrchk(cudaPeekAtLastError());
        MoveWaterFromPredictedPosition << <waterGridCount, blockSize >> > (water, grid, waterNum);
        gpuErrchk(cudaPeekAtLastError());
    }
}
// Host wrapper: spawn blockSize water particles (one per spawner slot),
// appended after `lastIndex`.
void AddWatersInSpawnerPos(ParticleWater* waters, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    SpawnWater << <1, blockSize >> > (waters, grid, spawners, lastIndex);
}
// Unimplemented placeholder for continuous-spawner sand spawning.
void AddSandsInContSpawnerPos(ParticleSand* particle, ParticleGrid* grid, Vector2* spawners, int lastIndex)
{
}
// Host wrapper: spawn blockSize sand particles (one per spawner slot),
// appended after `lastIndex`.
void AddSandsInSpawnerPos(ParticleSand* particle, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    Spawn << <1, blockSize >> > (particle, grid, spawners, lastIndex);
}
// Host wrapper: place 64 land cells from the first 64 spawner slots,
// appended after `lastIndex`.
void AddLandsInSpawnerPos(Land* lands, ParticleGrid* grid, SpawnerPos* spawners, int lastIndex)
{
    SpawnLand << <1, 64>> > (lands, grid, spawners, lastIndex);
}
// Host wrapper: shift all `spawnerCount` spawner slots by `dir` grid cells
// (single-block launch, one thread per slot).
void MoveSpawner(ParticleGrid* grid, SpawnerPos* spawners, int dir, int spawnerCount)
{
    SpawnerMove << <1, spawnerCount >> > (grid, spawners, dir);
}
// Place one 50-cell-wide land strip per entry of landStartRandomIndices.
// Strips may overlap if indices collide; launches are asynchronous.
void SetLands(std::vector<int>& landStartRandomIndices, ParticleSand* particle, ParticleGrid* grid, Land* land)
{
    // Cleaned up: removed two dead locals (an unused landStartIndex and a
    // landCount that only ever held the literal 1) and made the size_t->int
    // narrowing of the loop index explicit.
    const int landWidth = 50;  // cells per strip; must match the land buffer layout
    size_t vecSize = landStartRandomIndices.size();
    for (size_t i = 0; i < vecSize; ++i)
    {
        SetLand << <1, landWidth >> > (grid, land, landStartRandomIndices[i], landWidth, (int)i);
    }
}
// Restore saved land: land[i] occupies grid cell indices[i] and the cell's
// landIndex back-reference is set (unlike SetLand/SpawnLand).
// `lastIndex` is the number of valid entries; excess threads bail out.
__global__ void LoadLand(Land* lands, ParticleGrid* grids, int* indices, int lastIndex)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= lastIndex)
    {
        printf("over");  // debug trace: launch larger than entry count
        return;
    }
    const int gridIndex = indices[index];
    Land& land = lands[index];
    ParticleGrid& grid = grids[gridIndex];
    land.landPos = grid.gridPos;
    land.currGridIndex = grid.index;
    grid.status = FilledWithLand;
    grid.landIndex = index;
}
// Fill continuous-spawner slots from every second grid cell starting at
// startIndex (stride 2 leaves a gap between adjacent spawn points).
__global__ void SetContiSpawnerPos(SpawnerPos* poses, ParticleGrid* grids, int startIndex)
{
    const int slot = blockIdx.x * blockDim.x + threadIdx.x;
    const int cell = startIndex + 2 * slot;
    poses[slot].pos = grids[cell].gridPos;
    poses[slot].currGridIndex = cell;
}
// Host wrapper for LoadLand: restore `landsNum` land entries from a device
// array of grid indices.
// Fixed two launch-size bugs: the block count now uses ceiling division
// (the old truncating division launched 0 blocks whenever
// landsNum < blockSize), and the fallback guard compares against 1 --
// the previous `< 0` test could never fire for a non-negative count.
void LoadLands(int* landGridIndices, ParticleGrid* grid, Land* land, int landsNum)
{
    int landBlockCount = (landsNum + blockSize - 1) / blockSize;
    if (landBlockCount < 1)
        landBlockCount = 1;
    LoadLand << <landBlockCount, blockSize>> > (land, grid, landGridIndices, landsNum);
    gpuErrchk(cudaPeekAtLastError());
}
// One-time setup: initialise the grid, the square block spawner and both
// continuous spawners.
// Fixed: grid and continuous-spawner block counts now use ceiling division;
// the old truncating division launched 0 blocks (an invalid configuration)
// whenever a count was below blockSize and skipped the tail otherwise.
void Init(int particleNum, int gridNum, int spawnerNum, int contiSpawnerNum, ParticleSand* particle, ParticleGrid* grid, Land* land, SpawnerPos* spawners
, SpawnerPos* firstContiSpawner, SpawnerPos* secondContiSpawner)
{
    int gridCount = (gridNum + blockSize - 1) / blockSize;
    SetGrid << <gridCount, blockSize >> > (grid);
    //int particleGridCount = (particleNum + blockSize - 1) / blockSize;
    //CheckGridPos << <particleGridCount, blockSize >> > (particle, grid);
    // The block spawner is a spawnerWidth x spawnerWidth patch of cells.
    int spawnerStartIndex = gridWidth * 300 + 500;
    int spawnerWidth = sqrt(spawnerNum);
    for (int i = 0; i < spawnerWidth; ++i)
    {
        SetSpawner << <1, spawnerWidth >> > (grid, spawners, spawnerStartIndex, spawnerWidth, i);
    }
    int contiSpawnerGridCount = (contiSpawnerNum + blockSize - 1) / blockSize;
    const int firstContiSpawnerStartIndex = gridWidth * 600 + 200;
    const int secondContiSpawnerStartIndex = firstContiSpawnerStartIndex + 1;
    SetContiSpawnerPos << <contiSpawnerGridCount, blockSize >> > (firstContiSpawner, grid, firstContiSpawnerStartIndex);
    SetContiSpawnerPos << <contiSpawnerGridCount, blockSize >> > (secondContiSpawner, grid, secondContiSpawnerStartIndex);
    //gpuErrchk(cudaPeekAtLastError());
}
62dd37be03090d18fabc64df5b98119a7632b41d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <linalg/map_then_reduce.h>
#include "benchmark.cuh"
namespace MLCommon {
namespace Bench {
namespace LinAlg {
struct Params {
int len;
};
template <typename Type>
struct Identity {
HDI Type operator()(Type a) { return a; }
};
template <typename T>
struct MapThenReduce : public Fixture {
MapThenReduce(const std::string& name, const Params& p)
: Fixture(name), params(p) {}
protected:
void allocateBuffers(const ::benchmark::State& state) override {
allocate(in, params.len, true);
allocate(out, 1, true);
}
void deallocateBuffers(const ::benchmark::State& state) override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out));
}
void runBenchmark(::benchmark::State& state) override {
for (auto _ : state) {
CudaEventTimer timer(state, scratchBuffer, stream);
MLCommon::LinAlg::mapThenSumReduce(out, params.len, Identity<T>(), stream,
in);
}
}
private:
Params params;
T *out, *in;
}; // struct MapThenReduce
static std::vector<Params> getInputs() {
return {
{1024 * 1024}, {32 * 1024 * 1024}, {1024 * 1024 * 1024},
{1024 * 1024 + 2}, {32 * 1024 * 1024 + 2}, {1024 * 1024 * 1024 + 2},
{1024 * 1024 + 1}, {32 * 1024 * 1024 + 1}, {1024 * 1024 * 1024 + 1},
};
}
PRIMS_BENCH_REGISTER(Params, MapThenReduce<float>, "mapReduce", getInputs());
PRIMS_BENCH_REGISTER(Params, MapThenReduce<double>, "mapReduce", getInputs());
} // namespace LinAlg
} // namespace Bench
} // namespace MLCommon
| 62dd37be03090d18fabc64df5b98119a7632b41d.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <linalg/map_then_reduce.h>
#include "benchmark.cuh"
namespace MLCommon {
namespace Bench {
namespace LinAlg {
struct Params {
int len;
};
template <typename Type>
struct Identity {
HDI Type operator()(Type a) { return a; }
};
template <typename T>
struct MapThenReduce : public Fixture {
MapThenReduce(const std::string& name, const Params& p)
: Fixture(name), params(p) {}
protected:
void allocateBuffers(const ::benchmark::State& state) override {
allocate(in, params.len, true);
allocate(out, 1, true);
}
void deallocateBuffers(const ::benchmark::State& state) override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out));
}
void runBenchmark(::benchmark::State& state) override {
for (auto _ : state) {
CudaEventTimer timer(state, scratchBuffer, stream);
MLCommon::LinAlg::mapThenSumReduce(out, params.len, Identity<T>(), stream,
in);
}
}
private:
Params params;
T *out, *in;
}; // struct MapThenReduce
static std::vector<Params> getInputs() {
return {
{1024 * 1024}, {32 * 1024 * 1024}, {1024 * 1024 * 1024},
{1024 * 1024 + 2}, {32 * 1024 * 1024 + 2}, {1024 * 1024 * 1024 + 2},
{1024 * 1024 + 1}, {32 * 1024 * 1024 + 1}, {1024 * 1024 * 1024 + 1},
};
}
PRIMS_BENCH_REGISTER(Params, MapThenReduce<float>, "mapReduce", getInputs());
PRIMS_BENCH_REGISTER(Params, MapThenReduce<double>, "mapReduce", getInputs());
} // namespace LinAlg
} // namespace Bench
} // namespace MLCommon
|
61d5fd06b9196c1e7077d15d9ac25a82f9169527.hip | // !!! This is a file automatically generated by hipify!!!
///sta programa calcula la versin paralelizada del algoritmo FFT_DIF_DIT_TD
///(04/01/2017)
///sta versin sirve para graficar en matlab los errores absolutos y relativos Caso: N^20, Li=N, Lo=307
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
hipfftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Nmero de elementos del vector de entrada
/// Li >>> Nmero de elementos de entrada diferentes de cero
/// Lo >>> Nmero de elementos de salida requeridos
/// loop >>> Nmero de iteraciones
/// muestras >>> Nmero de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el nmero de iteraciones requeridas
const int loop = 1;
///Ingrese el valor de N_max
const int N_max = 11;
///Ingrese el valor de Li_max
const int Li_max = 2048;
///Ingrese el valor de Lo_max
const int Lo_max = 30;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Funcin principal
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
hipSetDevice(0);
hipGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom,m;
float *parte_real;
float *parte_imag;
//float suma;
//float promedio[N_max];
FILE *da,*db;
//da = fopen("Tiempos_N20_LiN_LoVARIA_CUDA.bin","a+b"); //Crea o sobre escribe archivo
da = fopen("Resultados_N20_LiN_Lo307_real_CUDA.bin","a+b"); //Crea o sobre escribe archivo
db = fopen("Resultados_N20_LiN_Lo307_imag_CUDA.bin","a+b"); //Crea o sobre escribe archivo
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(2,i_N);
//N = N_max;
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(k_res=Lo_max;k_res <= Lo_max;k_res++)
{
Lo=k_res;
printf("\n Li = %d Lo = %d",Li,Lo);
//////////////////////////////////////////////////////////
parte_real = (float*) malloc(Lo*sizeof(float));
parte_imag = (float*) malloc(Lo*sizeof(float));
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = 0.0;
parte_imag[m] = 0.0;
}
///Se abre el archivo binario
db_open = fopen("Entrada_real_N20_C.bin","rb");
dc_open = fopen("Entrada_imag_N20_C.bin","rb");
//suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
hipEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Clculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Funcin auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Funcin auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Funcin auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
///SUMATORIAS
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m] + cuCrealf(X_host[m]);
parte_imag[m] = parte_imag[m] + cuCimagf(X_host[m]);
//printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m]));
//fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m]));
}
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
//suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
///////////////////////////////////
///PROMEDIO DE ERRORES
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m]/loop;
parte_imag[m] = parte_imag[m] /loop;
}
//////////////////////////////////
///Se imprimen los resultados en los archivos binarios
fwrite(parte_real,sizeof(float),Lo,da);
fwrite(parte_imag,sizeof(float),Lo,db);
//promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
free(parte_real);
free(parte_imag);
}
}
}
//fwrite(promedio,sizeof(float),N_max,da);
fclose(da);
fclose(db);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//sta funcin genera el vector de entrada x[n]
void vector_entrada_xn(int Li)
{
//Declaracin de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
//Se dan valores a x[n]
for(k = 0;k < N; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//sta funcin genera el arreglo W
void arreglo_W(int N)
{
//Declaracin de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//sta funcin genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaracin de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el nmero de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//sta funcin encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//sta funcin encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Funcin auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int k1,n1,n2;
//Asignacin de memoria en el device para el arreglo "x_device"
hipMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Asignacin de memoria en el device para el arreglo "y"
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
hipMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Envo de los arreglos W hacia la memoria global del device
hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Asignacin de memoria en el host para "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la funcin kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
hipMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//funcin kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generacin de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Funcin auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignacin de memoria en el device para "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignacin de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignacin de memoria en el device para "in" y "out"
hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se crea un plan
hipfftHandle plan;
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop);
//Ejecucin del plan
hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se destruye el plan
hipfftDestroy(plan);
//Se liberan los arreglos "in" y "out"
hipFree(in);
hipFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Funcin auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaracin de variables locales
int m;
//Asignacin de memoria en el device para "X"
hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignacin de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la funcin kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//funcin kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaracin de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Clculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Clculo de X(k) para 0<=k<=Dip-1.
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el mtodo directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el mtodo filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
| 61d5fd06b9196c1e7077d15d9ac25a82f9169527.cu | ///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(04/01/2017)
///Ésta versión sirve para graficar en matlab los errores absolutos y relativos Caso: N^20, Li=N, Lo=307
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
cufftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 1;
///Ingrese el valor de N_max
const int N_max = 11;
///Ingrese el valor de Li_max
const int Li_max = 2048;
///Ingrese el valor de Lo_max
const int Lo_max = 30;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIÓN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
cudaSetDevice(0);
cudaGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom,m;
float *parte_real;
float *parte_imag;
//float suma;
//float promedio[N_max];
FILE *da,*db;
//da = fopen("Tiempos_N20_LiN_LoVARIA_CUDA.bin","a+b"); //Crea o sobre escribe archivo
da = fopen("Resultados_N20_LiN_Lo307_real_CUDA.bin","a+b"); //Crea o sobre escribe archivo
db = fopen("Resultados_N20_LiN_Lo307_imag_CUDA.bin","a+b"); //Crea o sobre escribe archivo
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(2,i_N);
//N = N_max;
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(k_res=Lo_max;k_res <= Lo_max;k_res++)
{
Lo=k_res;
printf("\n Li = %d Lo = %d",Li,Lo);
//////////////////////////////////////////////////////////
parte_real = (float*) malloc(Lo*sizeof(float));
parte_imag = (float*) malloc(Lo*sizeof(float));
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = 0.0;
parte_imag[m] = 0.0;
}
///Se abre el archivo binario
db_open = fopen("Entrada_real_N20_C.bin","rb");
dc_open = fopen("Entrada_imag_N20_C.bin","rb");
//suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
cudaEvent_t start_app, stop_app;
cudaEventCreate(&start_app);
cudaEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
cudaEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
///SUMATORIAS
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m] + cuCrealf(X_host[m]);
parte_imag[m] = parte_imag[m] + cuCimagf(X_host[m]);
//printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m]));
//fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m]));
}
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
cudaEventRecord(stop_app,0);
cudaEventSynchronize(stop_app);
cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
//suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
cudaEventDestroy(start_app);
cudaEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
cudaFree(x_device);
cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
}
///////////////////////////////////
///PROMEDIO DE ERRORES
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m]/loop;
parte_imag[m] = parte_imag[m] /loop;
}
//////////////////////////////////
///Se imprimen los resultados en los archivos binarios
fwrite(parte_real,sizeof(float),Lo,da);
fwrite(parte_imag,sizeof(float),Lo,db);
//promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
free(parte_real);
free(parte_imag);
}
}
}
//fwrite(promedio,sizeof(float),N_max,da);
fclose(da);
fclose(db);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Generates the input vector x[n]: reads N real/imaginary float samples from
// the already-open binary files db_open / dc_open into the global x_host array.
// NOTE(review): the Li parameter is only referenced by the commented-out debug
// print below; the read length is the global N -- confirm that is intended.
void vector_entrada_xn(int Li)
{
// Local variables
int k;
float *buffer_real,*buffer_imag;
// Allocate host memory for x_host (freed later by the caller's main loop)
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
// Read the input vector from the binary files
// NOTE(review): fread return values are not checked; a short read leaves part
// of the buffers uninitialized -- confirm the files hold at least N floats.
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
// Assemble the complex samples x[n]
for(k = 0;k < N; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
// Builds the global twiddle-factor table W_host for an N-point transform.
// Slot m (0-based) holds the factor of exponent m+1:
//   W_host[m] = cos(2*pi*(m+1)/N) - i*sin(2*pi*(m+1)/N)
// i.e. the table is shifted by one, so exponent e lives at slot e-1 and
// exponent N (== 1+0i) at slot N-1.
void arreglo_W(int N)
{
    // Host allocation for the table (released by the caller's main loop).
    W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
    // Fill slot m with the twiddle factor of exponent m+1.
    for (int m = 0; m < N; m++)
    {
        double angle = (2*CUDART_PI*(m+1))/N;
        W_host[m] = make_cuFloatComplex((float)cos(angle), -(float)sin(angle));
    }
}
// Chooses the global decimation factors Dip and Dop for the FFT decomposition
// N = Dip * Dop * P.  The ideal (real-valued) factors are N/Li and N/Lo; this
// routine enumerates the divisors of N (via factor() and product()) and picks
// the divisor pair, with Dip <= N/Li, that minimizes the Euclidean distance
// to the ideal pair.  Results are written to the globals Dip and Dop.
void asign_rap(int N,int Li,int Lo)
{
// Local variables
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
// Initializations
G = 0;
svF = 0;
// Ideal (non-integer) Dip and Dop factors
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
// Find the prime factors of N:
// vF holds the factors, svF the number of factors.
factor(N);
/*
Nh collects the distinct primes of vF; k[h] counts how many times
prime Nh[h] is repeated (its exponent in N).  G+1 is the number of
distinct primes.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
product() stores in the global Prod[] every divisor of N (all
combinations of the prime powers); the global a is the divisor count.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
// Decode divisor index g-1 in the mixed-radix system of exponents and
// build nk[h] = k[h] - e[h], the complementary exponents, so that the
// next product() call enumerates exactly the divisors of N/Dipt[g-1].
// NOTE(review): Poss=floor(Pos/(k[h]+1)) operates on ints, so floor()
// is redundant here -- presumably kept for clarity; confirm.
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
// Keep the candidate pair closest to the ideal (Diprapt, Doprapt).
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
// Trial-division factorization: appends the prime factors of N, in
// non-decreasing order and with multiplicity, to the global array vF,
// advancing the global counter svF for each factor stored.  The caller is
// responsible for resetting svF beforehand (asign_rap does).
void factor(int N)
{
    long remaining = N;
    // Try every candidate divisor from 2 upwards.  By the time a composite
    // candidate is reached, its prime factors have already been divided out
    // of 'remaining', so only primes ever pass the divisibility test.
    for (int cand = 2; cand <= remaining; cand++)
    {
        while (remaining % cand == 0)
        {
            vF[svF++] = cand;
            remaining /= cand;
        }
    }
}
// Expands a prime factorization into the full divisor list.
// vector_1[0..valor] holds the distinct primes, vector_2[0..valor] their
// exponents.  On return the global Prod[] contains every product
// prod(vector_1[d]^e_d) with 0 <= e_d <= vector_2[d], and the global a is
// the number of such combinations.
// For each prime d the current block of a entries is copied a further
// vector_2[d] times, each copy scaled by the next power pNh of the prime;
// cont walks the original block (indices 0..s-1) and is reset per power.
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
// Host wrapper for the FFT input stage: allocates x/W/y on the device,
// uploads x_host and W_host, sizes a 2-D launch grid (columns = P*Dop,
// rows = Dip) and runs inputStage_kernel.  Relies on the globals Li, N, P,
// Dip, Dop and the *_device pointers; the caller frees the device buffers
// at the end of each iteration.
// NOTE(review): none of the CUDA API calls here are error-checked.
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Local variables (k1,n1,n2 are only used by the commented-out debug dump)
int k1,n1,n2;
// Device allocation for the "x_device" array
cudaMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
// Device allocation for the "W_device" array
cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
// Device allocation for the "y" array
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
// Upload x_host into x_device
cudaMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
// Upload the twiddle table W to device global memory
cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
// Host allocation for "y" (kept for the commented debug dump)
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
// Launch configuration for the "inputStage" kernel:
// 32x32 tiles when the problem exceeds one block in either dimension.
dim3 gridDim(1,1,1);
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
// Launch "inputStage_kernel"
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
// Wait for the kernel to finish completely
cudaDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
// Input-stage kernel of the mixed-radix FFT decomposition.
// Maps the Li input samples x[n] onto the 3-D working array y(k1,n1,n2) of
// size Dip x Dop x P (flattened as (k1*Dop + n1)*P + n2), multiplying by the
// twiddle factor W^(n*k1), and zero-pads the range n >= Li.
// Launch layout: x-dimension covers the P*Dop columns (index n), y-dimension
// covers the Dip rows (index k1).
// W stores the exponent-(m+1) twiddle at slot m (see arreglo_W), so exponent
// e lives at slot e-1 and W^N == 1 at slot N-1.
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
    // Global thread coordinates.
    int n = blockDim.x *blockIdx.x + threadIdx.x;
    int k1 = blockDim.y *blockIdx.y + threadIdx.y;
    if( (n < (P*Dop)) && (k1 < Dip))
    {
        // Decompose the flat column index: n = n1 + Dop*n2.
        int n2 = n / Dop;
        int n1 = n - (Dop*n2);
        // Element that depends on x[0].
        if(n == 0)
        {
            y[(k1*Dop*P)+(0*P)+ 0] = x[0];
        }
        // Map x[n] onto the inputs of the first set of Dop DFTs.
        if((n >= 1) && (n <= (Li-1)))
        {
            cuFloatComplex t1 = x[n];
            if(k1 == 0)
            {
                y[(0*Dop*P)+(n1*P)+ n2] = t1;
            }
            if(k1 >= 1)
            {
                // Twiddle lookup for exponent e = (n*k1) mod N.  The original
                // code indexed W[e-1] directly, which reads W[-1] (out of
                // bounds) whenever e == 0; adding N-1 before the final modulo
                // wraps that case to slot N-1 (which holds W^N == 1) while
                // leaving every other case unchanged.
                y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N + N - 1)%N],t1);
            }
        }
        // Zero-fill y for Li <= n <= (P*Dop)-1.
        if((n >= Li) && (n <= (P*Dop)-1))
        {
            y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0f,0.0f);
        }
    }
}
// Host wrapper for the FFT intermediate stage: runs Dip*Dop batched P-point
// forward FFTs over the y array using cuFFT, leaving the result in z_device
// (layout z(n1,k2,k1), same flattening as y).  Creates and destroys the cuFFT
// plan and the in/out staging buffers on every call.
// NOTE(review): cuFFT and CUDA return codes are not checked.
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Local variables (k1,k2,n1 are only used by the commented-out debug dump)
int k1,k2,n1;
// Batched-FFT geometry: each transform is P points, stride 1, distance P.
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
// Device allocation for "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
// Host allocation for "z" (kept for the commented debug dump)
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
// Device allocations for the cuFFT staging buffers "in" and "out"
cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop);
// Copy the "y" array into "in"
cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
// Create the batched plan: Dip*Dop transforms of P points each
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop);
// Execute the plan
cufftExecC2C(plan,in,out,CUFFT_FORWARD);
// Wait for the transforms to finish completely
cudaDeviceSynchronize();
// Copy the "out" buffer into "z_device"
cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
// Destroy the plan
cufftDestroy(plan);
// Release the "in" and "out" staging buffers
cudaFree(in);
cudaFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
// Host wrapper for the FFT output stage: allocates X on device and host,
// sizes a 1-D launch over the Lo output bins, runs outputStage_kernel and
// copies the spectrum back into the global X_host (consumed by the caller).
// NOTE(review): CUDA API calls are not error-checked; X_host allocated here
// is freed by the caller's main loop.
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Local variable (only used by the commented-out debug dump)
int m;
// Device allocation for "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
// Host allocation for "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
// Launch configuration for the "outputStage" kernel:
// one thread per output bin, blocks of up to 1024 threads.
dim3 gridDim(1,1,1);
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
// Launch "outputStage_kernel"
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
// Wait for the kernel to finish completely
cudaDeviceSynchronize();
// Copy the "X" array from the device back to the host
cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
// Output-stage kernel: one thread per output bin k (k < Lo) recombines the
// intermediate array z(n1,k2,k1) into the final spectrum X[k].
// Three regimes are used, selected per bin:
//   - Lo <= Dip, or k < Dip: plain accumulation over n1 (k decomposes as
//     k = k1 with k2 = 0, so no twiddle is needed);
//   - Dop <= 4: direct summation with explicit twiddle multiplies;
//   - otherwise: a second-order ("2BF") Goertzel-style recurrence over n1.
// State carried across loop iterations: k1,k2 (and a/b) are computed in the
// n1==0 (or n1==1) iteration and reused in later iterations; t1..t4 carry
// the recurrence state of the 2BF filter.  Note the local 'a' shadows the
// global 'a' used by product().
// NOTE(review): every W[(...)%N - 1] lookup below reads W[-1] (out of
// bounds) whenever the exponent is a multiple of N -- same off-by-one as
// the one fixed in inputStage_kernel; confirm and apply the (e + N-1) % N
// wrap here as well.
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
// Local variables
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
// X(k) for 0 <= k <= Lo-1.
// In the decomposition k = k1 + Dip*k2, k2 = 0, so k = k1.
if(n1 == 0) // First iteration: initialize the accumulator
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
// X(k) for 0 <= k <= Dip-1: same accumulation, k2 = 0 so k = k1.
if(n1 == 0) // First iteration: initialize the accumulator
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
// Direct method: explicit twiddle multiply per n1 term.
if(n1 == 0) // First iteration: decode k and initialize
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
// Filtering method (2BF): second-order recurrence over n1,
// consuming z in reverse n1 order.
if((Dop-2) >= 1)
{
if(n1 == 0)
{
// Decode k and start the recurrence with the last z term.
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
// Middle steps: t1/t2/t4 carry the filter state.
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
// Final step: fold in z(...,0,...) and the conjugate twiddle.
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
// Degenerate filter: single z term, no twiddle needed.
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
// Dop == 2: the recurrence collapses to one step.
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
|
0fdbafa3d409c3040cb6b5d752da410a5d01e264.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
// Kernel: adds the per-thread integer offset b[i] to the character a[i].
// Launched with one block of N threads, so each thread owns exactly one slot.
__global__
void hello(char *a, int *b)
{
    const int i = threadIdx.x;
    a[i] = a[i] + b[i];
}
// Host driver: prints "Hello ", lets the GPU add per-character offsets to the
// string, then prints the transformed result ("World!").
int main()
{
    char text[N] = "Hello \0\0\0\0\0\0";
    int offsets[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    const int textBytes = N*sizeof(char);
    const int offsetBytes = N*sizeof(int);
    printf("%s", text);
    // Device copies of both arrays.
    char *d_text;
    int *d_offsets;
    hipMalloc( (void**)&d_text, textBytes );
    hipMalloc( (void**)&d_offsets, offsetBytes );
    hipMemcpy( d_text, text, textBytes, hipMemcpyHostToDevice );
    hipMemcpy( d_offsets, offsets, offsetBytes, hipMemcpyHostToDevice );
    // One block of `blocksize` threads covers all N characters.
    dim3 dimGrid( 1, 1 );
    dim3 dimBlock( blocksize, 1 );
    hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, d_text, d_offsets);
    hipMemcpy( text, d_text, textBytes, hipMemcpyDeviceToHost );
    hipFree( d_text );
    hipFree( d_offsets );
    printf("%s\n", text);
    return EXIT_SUCCESS;
}
| 0fdbafa3d409c3040cb6b5d752da410a5d01e264.cu | #include <stdio.h>
const int N = 16;
const int blocksize = 16;
// Kernel: adds the per-thread integer offset b[i] to the character a[i].
// Launched with one block of N threads, so each thread owns exactly one slot.
__global__
void hello(char *a, int *b)
{
    const int i = threadIdx.x;
    a[i] = a[i] + b[i];
}
// Host driver: prints "Hello ", lets the GPU add per-character offsets to the
// string, then prints the transformed result ("World!").
int main()
{
    char text[N] = "Hello \0\0\0\0\0\0";
    int offsets[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    const int textBytes = N*sizeof(char);
    const int offsetBytes = N*sizeof(int);
    printf("%s", text);
    // Device copies of both arrays.
    char *d_text;
    int *d_offsets;
    cudaMalloc( (void**)&d_text, textBytes );
    cudaMalloc( (void**)&d_offsets, offsetBytes );
    cudaMemcpy( d_text, text, textBytes, cudaMemcpyHostToDevice );
    cudaMemcpy( d_offsets, offsets, offsetBytes, cudaMemcpyHostToDevice );
    // One block of `blocksize` threads covers all N characters.
    dim3 dimGrid( 1, 1 );
    dim3 dimBlock( blocksize, 1 );
    hello<<<dimGrid, dimBlock>>>(d_text, d_offsets);
    cudaMemcpy( text, d_text, textBytes, cudaMemcpyDeviceToHost );
    cudaFree( d_text );
    cudaFree( d_offsets );
    printf("%s\n", text);
    return EXIT_SUCCESS;
}
|
ad8e5c0dcec611b85f0ba592c0d460d51f881c89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2008 BOROUJERDI Maxime. Tous droits reserves.
*/
#ifndef __RAYTRACING_KERNEL_H__
#define __RAYTRACING_KERNEL_H__
#include "helper_math.h"
#define numObj 5
// Host-side record describing one scene object: RGBA colour (R,V,B = French
// rouge/vert/bleu), centre C and radius r; `is_sphere` distinguishes sphere
// records from the ground plane.
class inputs {
public:
  // Default-construct a black, zero-sized object at the origin.
  inputs() {
    this->is_sphere = 0;  // previously left indeterminate
    this->R = 0.0f;
    this->V = 0.0f;
    this->B = 0.0f;
    this->A = 0.0f;
    // Bug fix: the original assigned R = 0.0f a second time here and never
    // initialized r, leaving the radius indeterminate.
    this->r = 0.0f;
    this->C = make_float3(0.0f, 0.0f, 0.0f);
  }
  int is_sphere;
  float R, V, B, A;
  float3 C; // centre
  float r;  // radius (rayon)
};
typedef struct { float4 m[3]; } matrice3x4;
__constant__ matrice3x4 MView; // matrice inverse de la matrice de vue
// Computes a^50 by 49 explicit multiplications (avoids the powf call);
// used as a cheap fixed-exponent specular term.
__device__ float float2int_pow50(float a) {
return a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a *
a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a *
a * a * a * a * a * a * a * a * a * a * a * a * a * a;
}
// Packs a float RGBA colour into a 32-bit ABGR integer pixel, clamping each
// channel to [0.0, 1.0] first.
__device__ uint rgbaFloatToInt(float4 rgba) {
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w * 255) << 24) | (uint(rgba.z * 255) << 16) |
(uint(rgba.y * 255) << 8) | (uint(rgba.x * 255));
}
// A ray: origin A and direction u (expected normalized by callers).
class Rayon {
public:
float3 A; // origin
float3 u; // direction
};
// Abstract device-side scene object: colour + centre + radius, with virtual
// ray-intersection and surface-normal queries implemented by Plain / Sphere.
class Object {
public:
  // Default-construct a black, zero-sized object at the origin.
  __device__ Object() {
    this->R = 0.0f;
    this->V = 0.0f;
    this->B = 0.0f;
    this->A = 0.0f;
    // Bug fix: the original assigned R = 0.0f a second time here and never
    // initialized r, leaving the radius indeterminate.
    this->r = 0.0f;
    this->C = make_float3(0.0f, 0.0f, 0.0f);
  }
  __device__ Object(float R, float V, float B, float A, float3 C, float r) {
    this->R = R;
    this->V = V;
    this->B = B;
    this->A = A;
    this->C = C;
    this->r = r;
  }
  // Returns the ray parameter t of the hit, or 0.0f on a miss.
  __noinline__ __device__ virtual float intersection(Rayon R) = 0;
  // Returns the (normalized) surface normal at point P.
  __noinline__ __device__ virtual float3 getNormale(float3 P) = 0;
  float R, V, B, A;
  float3 C; // centre
  float r;  // radius (rayon)
};
// Infinite ground plane passing through C with the fixed upward normal (0,1,0).
class Plain : public Object {
public:
  __device__ Plain() {
  }
  __device__ Plain(float R, float V, float B, float A, float3 C, float r) {
    this->R = R;
    this->V = V;
    this->B = B;
    this->A = A;
    this->C = C;
    this->r = r;
  }
  // The plane's normal is constant (and already unit length).
  __noinline__ __device__ float3 getNormale(float3 P) {
    return normalize(make_float3(0.0f, 1.0f, 0.0f));
  }
  // Ray/plane test: returns the hit parameter t > 0, or 0.0f when the ray is
  // (near-)parallel to the plane or the hit lies behind the ray origin.
  __noinline__ __device__ float intersection(Rayon R) {
    float3 N = normalize(make_float3(0.0f, 1.0f, 0.0f));
    float denom = dot(N, R.u);
    if (fabs(denom) < 0.0001f) {
      return 0.0f; // ray runs parallel to the plane
    }
    float t = -dot(N, R.A - this->C) / denom;
    return (t > 0) ? t : 0.0f;
  }
};
// Sphere with centre C and radius r.
class Sphere : public Object {
public:
  __device__ Sphere() {
  }
  __device__ Sphere(float R, float V, float B, float A, float3 C, float r) {
    this->R = R;
    this->V = V;
    this->B = B;
    this->A = A;
    this->C = C;
    this->r = r;
  }
  // Normal at a surface point P points radially away from the centre.
  __noinline__ __device__ float3 getNormale(float3 P) {
    return normalize(P - this->C);
  }
  // Geometric ray/sphere test.  Returns the entry distance when the ray
  // origin lies outside the sphere, the exit distance when it lies inside,
  // and 0.0f on a miss.
  __noinline__ __device__ float intersection(Rayon R) {
    float3 toCentre = this->C - R.A;
    float proj = dot(toCentre, R.u);       // closest approach along the ray
    float dist2 = dot(toCentre, toCentre); // squared distance origin->centre
    float rad2 = this->r * this->r;
    if (proj < 0.0f && dist2 > rad2) {
      return 0.0f; // sphere entirely behind an outside origin
    }
    float chord2 = dist2 - proj * proj;    // squared distance centre->ray
    if (chord2 > rad2) {
      return 0.0f; // ray passes beside the sphere
    }
    float halfChord = sqrt(rad2 - chord2);
    return (dist2 > rad2) ? proj - halfChord : proj + halfChord;
  }
};
// Host-side allocation of the polymorphic object list via the project
// allocator: slot 0 receives a Plain (ground plane), every other slot a
// Sphere.  Only storage is allocated here; the fields are filled in later
// by initObject_kern.
// NOTE(review): the A parameter (per-object data) is unused in this
// function -- presumably consumed only by initObject_kern; confirm.
void initObject(Object **objList, float *A, int n, obj_alloc *alloc) {
int i = 0;
for (i = 0; i < n; i++) {
if (i == 0) {
objList[i] = (Object *)alloc->my_new<Plain>();
} else {
objList[i] = (Object *)alloc->my_new<Sphere>();
}
}
}
// Kernel counterpart of initObject: thread i placement-constructs its object
// in the pre-allocated slot, reading 8 floats per object from A in the order
// (R, V, B, A, Cx, Cy, Cz, radius).  CLEANPTR strips the allocator's tag
// bits from the stored pointer.  Slot 0 becomes the ground plane.
__global__ void initObject_kern(Object **objList, float *A, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
if (i == 0) {
new (CLEANPTR(objList[i], Object *)) Plain(
A[i * 8], A[i * 8 + 1], A[i * 8 + 2], A[i * 8 + 3],
make_float3(A[i * 8 + 4], A[i * 8 + 5], A[i * 8 + 6]), A[i * 8 + 7]);
} else {
new (CLEANPTR(objList[i], Object *)) Sphere(
A[i * 8], A[i * 8 + 1], A[i * 8 + 2], A[i * 8 + 3],
make_float3(A[i * 8 + 4], A[i * 8 + 5], A[i * 8 + 6]), A[i * 8 + 7]);
}
}
}
__managed__ obj_info_tuble *vfun_table;
__managed__ unsigned tree_size_g;
__managed__ void *temp_copyBack;
__managed__ void *temp_TP;
__managed__ void *temp_coal;
// Shadow test: casts a ray from A in direction u towards the hard-coded point
// light at (10,10,10) and returns true when no object blocks it.  The origin
// is nudged by u*0.0001f to avoid self-intersection ("shadow acne").
__device__ bool notShadowRay(Object **__restrict__ objList, float3 A, float3 u,
int NUM) {
float t(0.0f);
Rayon ray;
float3 L(make_float3(10.0f, 10.0f, 10.0f)), tmp;
// dst = squared distance from A to the light.
float dst(dot(tmp = (L - A), tmp));
ray.A = A + u * 0.0001f;
ray.u = u;
// Stop at the first blocking hit (the !t loop condition).
for (int j = 0; j < NUM && !t; j++) {
t = CLEANPTR(objList[j], Object *)->intersection(ray);
// Discard hits beyond the light.
// NOTE(review): this compares |hit point|^2 (distance from the world
// origin) against the squared A->light distance; it looks like the
// intended quantity was the squared distance along the ray -- confirm.
if (t > 0.0f && dot(tmp = (A + u * t), tmp) > dst) {
t = 0.0f;
}
}
return t == 0.0f;
}
// Shadow test used by render(): same logic as notShadowRay, kept as a
// separate copy that once performed explicit vtable lookups (now commented
// out); the unused vtable/ptr locals remain from that experiment.
__device__ bool notShadowRay_vptr(Object **__restrict__ objList, float3 A,
float3 u, int NUM) {
float t(0.0f);
Rayon ray;
void **vtable;
Object *ptr;
float3 L(make_float3(10.0f, 10.0f, 10.0f)), tmp;
// dst = squared distance from A to the light at (10,10,10).
float dst(dot(tmp = (L - A), tmp));
// Offset the origin to avoid self-shadowing.
ray.A = A + u * 0.0001f;
ray.u = u;
for (int j = 0; j < NUM && !t; j++) {
//ptr=CLEANPTR(objList[j], Object *);
// vtable = get_vfunc(ptr, table, tree_size);
// temp_coal = vtable[0];
// vtable = get_vfunc_type(objList[j], vfun_table);
// temp_TP = vtable[0];
t = CLEANPTR(objList[j], Object *)->intersection(ray);
// NOTE(review): compares |hit point|^2 against the squared A->light
// distance (see notShadowRay) -- looks suspicious; confirm intent.
if (t > 0.0f && dot(tmp = (A + u * t), tmp) > dst) {
t = 0.0f;
}
}
return t == 0.0f;
}
// Main raytracing kernel: one thread per pixel (x,y).  Builds the primary
// ray from the inverse view matrix MView and the focal factor df, then
// traces up to nRec=5 bounces; each bounce's shaded colour is pushed onto
// pile[] and the stack is blended back-to-front with a 0.8 reflection
// weight before packing into result[id].
// Shading: 0.05 ambient + 0.3 diffuse + specular (pow 50), gated by a
// shadow ray towards the light at (10,10,10).
// NOTE(review): tid, f, and ptr are unused (leftovers of the commented-out
// experiments); the __syncthreads() below sits inside the divergent
// `if (x < imageW && y < imageH)` branch, which is undefined behaviour when
// the image size is not a multiple of the block size -- confirm/fix upstream.
__global__ void render(uint *result, Object **__restrict__ objList,
uint imageW, uint imageH, float df, int NUM) {
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint tid(__umul24(threadIdx.y, blockDim.x) + threadIdx.x);
void **vtable;
Object *ptr;
// __shared__ range_tree_node table[3];
// if (threadIdx.x < tree_size && threadIdx.y==0) {
// //for (int i = 0; i < tree_size; i++) {
// //printf("%d\n",threadIdx.x);
// memcpy(&table[threadIdx.x], &range_tree[threadIdx.x], sizeof(range_tree_node));
// // if(tid==0)
// // printf("%p %p \n",table[i].range_start,table[i].range_end);
// }
uint id(x + y * imageW);
// pile[] is the per-bounce colour stack; n counts completed bounces.
float4 pile[5];
uint Obj, n = 0, nRec = 5;
float prof, tmp;
for (int i = 0; i < nRec; ++i)
pile[i] = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
if (x < imageW && y < imageH) {
prof = 10000.0f;
result[id] = 0;
// Primary ray: camera position from the last column of MView, direction
// from the view axes scaled by the pixel offset.
float tPixel(2.0f / float(min(imageW, imageH)));
float4 f(make_float4(0.0f, 0.0f, 0.0f, 1.0f));
matrice3x4 M(MView);
Rayon R;
R.A = make_float3(M.m[0].w, M.m[1].w, M.m[2].w);
R.u = make_float3(M.m[0]) * df +
make_float3(M.m[2]) * (float(x) - float(imageW) * 0.5f) * tPixel +
make_float3(M.m[1]) * (float(y) - float(imageH) * 0.5f) * tPixel;
R.u = normalize(R.u);
__syncthreads();
// Bounce loop: stops as soon as an iteration records no hit (n != i).
for (int i = 0; i < nRec && n == i; i++) {
// Find the nearest intersected object along R.
for (int j = 0; j < NUM; j++) {
float t;
//ptr=CLEANPTR(objList[j], Object *);
// vtable = get_vfunc(ptr, table, tree_size);
// temp_coal = vtable[0];
// vtable = get_vfunc_type(objList[j], vfun_table);
// temp_TP = vtable[0];
t = CLEANPTR(objList[j], Object *)->intersection(R);
if (t > 0.0f && t < prof) {
prof = t;
Obj = j;
}
}
float t = prof;
if (t > 0.0f && t < 10000.0f) {
n++;
// Shade the hit point: ambient + diffuse + Blinn-style specular.
float4 color(make_float4(CLEANPTR(objList[Obj], Object *)->R, CLEANPTR(objList[Obj], Object *)->V,
CLEANPTR(objList[Obj], Object *)->B, CLEANPTR(objList[Obj], Object *)->A));
float3 P(R.A + R.u * t),
L(normalize(make_float3(10.0f, 10.0f, 10.0f) - P)),
V(normalize(R.A - P));
//ptr=CLEANPTR(objList[Obj], Object *);
vtable = get_vfunc_type(objList[Obj], vfun_table);
temp_TP = vtable[1];
float3 N(CLEANPTR(objList[Obj], Object *)->getNormale(P));
// Flip the normal towards the viewer.
float3 Np(dot(V, N) < 0.0f ? (-1 * N) : N);
pile[i] = 0.05f * color;
if (dot(Np, L) > 0.0f && notShadowRay_vptr(objList, P, L, NUM)) {
// float3 Ri(2.0f*Np*dot(Np,L) - L);
float3 Ri(normalize(L + V));
// Ri = (L+V)/normalize(L+V);
pile[i] += 0.3f * color * (min(1.0f, dot(Np, L)));
tmp = 0.8f * pow(max(0.0f, min(1.0f, dot(Np, Ri))), 50.0f);
// tmp = 0.8f * float2int_pow50(max(0.0f,min(1.0f,dot(Np,Ri))));
pile[i].x += tmp;
pile[i].y += tmp;
pile[i].z += tmp;
}
// Reflect the ray for the next bounce, offsetting to avoid acne.
R.u = 2.0f * N * dot(N, V) - V;
R.u = normalize(R.u);
R.A = P + R.u * 0.0001f;
}
prof = 10000.0f;
}
// Blend the bounce stack back-to-front with a 0.8 reflectivity weight.
for (int i(n - 1); i > 0; i--)
pile[i - 1] = pile[i - 1] + 0.8f * pile[i];
result[id] += rgbaFloatToInt(pile[0]);
}
}
#endif // __RAYTRACING_KERNEL_H__
| ad8e5c0dcec611b85f0ba592c0d460d51f881c89.cu | /*
* Copyright 2008 BOROUJERDI Maxime. Tous droits reserves.
*/
#ifndef __RAYTRACING_KERNEL_H__
#define __RAYTRACING_KERNEL_H__
#include "helper_math.h"
#define numObj 5
// Host-side record describing one scene object: RGBA colour (R,V,B = French
// rouge/vert/bleu), centre C and radius r; `is_sphere` distinguishes sphere
// records from the ground plane.
class inputs {
public:
  // Default-construct a black, zero-sized object at the origin.
  inputs() {
    this->is_sphere = 0;  // previously left indeterminate
    this->R = 0.0f;
    this->V = 0.0f;
    this->B = 0.0f;
    this->A = 0.0f;
    // Bug fix: the original assigned R = 0.0f a second time here and never
    // initialized r, leaving the radius indeterminate.
    this->r = 0.0f;
    this->C = make_float3(0.0f, 0.0f, 0.0f);
  }
  int is_sphere;
  float R, V, B, A;
  float3 C; // centre
  float r;  // radius (rayon)
};
typedef struct { float4 m[3]; } matrice3x4;
__constant__ matrice3x4 MView; // matrice inverse de la matrice de vue
__device__ float float2int_pow50(float a) {
return a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a *
a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a * a *
a * a * a * a * a * a * a * a * a * a * a * a * a * a;
}
__device__ uint rgbaFloatToInt(float4 rgba) {
rgba.x = __saturatef(rgba.x); // clamp entre [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w * 255) << 24) | (uint(rgba.z * 255) << 16) |
(uint(rgba.y * 255) << 8) | (uint(rgba.x * 255));
}
class Rayon {
public:
float3 A; // origine
float3 u; // direction
};
// Abstract device-side scene object: colour + centre + radius, with virtual
// ray-intersection and surface-normal queries implemented by Plain / Sphere.
class Object {
public:
  // Default-construct a black, zero-sized object at the origin.
  __device__ Object() {
    this->R = 0.0f;
    this->V = 0.0f;
    this->B = 0.0f;
    this->A = 0.0f;
    // Bug fix: the original assigned R = 0.0f a second time here and never
    // initialized r, leaving the radius indeterminate.
    this->r = 0.0f;
    this->C = make_float3(0.0f, 0.0f, 0.0f);
  }
  __device__ Object(float R, float V, float B, float A, float3 C, float r) {
    this->R = R;
    this->V = V;
    this->B = B;
    this->A = A;
    this->C = C;
    this->r = r;
  }
  // Returns the ray parameter t of the hit, or 0.0f on a miss.
  __noinline__ __device__ virtual float intersection(Rayon R) = 0;
  // Returns the (normalized) surface normal at point P.
  __noinline__ __device__ virtual float3 getNormale(float3 P) = 0;
  float R, V, B, A;
  float3 C; // centre
  float r;  // radius (rayon)
};
// Infinite ground plane passing through C with the fixed upward normal (0,1,0).
class Plain : public Object {
public:
  __device__ Plain() {
  }
  __device__ Plain(float R, float V, float B, float A, float3 C, float r) {
    this->R = R;
    this->V = V;
    this->B = B;
    this->A = A;
    this->C = C;
    this->r = r;
  }
  // The plane's normal is constant (and already unit length).
  __noinline__ __device__ float3 getNormale(float3 P) {
    return normalize(make_float3(0.0f, 1.0f, 0.0f));
  }
  // Ray/plane test: returns the hit parameter t > 0, or 0.0f when the ray is
  // (near-)parallel to the plane or the hit lies behind the ray origin.
  __noinline__ __device__ float intersection(Rayon R) {
    float3 N = normalize(make_float3(0.0f, 1.0f, 0.0f));
    float denom = dot(N, R.u);
    if (fabs(denom) < 0.0001f) {
      return 0.0f; // ray runs parallel to the plane
    }
    float t = -dot(N, R.A - this->C) / denom;
    return (t > 0) ? t : 0.0f;
  }
};
// Sphere with centre C and radius r.
class Sphere : public Object {
public:
  __device__ Sphere() {
  }
  __device__ Sphere(float R, float V, float B, float A, float3 C, float r) {
    this->R = R;
    this->V = V;
    this->B = B;
    this->A = A;
    this->C = C;
    this->r = r;
  }
  // Normal at a surface point P points radially away from the centre.
  __noinline__ __device__ float3 getNormale(float3 P) {
    return normalize(P - this->C);
  }
  // Geometric ray/sphere test.  Returns the entry distance when the ray
  // origin lies outside the sphere, the exit distance when it lies inside,
  // and 0.0f on a miss.
  __noinline__ __device__ float intersection(Rayon R) {
    float3 toCentre = this->C - R.A;
    float proj = dot(toCentre, R.u);       // closest approach along the ray
    float dist2 = dot(toCentre, toCentre); // squared distance origin->centre
    float rad2 = this->r * this->r;
    if (proj < 0.0f && dist2 > rad2) {
      return 0.0f; // sphere entirely behind an outside origin
    }
    float chord2 = dist2 - proj * proj;    // squared distance centre->ray
    if (chord2 > rad2) {
      return 0.0f; // ray passes beside the sphere
    }
    float halfChord = sqrt(rad2 - chord2);
    return (dist2 > rad2) ? proj - halfChord : proj + halfChord;
  }
};
void initObject(Object **objList, float *A, int n, obj_alloc *alloc) {
int i = 0;
for (i = 0; i < n; i++) {
if (i == 0) {
objList[i] = (Object *)alloc->my_new<Plain>();
} else {
objList[i] = (Object *)alloc->my_new<Sphere>();
}
}
}
__global__ void initObject_kern(Object **objList, float *A, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
if (i == 0) {
new (CLEANPTR(objList[i], Object *)) Plain(
A[i * 8], A[i * 8 + 1], A[i * 8 + 2], A[i * 8 + 3],
make_float3(A[i * 8 + 4], A[i * 8 + 5], A[i * 8 + 6]), A[i * 8 + 7]);
} else {
new (CLEANPTR(objList[i], Object *)) Sphere(
A[i * 8], A[i * 8 + 1], A[i * 8 + 2], A[i * 8 + 3],
make_float3(A[i * 8 + 4], A[i * 8 + 5], A[i * 8 + 6]), A[i * 8 + 7]);
}
}
}
__managed__ obj_info_tuble *vfun_table;
__managed__ unsigned tree_size_g;
__managed__ void *temp_copyBack;
__managed__ void *temp_TP;
__managed__ void *temp_coal;
__device__ bool notShadowRay(Object **__restrict__ objList, float3 A, float3 u,
int NUM) {
float t(0.0f);
Rayon ray;
float3 L(make_float3(10.0f, 10.0f, 10.0f)), tmp;
float dst(dot(tmp = (L - A), tmp));
ray.A = A + u * 0.0001f;
ray.u = u;
for (int j = 0; j < NUM && !t; j++) {
t = CLEANPTR(objList[j], Object *)->intersection(ray);
if (t > 0.0f && dot(tmp = (A + u * t), tmp) > dst) {
t = 0.0f;
}
}
return t == 0.0f;
}
__device__ bool notShadowRay_vptr(Object **__restrict__ objList, float3 A,
float3 u, int NUM) {
float t(0.0f);
Rayon ray;
void **vtable;
Object *ptr;
float3 L(make_float3(10.0f, 10.0f, 10.0f)), tmp;
float dst(dot(tmp = (L - A), tmp));
ray.A = A + u * 0.0001f;
ray.u = u;
for (int j = 0; j < NUM && !t; j++) {
//ptr=CLEANPTR(objList[j], Object *);
// vtable = get_vfunc(ptr, table, tree_size);
// temp_coal = vtable[0];
// vtable = get_vfunc_type(objList[j], vfun_table);
// temp_TP = vtable[0];
t = CLEANPTR(objList[j], Object *)->intersection(ray);
if (t > 0.0f && dot(tmp = (A + u * t), tmp) > dst) {
t = 0.0f;
}
}
return t == 0.0f;
}
// Raytracing kernel: one thread per pixel (2D grid). Traces a primary ray
// from the camera (MView) through pixel (x,y), follows up to nRec=5 mirror
// bounces, shades each hit with ambient + diffuse + Blinn-Phong specular,
// then composites the bounce stack back-to-front with 0.8 reflectivity and
// writes the packed RGBA result into result[x + y*imageW].
__global__ void render(uint *result, Object **__restrict__ objList,
uint imageW, uint imageH, float df, int NUM) {
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint tid(__umul24(threadIdx.y, blockDim.x) + threadIdx.x);
void **vtable;
Object *ptr;
// __shared__ range_tree_node table[3];
// if (threadIdx.x < tree_size && threadIdx.y==0) {
// //for (int i = 0; i < tree_size; i++) {
// //printf("%d\n",threadIdx.x);
// memcpy(&table[threadIdx.x], &range_tree[threadIdx.x], sizeof(range_tree_node));
// // if(tid==0)
// // printf("%p %p \n",table[i].range_start,table[i].range_end);
// }
uint id(x + y * imageW);
// pile[i] accumulates the shaded color of bounce i (max 5 bounces).
float4 pile[5];
uint Obj, n = 0, nRec = 5;
float prof, tmp;
for (int i = 0; i < nRec; ++i)
pile[i] = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
if (x < imageW && y < imageH) {
prof = 10000.0f;
result[id] = 0;
// tPixel: world-space size of one pixel, normalized to the short axis.
float tPixel(2.0f / float(min(imageW, imageH)));
float4 f(make_float4(0.0f, 0.0f, 0.0f, 1.0f));
matrice3x4 M(MView);
Rayon R;
// Camera position is the translation column of the view matrix; the ray
// direction mixes the view axes with the pixel offsets and focal df.
R.A = make_float3(M.m[0].w, M.m[1].w, M.m[2].w);
R.u = make_float3(M.m[0]) * df +
make_float3(M.m[2]) * (float(x) - float(imageW) * 0.5f) * tPixel +
make_float3(M.m[1]) * (float(y) - float(imageH) * 0.5f) * tPixel;
R.u = normalize(R.u);
__syncthreads();
// printf("%d: nRec %d\n", threadIdx.x, nRec);
// "n == i" keeps looping only while every previous bounce actually hit.
for (int i = 0; i < nRec && n == i; i++) {
// Find the nearest intersection among all NUM objects.
for (int j = 0; j < NUM; j++) {
float t;
//ptr=CLEANPTR(objList[j], Object *);
// vtable = get_vfunc(ptr, table, tree_size);
// temp_coal = vtable[0];
// vtable = get_vfunc_type(objList[j], vfun_table);
// temp_TP = vtable[0];
t = CLEANPTR(objList[j], Object *)->intersection(R);
if (t > 0.0f && t < prof) {
prof = t;
Obj = j;
}
}
// printf("%d: i=%d, t=%e\n", threadIdx.x, i, prof);
float t = prof;
if (t > 0.0f && t < 10000.0f) {
n++;
float4 color(make_float4(CLEANPTR(objList[Obj], Object *)->R, CLEANPTR(objList[Obj], Object *)->V,
CLEANPTR(objList[Obj], Object *)->B, CLEANPTR(objList[Obj], Object *)->A));
// P: hit point, L: direction to the light, V: direction to the viewer.
float3 P(R.A + R.u * t),
L(normalize(make_float3(10.0f, 10.0f, 10.0f) - P)),
V(normalize(R.A - P));
//ptr=CLEANPTR(objList[Obj], Object *);
vtable = get_vfunc_type(objList[Obj], vfun_table);
temp_TP = vtable[1];
float3 N(CLEANPTR(objList[Obj], Object *)->getNormale(P));
// Flip the normal to face the viewer for shading.
float3 Np(dot(V, N) < 0.0f ? (-1 * N) : N);
// Ambient term.
pile[i] = 0.05f * color;
if (dot(Np, L) > 0.0f && notShadowRay_vptr(objList, P, L, NUM)) {
// float3 Ri(2.0f*Np*dot(Np,L) - L);
// Blinn-Phong half vector.
float3 Ri(normalize(L + V));
// Ri = (L+V)/normalize(L+V);
pile[i] += 0.3f * color * (min(1.0f, dot(Np, L)));
tmp = 0.8f * pow(max(0.0f, min(1.0f, dot(Np, Ri))), 50.0f);
// tmp = 0.8f * float2int_pow50(max(0.0f,min(1.0f,dot(Np,Ri))));
pile[i].x += tmp;
pile[i].y += tmp;
pile[i].z += tmp;
}
// Reflect the ray for the next bounce, offset to avoid self-hits.
R.u = 2.0f * N * dot(N, V) - V;
R.u = normalize(R.u);
R.A = P + R.u * 0.0001f;
}
prof = 10000.0f;
}
// Composite bounces back-to-front: each level contributes 80% upward.
for (int i(n - 1); i > 0; i--)
pile[i - 1] = pile[i - 1] + 0.8f * pile[i];
result[id] += rgbaFloatToInt(pile[0]);
}
}
#endif // __RAYTRACING_KERNEL_H__
|
24bada211469ad71b356ff9ebd2848eafa4c0c0d.hip | // !!! This is a file automatically generated by hipify!!!
// Hybrid MPI+CUDA computation of Pi
#include <stdio.h>
#include <omp.h>//jd
#include <mpi.h>
#include <hip/hip_runtime.h>
#define NBIN 10000000 // Number of bins
#define NUM_DEVICE 2 // jd
#define NUM_BLOCK 13 // Number of thread blocks
#define NUM_THREAD 192 // Number of threads per block
// Kernel that executes on the CUDA device
// Kernel: each thread accumulates midpoint-rule contributions to pi over an
// interleaved subset of the nbin quadrature bins starting at x = offset.
// sum must hold nthreads*nblocks floats and be zero-initialised by the host.
__global__ void cal_pi(float *sum,int nbin,float step,float offset,int nthreads,int nblocks) {
	int i;
	float x;
	int idx = blockIdx.x*blockDim.x+threadIdx.x;  // Sequential thread index across the blocks
	for (i=idx; i<nbin; i+=nthreads*nblocks) {  // Interleaved bin assignment to threads
		// Float literals (0.5f/4.0f/1.0f) keep the arithmetic in single
		// precision; the previous double constants silently promoted every
		// operation to double, which is far slower on most GPUs and was
		// truncated back to float on the store anyway.
		x = offset+(i+0.5f)*step;
		sum[idx] += 4.0f/(1.0f+x*x);
	}
}
// Hybrid MPI + OpenMP + GPU pi computation: each MPI rank spawns NUM_DEVICE
// OpenMP threads, each thread drives one GPU over its own slice of the NBIN
// quadrature bins; partial sums are reduced first across OpenMP threads
// (reduction clause) and then across ranks (MPI_Allreduce).
int main(int argc,char **argv) {
	int myid,nproc,nbin,tid, mpid;
	float step,offset,pi=0.0,pig;
	dim3 dimGrid(NUM_BLOCK,1,1);  // Grid dimensions (only use 1D)
	dim3 dimBlock(NUM_THREAD,1,1);  // Block dimensions (only use 1D)
	float *sumHost,*sumDev;  // Pointers to host & device arrays
	int dev_used;
	MPI_Init(&argc,&argv);
	MPI_Comm_rank(MPI_COMM_WORLD,&myid);  // My MPI rank
	MPI_Comm_size(MPI_COMM_WORLD,&nproc);  // Number of MPI processes
	//nbin = NBIN/nproc; // Number of bins per MPI process
	//step = 1.0/(float)(nbin*nproc); // Step size with redefined number of bins
	//offset = myid*step*nbin; // Quadrature-point offset
	omp_set_num_threads(NUM_DEVICE);  // One OpenMP thread per GPU device
	nbin = NBIN/(nproc*NUM_DEVICE);  // # of bins per OpenMP thread
	step = 1.0/(float)(nbin*nproc*NUM_DEVICE);
#pragma omp parallel private( mpid, offset, sumHost, sumDev, tid, dev_used ) reduction(+:pi)
	{
	mpid = omp_get_thread_num();
	offset = (NUM_DEVICE*myid+mpid)*step*nbin;  // Quadrature-point offset
	// NOTE(review): device index is hard-wired to 2 GPUs per node (mpid%2)
	// and no HIP call below is error-checked — failures are silent.
	hipSetDevice(mpid%2);
	//hipSetDevice(myid%2);
	size_t size = NUM_BLOCK*NUM_THREAD*sizeof(float);  //Array memory size
	sumHost = (float *)malloc(size);  // Allocate array on host
	hipMalloc((void **) &sumDev,size);  // Allocate array on device
	hipMemset(sumDev,0,size);  // Reset array in device to 0
	// Calculate on device (call CUDA kernel)
	hipLaunchKernelGGL(( cal_pi) , dim3(dimGrid),dim3(dimBlock), 0, 0, sumDev,nbin,step,offset,NUM_THREAD,NUM_BLOCK);
	// Retrieve result from device and store it in host array
	// (hipMemcpy synchronizes, so no explicit device sync is needed here).
	hipMemcpy(sumHost,sumDev,size,hipMemcpyDeviceToHost);
	// Reduction over CUDA threads
	for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
		pi += sumHost[tid];
	pi *= step;// race condition solved by reduction(+:pi)
	// CUDA cleanup
	free(sumHost);
	hipFree(sumDev);
	hipGetDevice(&dev_used);
	//printf("myid = %d: device used = %d; partial pi = %f\n",myid,dev_used,pi);
	printf("myid = %d; mpid = %d: device used = %d; partial pi = %f\n", myid, mpid, dev_used, pi);
	} // ENd omp parallel jd
	// Reduction over MPI processes
	MPI_Allreduce(&pi,&pig,1,MPI_FLOAT,MPI_SUM,MPI_COMM_WORLD);
	if (myid==0) printf("PI = %f\n",pig);
	MPI_Finalize();
	return 0;
}
| 24bada211469ad71b356ff9ebd2848eafa4c0c0d.cu | // Hybrid MPI+CUDA computation of Pi
#include <stdio.h>
#include <omp.h>//jd
#include <mpi.h>
#include <cuda.h>
#define NBIN 10000000 // Number of bins
#define NUM_DEVICE 2 // jd
#define NUM_BLOCK 13 // Number of thread blocks
#define NUM_THREAD 192 // Number of threads per block
// Kernel that executes on the CUDA device
// Kernel: each thread accumulates midpoint-rule contributions to pi over an
// interleaved subset of the nbin quadrature bins starting at x = offset.
// sum must hold nthreads*nblocks floats and be zero-initialised by the host.
__global__ void cal_pi(float *sum,int nbin,float step,float offset,int nthreads,int nblocks) {
	int i;
	float x;
	int idx = blockIdx.x*blockDim.x+threadIdx.x;  // Sequential thread index across the blocks
	for (i=idx; i<nbin; i+=nthreads*nblocks) {  // Interleaved bin assignment to threads
		// Float literals (0.5f/4.0f/1.0f) keep the arithmetic in single
		// precision; the previous double constants silently promoted every
		// operation to double, which is far slower on most GPUs and was
		// truncated back to float on the store anyway.
		x = offset+(i+0.5f)*step;
		sum[idx] += 4.0f/(1.0f+x*x);
	}
}
// Hybrid MPI + OpenMP + CUDA pi computation: each MPI rank spawns NUM_DEVICE
// OpenMP threads, each thread drives one GPU over its own slice of the NBIN
// quadrature bins; partial sums are reduced first across OpenMP threads
// (reduction clause) and then across ranks (MPI_Allreduce).
int main(int argc,char **argv) {
	int myid,nproc,nbin,tid, mpid;
	float step,offset,pi=0.0,pig;
	dim3 dimGrid(NUM_BLOCK,1,1);  // Grid dimensions (only use 1D)
	dim3 dimBlock(NUM_THREAD,1,1);  // Block dimensions (only use 1D)
	float *sumHost,*sumDev;  // Pointers to host & device arrays
	int dev_used;
	MPI_Init(&argc,&argv);
	MPI_Comm_rank(MPI_COMM_WORLD,&myid);  // My MPI rank
	MPI_Comm_size(MPI_COMM_WORLD,&nproc);  // Number of MPI processes
	//nbin = NBIN/nproc; // Number of bins per MPI process
	//step = 1.0/(float)(nbin*nproc); // Step size with redefined number of bins
	//offset = myid*step*nbin; // Quadrature-point offset
	omp_set_num_threads(NUM_DEVICE);  // One OpenMP thread per GPU device
	nbin = NBIN/(nproc*NUM_DEVICE);  // # of bins per OpenMP thread
	step = 1.0/(float)(nbin*nproc*NUM_DEVICE);
#pragma omp parallel private( mpid, offset, sumHost, sumDev, tid, dev_used ) reduction(+:pi)
	{
	mpid = omp_get_thread_num();
	offset = (NUM_DEVICE*myid+mpid)*step*nbin;  // Quadrature-point offset
	// NOTE(review): device index is hard-wired to 2 GPUs per node (mpid%2)
	// and no CUDA call below is error-checked — failures are silent.
	cudaSetDevice(mpid%2);
	//cudaSetDevice(myid%2);
	size_t size = NUM_BLOCK*NUM_THREAD*sizeof(float);  //Array memory size
	sumHost = (float *)malloc(size);  // Allocate array on host
	cudaMalloc((void **) &sumDev,size);  // Allocate array on device
	cudaMemset(sumDev,0,size);  // Reset array in device to 0
	// Calculate on device (call CUDA kernel)
	cal_pi <<<dimGrid,dimBlock>>> (sumDev,nbin,step,offset,NUM_THREAD,NUM_BLOCK);
	// Retrieve result from device and store it in host array
	// (cudaMemcpy synchronizes, so no explicit device sync is needed here).
	cudaMemcpy(sumHost,sumDev,size,cudaMemcpyDeviceToHost);
	// Reduction over CUDA threads
	for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
		pi += sumHost[tid];
	pi *= step;// race condition solved by reduction(+:pi)
	// CUDA cleanup
	free(sumHost);
	cudaFree(sumDev);
	cudaGetDevice(&dev_used);
	//printf("myid = %d: device used = %d; partial pi = %f\n",myid,dev_used,pi);
	printf("myid = %d; mpid = %d: device used = %d; partial pi = %f\n", myid, mpid, dev_used, pi);
	} // ENd omp parallel jd
	// Reduction over MPI processes
	MPI_Allreduce(&pi,&pig,1,MPI_FLOAT,MPI_SUM,MPI_COMM_WORLD);
	if (myid==0) printf("PI = %f\n",pig);
	MPI_Finalize();
	return 0;
}
|
79fd7d37ed98bb885d7a66a292deb2130749cf43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
typedef unsigned long long ul;
typedef unsigned int uint;
int banyakdata = 5120;
int dimensigrid = 160;
int dimensiblok = 32;
int sizebig = 16;
// Arbitrary-precision unsigned integer: little-endian array of 32-bit limbs
// (value[0] is least significant). The value array is always caller-owned.
typedef struct {
char size;    // number of limbs currently in use
uint* value;  // limb storage; capacity must exceed size for mul/mod results
}big;
// Bit-length of a: bits in the most-significant limb plus 32 for each limb
// below it. Assumes a->size >= 1.
__host__ __device__ short ukuranbit(big *a) {
	short bits = (short)((a->size - 1) * 32);
	for (uint top = a->value[a->size - 1]; top != 0; top >>= 1) {
		bits++;
	}
	return bits;
}
// Returns 1 if bit `count` (counted from the LSB of limb 0) of a is set.
__host__ __device__ char getbit(big* a, short count) {
	uint limb = a->value[count / 32];
	uint mask = (uint)1 << (count % 32);
	return (limb & mask) ? 1 : 0;
}
// Returns limb `noblok` of (num << geser) for 0 <= geser < 32.
// The geser == 0 / noblok == 0 guards avoid the undefined 32-bit shift by 32
// and an out-of-range limb read below limb 0; the noblok == num->size guard
// handles the carry-out limb above the top.
__host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) {
uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser));
uint part2 = (noblok == num->size) ? 0 : (num->value[noblok] << geser);
return part1 | part2;
}
// res = a * b (schoolbook multiplication, base 2^32). res->value must have
// room for a->size + b->size limbs and res must not alias a or b.
// Fixes from review:
//  * the inner for-init re-declared the carry as a char, shadowing the uint
//    carry — it was truncated in the loop and the top limb of each row was
//    always written as 0;
//  * aval * bval was a 32-bit multiply that overflowed before widening;
//    it is now widened to 64 bits first;
//  * limbs are base 2^32, so the product splits as (uint)temp / temp >> 32,
//    not % UINT_MAX and / UINT_MAX (UINT_MAX is 2^32 - 1, the wrong base).
__host__ __device__ void kali(big *a, big *b, big* res) {
	if (a->size == 0 || b->size == 0) {
		res->size = 0;
		return ;
	}
	char ukurana = a->size;
	char ukuranb = b->size;
	res->size = ukurana + ukuranb;
	for (char i = 0; i < res->size; i++) {
		res->value[i] = 0;
	}
	for (char i = 0; i < ukurana; i++) {
		uint aval = a->value[i];
		if (aval==0){
			continue;
		}
		uint lebih = 0;  // carry propagated along row i
		for (char j = 0; j < ukuranb; j++) {
			ul temp = (ul)res->value[i+j] + (ul)aval * b->value[j] + lebih;
			res->value[i+j] = (uint)temp;      // low 32 bits
			lebih = (uint)(temp >> 32);        // high 32 bits carry out
		}
		// Position i+ukuranb has not been touched by earlier rows, so a
		// plain store of the final carry is correct.
		res->value[i+ukuranb] = lebih;
	}
	// Normalize: drop all leading zero limbs (matches modulo()'s convention).
	while (res->size > 0 && res->value[res->size - 1] == 0){
		res->size--;
	}
}
// res = a mod b, by bit-at-a-time binary long division: for each candidate
// shift (32*i + i2) it trial-subtracts b << shift into minbuff and commits
// the subtraction only when it does not borrow past the top.
// Requirements: res->value has capacity a->size + 1; minbuff has capacity
// a->size + 1 (scratch); b is normalized (top limb non-zero).
__host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) {
// Start with res = a (copy; a is not modified).
res->size = a->size;
for(char i = 0 ; i < res->size ;i++){
res->value[i] = a->value[i];
}
if (a->size < b->size) {
return ;
}
char i, j, k;
char i2;
uint temp ;
char borrowIn, borrowOut;
char ukurana = a->size;
char ukuranb = b->size;
// Extra zero limb so the trial subtraction can read one limb above a.
res->value[res->size] = 0;
res->size++;
i = ukurana - ukuranb + 1;
while (i > 0) {
i--;
i2 = 32;
while (i2 > 0) {
i2--;
// Trial subtraction of (b << i2) at limb offset i into minbuff.
for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) {
temp = res->value[k] - getShiftedBlock(b, j, i2);
borrowOut = (temp > res->value[k]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
minbuff[k] = temp;
borrowIn = borrowOut;
}
// Propagate a remaining borrow through the higher limbs.
for (; k < ukurana && borrowIn; k++) {
borrowIn = (res->value[k] == 0);
minbuff[k] = res->value[k] - 1;
}
// Commit only if the subtraction did not go negative.
if (!borrowIn) {
while (k > i) {
k--;
res->value[k] = minbuff[k];
}
}
}
}
// Normalize: strip leading zero limbs.
while (res->size > 0 && res->value[res->size - 1] == 0)
res->size--;
}
// res = a + b for a small (char-sized) addend b, with full carry
// propagation. res->value must have capacity a->size + 1 for the case where
// the carry ripples past the top limb.
void tambah(big* a, char b, big* res) {
if (a->size == 0) {
res->size = 1;
res->value[0] = uint(b);
return;
}
char carryIn = 0;
uint temp;
// Provisionally one limb larger; trimmed below if no final carry.
res->size = a->size + 1;
res->value[0] = a->value[0] + (uint)b;
carryIn = (res->value[0] < a->value[0]);  // unsigned wrap => carry
char i = 1;
// Ripple the carry upward while it persists.
for (; i < a->size && carryIn; i++) {
temp = a->value[i] + (uint)1;
carryIn = (temp == 0);
res->value[i] = temp;
}
// Remaining limbs are unchanged.
for (; i < a->size; i++)
res->value[i] = a->value[i];
if (carryIn)
res->value[i] = 1;
else
res->size--;
}
// res = a - b with borrow propagation; assumes a >= b (a borrow out of the
// top limb is silently ignored). res->value needs capacity a->size.
// Fixes from review:
//  * when b->size == 0 the old code zero-filled res and returned, producing
//    0 instead of a — it now copies a;
//  * the redundant zero-fill pass is gone (every limb below a->size is
//    written by the loops anyway);
//  * the final trim no longer reads value[-1] when res->size is 0.
void kurang(big* a, big *b, big* res) {
	res->size = a->size;
	if (b->size == 0) {
		// a - 0 == a.
		for (char i = 0; i < a->size; i++)
			res->value[i] = a->value[i];
		return;
	}
	char borrowIn, borrowOut;
	uint temp;
	char i;
	// Subtract overlapping limbs, tracking the borrow.
	for (i = 0, borrowIn = 0; i < b->size; i++) {
		temp = a->value[i] - b->value[i];
		borrowOut = (temp > a->value[i]);  // unsigned wrap => borrow
		if (borrowIn) {
			borrowOut |= (temp == 0);
			temp--;
		}
		res->value[i] = temp;
		borrowIn = borrowOut;
	}
	// Ripple any remaining borrow through a's higher limbs.
	for (; i < a->size && borrowIn; i++) {
		borrowIn = (a->value[i] == 0);
		res->value[i] = a->value[i] - 1;
	}
	for (; i < a->size; i++)
		res->value[i] = a->value[i];
	// Trim a single leading zero limb, as before.
	if (res->size > 0 && res->value[res->size - 1] == 0){
		res->size--;
	}
}
// res = a^b mod c by left-to-right binary (square-and-multiply)
// exponentiation. mulbuff holds intermediate products (capacity 2*c->size)
// and minbuff is scratch for modulo() (capacity 2*c->size).
__host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){
//printf("c val 0 %u\n", c->value[0]);
// Start from res = 1.
res->size = 1;
res->value[0] = 1;
short i = ukuranbit(b);
// Scan exponent bits from most to least significant.
while (i > 0) {
i--;
// Square step: res = res^2 mod c.
kali(res,res,mulbuff);
modulo(mulbuff,c,res,minbuff);
// Multiply step when bit i of the exponent is set: res = res*a mod c.
if (getbit(b,i)) {
kali(res, a, mulbuff);
modulo(mulbuff, c, res, minbuff);
}
}
}
// ElGamal encryption of one plaintext block m with ephemeral key k:
// writes the ciphertext pair into res[0] and res[1].
// res[0] = g^k mod p, res[1] = (y^k * m) mod p.
__device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, uint *minbuff, big *mulbuff) {
//printf("res adlaah tanga\n");
// Cipher block 1: c1 = g^k mod p
modexp(g,k,p,res,minbuff,mulbuff);
//printf("res 0 val 0 %u\n", res->value[0]);
// Cipher block 2: c2 = y^k * m mod p
modexp(y, k, p, res + 1,minbuff,mulbuff);
kali(res + 1, m, mulbuff);
modulo(mulbuff, p, res+1, minbuff);
//printf("res 1 val 0 %u\n", (res+1)->value[0]);
}
// Derive the ElGamal public key: y = g^x mod p (x is the private key).
// minbuff/mulbuff are scratch buffers forwarded to modexp().
void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){
modexp(g,x,p,y,minbuff,mulbuff);
}
// Encryption kernel: one thread encrypts one 16-limb plaintext block.
// Expects blockDim.x == 32 (shared arrays are sized for 32 threads) and
// sizebig == 16 limbs. Shared memory layout inside s[4200]:
//   p | g | y | per-thread result pairs | per-thread mul scratch | m | k.
// Outputs: ressize[2*idx..] = limb counts, resval = ciphertext limbs at a
// fixed stride of 2*sizebig uints per ciphertext half.
// NOTE(review): the buffmul parameter is accepted but never used (the
// shared-memory smulbuff is used instead) — confirm before removing it.
__global__ void kernelenk(uint *p, uint *g, uint *y, uint *m, uint *k, uint *resval, char *ressize, uint *buffmin, uint *buffmul){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int jdx = threadIdx.x;
int sizebig = 16;
// int banyakdata = 256;
__shared__ big sm[32];
__shared__ big sk[32];
__shared__ big smulbuff[32];
__shared__ big sres[64];
__shared__ big sp;
__shared__ big sg;
__shared__ big sy;
__shared__ uint s[4200];
// Carve the flat shared pool into per-purpose regions.
uint *spval = s;
uint *sgval = (uint*)&spval[sizebig];
uint *syval = (uint*)&sgval[sizebig];
uint *sresval = (uint*)&syval[sizebig];
uint *smulbuffval = (uint*)&sresval[2*sizebig*32*2];
//uint *sminbuffval = (uint*)&smulbuffval[2*sizebig*128];
//uint *sminbuffval = (uint*)&sresval[2*sizebig*128*2];
uint *smval = (uint*)&smulbuffval[2*sizebig*32];
uint *skval = (uint*)&smval[sizebig*32];
// Stage inputs into shared memory; p/g/y are redundantly written by every
// thread (same values), m/k are per-thread blocks.
for (int i = 0; i < sizebig; i++)
{
spval[i] = p[i];
sgval[i] = g[i];
syval[i] = y[i];
smval[jdx*sizebig+i] = m[idx*sizebig + i];
skval[jdx*sizebig+i] = k[idx*sizebig + i];
}
// Wire up the big descriptors to their shared-memory limb regions.
sp.size = sizebig;
sg.size = sizebig;
sy.size = sizebig;
sm[jdx].size = sizebig;
sk[jdx].size = sizebig;
sp.value = spval;
sg.value = sgval;
sy.value = syval;
sm[jdx].value = (uint*)&smval[jdx*sizebig];
sk[jdx].value = (uint*)&skval[jdx*sizebig];
sres[2*jdx].value = (uint*)&sresval[jdx*sizebig*4];
sres[2*jdx+1].value = (uint*)&sresval[jdx*sizebig*4+sizebig*2];
smulbuff[jdx].value = (uint*)&smulbuffval[jdx*sizebig*2];
// sminbuff[jdx].value = (uint*)&sminbuffval[jdx*sizebig];
// All staging must be visible before any thread starts computing.
__syncthreads();
//uint* minbuff = (uint*) malloc(sizeof(uint) * sizebig);
// Subtraction scratch lives in global memory (buffmin), 2*sizebig per thread.
enkripsi(sm + jdx, sk + jdx, &sg, &sp, &sy, sres + 2*jdx, buffmin + 2 *sizebig * idx, smulbuff + jdx);
// Write back the two ciphertext halves and their limb counts.
ressize[2*idx] = sres[2*jdx].size;
ressize[2*idx + 1] = sres[2*jdx + 1].size;
for (int i = 0; i < sres[2*jdx].size; i++)
{
resval[2 * idx * sizebig * 2 + i] = sres[2*jdx].value[i];
}
for (int i = 0; i < sres[2*jdx+1].size; i++)
{
resval[(2 * idx + 1)* sizebig * 2 + i] = sres[2*jdx+1].value[i];
}
}
// Host wrapper: copies key material (p,g,y) and per-block plaintexts m and
// ephemeral keys k to the device, launches kernelenk over
// dimensigrid x dimensiblok threads (one thread per data block), and copies
// the ciphertext limbs/sizes back.
// NOTE(review): none of the HIP calls are error-checked; a failed alloc or
// launch is silent. buffmul is allocated but unused by the kernel.
void CUDAenk(uint *p, uint *g, uint *y, uint *m, uint *k, uint *resval, char *ressize) {
//=====================BAGIAN G, P, DAN Y ====================================//
char *devressize;
uint *devp, *devg, *devy, *devm, *devk, *devresval, *buffmin, *buffmul;
hipMalloc((void**)&devp, sizebig * sizeof(uint));
hipMalloc((void**)&devg, sizebig * sizeof(uint));
hipMalloc((void**)&devy, sizebig * sizeof(uint));
hipMalloc((void**)&devm, banyakdata * sizebig * sizeof(uint));
hipMalloc((void**)&devk, banyakdata * sizebig * sizeof(uint));
hipMalloc((void**)&devresval, 2 * banyakdata * 2 * sizebig * sizeof(uint));
hipMalloc((void**)&devressize, 2 * banyakdata * sizeof(char));
hipMalloc((void**)&buffmin, banyakdata * sizebig * 2 * sizeof(uint));
hipMalloc((void**)&buffmul, banyakdata * sizebig * 2 * sizeof(uint));
hipMemcpy(devp, p, sizebig * sizeof(uint), hipMemcpyHostToDevice);
hipMemcpy(devg, g, sizebig * sizeof(uint), hipMemcpyHostToDevice);
hipMemcpy(devy, y, sizebig * sizeof(uint), hipMemcpyHostToDevice);
hipMemcpy(devm, m, banyakdata * sizebig * sizeof(uint), hipMemcpyHostToDevice);
hipMemcpy(devk, k, banyakdata * sizebig * sizeof(uint), hipMemcpyHostToDevice);
kernelenk << <dimensigrid, dimensiblok >> >(devp, devg, devy, devm, devk, devresval, devressize, buffmin, buffmul);
hipDeviceSynchronize();
// COPY FROM DEVICE TO HOST HERE
hipMemcpy(ressize, devressize, 2 * banyakdata, hipMemcpyDeviceToHost);
hipMemcpy(resval, devresval, 2 * banyakdata * 2 * sizebig * sizeof(uint), hipMemcpyDeviceToHost);
hipFree(devp);
hipFree(devg);
hipFree(devy);
hipFree(devm);
hipFree(devk);
hipFree(devresval);
hipFree(devressize);
hipFree(buffmin);
hipFree(buffmul);
}
// Generates ElGamal test inputs into caller-provided arrays: modulus limbs
// (pval), generator (gval), public key (yval = g^x mod p), plaintext blocks
// (mval) and ephemeral keys (kval). Uses a fixed seed for reproducibility.
// NOTE(review): rand() % UINT_MAX is biased and on many platforms rand()
// only yields 15/31 bits, so limbs are not uniform 32-bit values; the
// p/g/x/y/mulbuff/minbuff heap allocations are never freed (one-shot init).
void init(uint *pval, uint *gval, uint *yval, uint *mval, uint *kval){
srand(2018);
big *p, *g, *x, *y;
p = (big*)malloc(sizeof(big));
g = (big*)malloc(sizeof(big));
x = (big*)malloc(sizeof(big));
y = (big*)malloc(sizeof(big));
p->size = sizebig;
p->value = pval;
p->value[0] = UINT_MAX;
for (int i = 1; i < p->size; i++)
{
//p->value[i] = 2357;
p->value[i] = rand() % UINT_MAX;
}
// Public key component g (generator)
g->size = sizebig;
g->value = gval;
for (int i = 0; i < g->size; i++)
{
// g->value[i] = 2;
g->value[i] = rand() % UINT_MAX;
}
// Private key x
x->size = sizebig;
x->value = (uint*) malloc(x->size * sizeof(uint));
for (int i = 0; i < x->size; i++)
{
// x->value[i] = 1751;
x->value[i] = rand() % UINT_MAX;
}
// Compute the public key y = (g^x) mod p
big* mulbuff = (big*) malloc(sizeof(big));
mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2);
uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2);
y->value = (uint*) malloc(sizeof(uint) * sizebig * 2);
carikunciy(g,x,p,y,minbuff,mulbuff);
for (int i = 0; i < sizebig; i++)
{
yval[i] = y->value[i];
}
// printf("y size %d : %u\n", y->size, y->value[0]);
//========================================================//
// Plaintext blocks and per-block ephemeral keys k
for(int i = 0 ; i < banyakdata * sizebig ; i++){
// mval[i] = 1001;
mval[i] = rand() % UINT_MAX;
// kval[i] = 77;
kval[i] = rand() % UINT_MAX;
}
}
// Debug helper: dump the first 16 limbs of a big-number value array.
void printbig(uint* val){
	int i = 0;
	while (i < 16) {
		printf("val i = %u\n", val[i]);
		++i;
	}
}
// Driver: allocates host buffers, generates keys and data via init(), runs
// the GPU encryption pass (CUDAenk), then releases everything.
int main(){
char *ressize;
uint *p, *g, *y, *m, *k, *resval;
p = (uint*) malloc(sizebig * sizeof(uint));
g = (uint*) malloc(sizebig * sizeof(uint));
y = (uint*) malloc(sizebig * sizeof(uint));
m = (uint*) malloc(banyakdata * sizebig * sizeof(uint));
k = (uint*) malloc(banyakdata * sizebig * sizeof(uint));
// Each of the banyakdata blocks yields two ciphertext halves of up to
// 2*sizebig limbs each, stored at a fixed stride.
resval = (uint*) malloc(2 * banyakdata * 2 * sizebig * sizeof(uint));
ressize = (char*) malloc(2 * banyakdata * sizeof(char));
init(p,g,y,m,k);
// printf("Encrypting...\n");
//========================================================//
CUDAenk(p,g,y,m,k,resval,ressize);
// for (int i = 0; i < 5; i++)
// {
// printf("Cipher %d size %d : %u\n",i, ressize[i], resval[i*2*sizebig]);
// }
// printf("Cipher ... : ...\n");
// printf("Cipher %d size %d : %u\n",banyakdata*2-2, ressize[banyakdata*2-2], resval[(banyakdata*2-2) * 2 * sizebig]);
// printf("Cipher %d size %d : %u\n",banyakdata*2-1, ressize[banyakdata*2-1], resval[(banyakdata*2-1) * 2 * sizebig]);
free(p);
free(g);
free(y);
free(m);
free(k);
free(resval);
free(ressize);
return 0;
}
| 79fd7d37ed98bb885d7a66a292deb2130749cf43.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
typedef unsigned long long ul;
typedef unsigned int uint;
int banyakdata = 5120;
int dimensigrid = 160;
int dimensiblok = 32;
int sizebig = 16;
// Arbitrary-precision unsigned integer: little-endian array of 32-bit limbs
// (value[0] is least significant). The value array is always caller-owned.
typedef struct {
char size;    // number of limbs currently in use
uint* value;  // limb storage; capacity must exceed size for mul/mod results
}big;
// Bit-length of a: bits in the most-significant limb plus 32 for each limb
// below it. Assumes a->size >= 1.
__host__ __device__ short ukuranbit(big *a) {
	short bits = (short)((a->size - 1) * 32);
	for (uint top = a->value[a->size - 1]; top != 0; top >>= 1) {
		bits++;
	}
	return bits;
}
// Returns 1 if bit `count` (counted from the LSB of limb 0) of a is set.
__host__ __device__ char getbit(big* a, short count) {
	uint limb = a->value[count / 32];
	uint mask = (uint)1 << (count % 32);
	return (limb & mask) ? 1 : 0;
}
// Returns limb `noblok` of (num << geser) for 0 <= geser < 32.
// The geser == 0 / noblok == 0 guards avoid the undefined 32-bit shift by 32
// and an out-of-range limb read below limb 0; the noblok == num->size guard
// handles the carry-out limb above the top.
__host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) {
uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser));
uint part2 = (noblok == num->size) ? 0 : (num->value[noblok] << geser);
return part1 | part2;
}
// res = a * b (schoolbook multiplication, base 2^32). res->value must have
// room for a->size + b->size limbs and res must not alias a or b.
// Fixes from review:
//  * the inner for-init re-declared the carry as a char, shadowing the uint
//    carry — it was truncated in the loop and the top limb of each row was
//    always written as 0;
//  * aval * bval was a 32-bit multiply that overflowed before widening;
//    it is now widened to 64 bits first;
//  * limbs are base 2^32, so the product splits as (uint)temp / temp >> 32,
//    not % UINT_MAX and / UINT_MAX (UINT_MAX is 2^32 - 1, the wrong base).
__host__ __device__ void kali(big *a, big *b, big* res) {
	if (a->size == 0 || b->size == 0) {
		res->size = 0;
		return ;
	}
	char ukurana = a->size;
	char ukuranb = b->size;
	res->size = ukurana + ukuranb;
	for (char i = 0; i < res->size; i++) {
		res->value[i] = 0;
	}
	for (char i = 0; i < ukurana; i++) {
		uint aval = a->value[i];
		if (aval==0){
			continue;
		}
		uint lebih = 0;  // carry propagated along row i
		for (char j = 0; j < ukuranb; j++) {
			ul temp = (ul)res->value[i+j] + (ul)aval * b->value[j] + lebih;
			res->value[i+j] = (uint)temp;      // low 32 bits
			lebih = (uint)(temp >> 32);        // high 32 bits carry out
		}
		// Position i+ukuranb has not been touched by earlier rows, so a
		// plain store of the final carry is correct.
		res->value[i+ukuranb] = lebih;
	}
	// Normalize: drop all leading zero limbs (matches modulo()'s convention).
	while (res->size > 0 && res->value[res->size - 1] == 0){
		res->size--;
	}
}
// res = a mod b, by bit-at-a-time binary long division: for each candidate
// shift (32*i + i2) it trial-subtracts b << shift into minbuff and commits
// the subtraction only when it does not borrow past the top.
// Requirements: res->value has capacity a->size + 1; minbuff has capacity
// a->size + 1 (scratch); b is normalized (top limb non-zero).
__host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) {
// Start with res = a (copy; a is not modified).
res->size = a->size;
for(char i = 0 ; i < res->size ;i++){
res->value[i] = a->value[i];
}
if (a->size < b->size) {
return ;
}
char i, j, k;
char i2;
uint temp ;
char borrowIn, borrowOut;
char ukurana = a->size;
char ukuranb = b->size;
// Extra zero limb so the trial subtraction can read one limb above a.
res->value[res->size] = 0;
res->size++;
i = ukurana - ukuranb + 1;
while (i > 0) {
i--;
i2 = 32;
while (i2 > 0) {
i2--;
// Trial subtraction of (b << i2) at limb offset i into minbuff.
for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) {
temp = res->value[k] - getShiftedBlock(b, j, i2);
borrowOut = (temp > res->value[k]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
minbuff[k] = temp;
borrowIn = borrowOut;
}
// Propagate a remaining borrow through the higher limbs.
for (; k < ukurana && borrowIn; k++) {
borrowIn = (res->value[k] == 0);
minbuff[k] = res->value[k] - 1;
}
// Commit only if the subtraction did not go negative.
if (!borrowIn) {
while (k > i) {
k--;
res->value[k] = minbuff[k];
}
}
}
}
// Normalize: strip leading zero limbs.
while (res->size > 0 && res->value[res->size - 1] == 0)
res->size--;
}
// res = a + b for a small (char-sized) addend b, with full carry
// propagation. res->value must have capacity a->size + 1 for the case where
// the carry ripples past the top limb.
void tambah(big* a, char b, big* res) {
if (a->size == 0) {
res->size = 1;
res->value[0] = uint(b);
return;
}
char carryIn = 0;
uint temp;
// Provisionally one limb larger; trimmed below if no final carry.
res->size = a->size + 1;
res->value[0] = a->value[0] + (uint)b;
carryIn = (res->value[0] < a->value[0]);  // unsigned wrap => carry
char i = 1;
// Ripple the carry upward while it persists.
for (; i < a->size && carryIn; i++) {
temp = a->value[i] + (uint)1;
carryIn = (temp == 0);
res->value[i] = temp;
}
// Remaining limbs are unchanged.
for (; i < a->size; i++)
res->value[i] = a->value[i];
if (carryIn)
res->value[i] = 1;
else
res->size--;
}
// res = a - b with borrow propagation; assumes a >= b (a borrow out of the
// top limb is silently ignored). res->value needs capacity a->size.
// Fixes from review:
//  * when b->size == 0 the old code zero-filled res and returned, producing
//    0 instead of a — it now copies a;
//  * the redundant zero-fill pass is gone (every limb below a->size is
//    written by the loops anyway);
//  * the final trim no longer reads value[-1] when res->size is 0.
void kurang(big* a, big *b, big* res) {
	res->size = a->size;
	if (b->size == 0) {
		// a - 0 == a.
		for (char i = 0; i < a->size; i++)
			res->value[i] = a->value[i];
		return;
	}
	char borrowIn, borrowOut;
	uint temp;
	char i;
	// Subtract overlapping limbs, tracking the borrow.
	for (i = 0, borrowIn = 0; i < b->size; i++) {
		temp = a->value[i] - b->value[i];
		borrowOut = (temp > a->value[i]);  // unsigned wrap => borrow
		if (borrowIn) {
			borrowOut |= (temp == 0);
			temp--;
		}
		res->value[i] = temp;
		borrowIn = borrowOut;
	}
	// Ripple any remaining borrow through a's higher limbs.
	for (; i < a->size && borrowIn; i++) {
		borrowIn = (a->value[i] == 0);
		res->value[i] = a->value[i] - 1;
	}
	for (; i < a->size; i++)
		res->value[i] = a->value[i];
	// Trim a single leading zero limb, as before.
	if (res->size > 0 && res->value[res->size - 1] == 0){
		res->size--;
	}
}
// res = a^b mod c by left-to-right binary (square-and-multiply)
// exponentiation. mulbuff holds intermediate products (capacity 2*c->size)
// and minbuff is scratch for modulo() (capacity 2*c->size).
__host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){
//printf("c val 0 %u\n", c->value[0]);
// Start from res = 1.
res->size = 1;
res->value[0] = 1;
short i = ukuranbit(b);
// Scan exponent bits from most to least significant.
while (i > 0) {
i--;
// Square step: res = res^2 mod c.
kali(res,res,mulbuff);
modulo(mulbuff,c,res,minbuff);
// Multiply step when bit i of the exponent is set: res = res*a mod c.
if (getbit(b,i)) {
kali(res, a, mulbuff);
modulo(mulbuff, c, res, minbuff);
}
}
}
// ElGamal encryption of one plaintext block m with ephemeral key k:
// writes the ciphertext pair into res[0] and res[1].
// res[0] = g^k mod p, res[1] = (y^k * m) mod p.
__device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, uint *minbuff, big *mulbuff) {
//printf("res adlaah tanga\n");
// Cipher block 1: c1 = g^k mod p
modexp(g,k,p,res,minbuff,mulbuff);
//printf("res 0 val 0 %u\n", res->value[0]);
// Cipher block 2: c2 = y^k * m mod p
modexp(y, k, p, res + 1,minbuff,mulbuff);
kali(res + 1, m, mulbuff);
modulo(mulbuff, p, res+1, minbuff);
//printf("res 1 val 0 %u\n", (res+1)->value[0]);
}
// Derive the ElGamal public key: y = g^x mod p (x is the private key).
// minbuff/mulbuff are scratch buffers forwarded to modexp().
void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){
modexp(g,x,p,y,minbuff,mulbuff);
}
// Encryption kernel: one thread encrypts one 16-limb plaintext block.
// Expects blockDim.x == 32 (shared arrays are sized for 32 threads) and
// sizebig == 16 limbs. Shared memory layout inside s[4200]:
//   p | g | y | per-thread result pairs | per-thread mul scratch | m | k.
// Outputs: ressize[2*idx..] = limb counts, resval = ciphertext limbs at a
// fixed stride of 2*sizebig uints per ciphertext half.
// NOTE(review): the buffmul parameter is accepted but never used (the
// shared-memory smulbuff is used instead) — confirm before removing it.
__global__ void kernelenk(uint *p, uint *g, uint *y, uint *m, uint *k, uint *resval, char *ressize, uint *buffmin, uint *buffmul){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int jdx = threadIdx.x;
int sizebig = 16;
// int banyakdata = 256;
__shared__ big sm[32];
__shared__ big sk[32];
__shared__ big smulbuff[32];
__shared__ big sres[64];
__shared__ big sp;
__shared__ big sg;
__shared__ big sy;
__shared__ uint s[4200];
// Carve the flat shared pool into per-purpose regions.
uint *spval = s;
uint *sgval = (uint*)&spval[sizebig];
uint *syval = (uint*)&sgval[sizebig];
uint *sresval = (uint*)&syval[sizebig];
uint *smulbuffval = (uint*)&sresval[2*sizebig*32*2];
//uint *sminbuffval = (uint*)&smulbuffval[2*sizebig*128];
//uint *sminbuffval = (uint*)&sresval[2*sizebig*128*2];
uint *smval = (uint*)&smulbuffval[2*sizebig*32];
uint *skval = (uint*)&smval[sizebig*32];
// Stage inputs into shared memory; p/g/y are redundantly written by every
// thread (same values), m/k are per-thread blocks.
for (int i = 0; i < sizebig; i++)
{
spval[i] = p[i];
sgval[i] = g[i];
syval[i] = y[i];
smval[jdx*sizebig+i] = m[idx*sizebig + i];
skval[jdx*sizebig+i] = k[idx*sizebig + i];
}
// Wire up the big descriptors to their shared-memory limb regions.
sp.size = sizebig;
sg.size = sizebig;
sy.size = sizebig;
sm[jdx].size = sizebig;
sk[jdx].size = sizebig;
sp.value = spval;
sg.value = sgval;
sy.value = syval;
sm[jdx].value = (uint*)&smval[jdx*sizebig];
sk[jdx].value = (uint*)&skval[jdx*sizebig];
sres[2*jdx].value = (uint*)&sresval[jdx*sizebig*4];
sres[2*jdx+1].value = (uint*)&sresval[jdx*sizebig*4+sizebig*2];
smulbuff[jdx].value = (uint*)&smulbuffval[jdx*sizebig*2];
// sminbuff[jdx].value = (uint*)&sminbuffval[jdx*sizebig];
// All staging must be visible before any thread starts computing.
__syncthreads();
//uint* minbuff = (uint*) malloc(sizeof(uint) * sizebig);
// Subtraction scratch lives in global memory (buffmin), 2*sizebig per thread.
enkripsi(sm + jdx, sk + jdx, &sg, &sp, &sy, sres + 2*jdx, buffmin + 2 *sizebig * idx, smulbuff + jdx);
// Write back the two ciphertext halves and their limb counts.
ressize[2*idx] = sres[2*jdx].size;
ressize[2*idx + 1] = sres[2*jdx + 1].size;
for (int i = 0; i < sres[2*jdx].size; i++)
{
resval[2 * idx * sizebig * 2 + i] = sres[2*jdx].value[i];
}
for (int i = 0; i < sres[2*jdx+1].size; i++)
{
resval[(2 * idx + 1)* sizebig * 2 + i] = sres[2*jdx+1].value[i];
}
}
// Host wrapper: copies key material (p,g,y) and per-block plaintexts m and
// ephemeral keys k to the device, launches kernelenk over
// dimensigrid x dimensiblok threads (one thread per data block), and copies
// the ciphertext limbs/sizes back.
// NOTE(review): none of the CUDA calls are error-checked; a failed alloc or
// launch is silent. buffmul is allocated but unused by the kernel.
void CUDAenk(uint *p, uint *g, uint *y, uint *m, uint *k, uint *resval, char *ressize) {
//=====================BAGIAN G, P, DAN Y ====================================//
char *devressize;
uint *devp, *devg, *devy, *devm, *devk, *devresval, *buffmin, *buffmul;
cudaMalloc((void**)&devp, sizebig * sizeof(uint));
cudaMalloc((void**)&devg, sizebig * sizeof(uint));
cudaMalloc((void**)&devy, sizebig * sizeof(uint));
cudaMalloc((void**)&devm, banyakdata * sizebig * sizeof(uint));
cudaMalloc((void**)&devk, banyakdata * sizebig * sizeof(uint));
cudaMalloc((void**)&devresval, 2 * banyakdata * 2 * sizebig * sizeof(uint));
cudaMalloc((void**)&devressize, 2 * banyakdata * sizeof(char));
cudaMalloc((void**)&buffmin, banyakdata * sizebig * 2 * sizeof(uint));
cudaMalloc((void**)&buffmul, banyakdata * sizebig * 2 * sizeof(uint));
cudaMemcpy(devp, p, sizebig * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(devg, g, sizebig * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(devy, y, sizebig * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(devm, m, banyakdata * sizebig * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(devk, k, banyakdata * sizebig * sizeof(uint), cudaMemcpyHostToDevice);
kernelenk << <dimensigrid, dimensiblok >> >(devp, devg, devy, devm, devk, devresval, devressize, buffmin, buffmul);
cudaDeviceSynchronize();
// COPY FROM DEVICE TO HOST HERE
cudaMemcpy(ressize, devressize, 2 * banyakdata, cudaMemcpyDeviceToHost);
cudaMemcpy(resval, devresval, 2 * banyakdata * 2 * sizebig * sizeof(uint), cudaMemcpyDeviceToHost);
cudaFree(devp);
cudaFree(devg);
cudaFree(devy);
cudaFree(devm);
cudaFree(devk);
cudaFree(devresval);
cudaFree(devressize);
cudaFree(buffmin);
cudaFree(buffmul);
}
// Generates ElGamal test inputs into caller-provided arrays: modulus limbs
// (pval), generator (gval), public key (yval = g^x mod p), plaintext blocks
// (mval) and ephemeral keys (kval). Uses a fixed seed for reproducibility.
// NOTE(review): rand() % UINT_MAX is biased and on many platforms rand()
// only yields 15/31 bits, so limbs are not uniform 32-bit values; the
// p/g/x/y/mulbuff/minbuff heap allocations are never freed (one-shot init).
void init(uint *pval, uint *gval, uint *yval, uint *mval, uint *kval){
srand(2018);
big *p, *g, *x, *y;
p = (big*)malloc(sizeof(big));
g = (big*)malloc(sizeof(big));
x = (big*)malloc(sizeof(big));
y = (big*)malloc(sizeof(big));
p->size = sizebig;
p->value = pval;
p->value[0] = UINT_MAX;
for (int i = 1; i < p->size; i++)
{
//p->value[i] = 2357;
p->value[i] = rand() % UINT_MAX;
}
// Public key component g (generator)
g->size = sizebig;
g->value = gval;
for (int i = 0; i < g->size; i++)
{
// g->value[i] = 2;
g->value[i] = rand() % UINT_MAX;
}
// Private key x
x->size = sizebig;
x->value = (uint*) malloc(x->size * sizeof(uint));
for (int i = 0; i < x->size; i++)
{
// x->value[i] = 1751;
x->value[i] = rand() % UINT_MAX;
}
// Compute the public key y = (g^x) mod p
big* mulbuff = (big*) malloc(sizeof(big));
mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2);
uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2);
y->value = (uint*) malloc(sizeof(uint) * sizebig * 2);
carikunciy(g,x,p,y,minbuff,mulbuff);
for (int i = 0; i < sizebig; i++)
{
yval[i] = y->value[i];
}
// printf("y size %d : %u\n", y->size, y->value[0]);
//========================================================//
// Plaintext blocks and per-block ephemeral keys k
for(int i = 0 ; i < banyakdata * sizebig ; i++){
// mval[i] = 1001;
mval[i] = rand() % UINT_MAX;
// kval[i] = 77;
kval[i] = rand() % UINT_MAX;
}
}
// Debug helper: dump the first 16 limbs of a big-number value array.
void printbig(uint* val){
	int i = 0;
	while (i < 16) {
		printf("val i = %u\n", val[i]);
		++i;
	}
}
// Driver: allocates host buffers, generates keys and data via init(), runs
// the GPU encryption pass (CUDAenk), then releases everything.
int main(){
char *ressize;
uint *p, *g, *y, *m, *k, *resval;
p = (uint*) malloc(sizebig * sizeof(uint));
g = (uint*) malloc(sizebig * sizeof(uint));
y = (uint*) malloc(sizebig * sizeof(uint));
m = (uint*) malloc(banyakdata * sizebig * sizeof(uint));
k = (uint*) malloc(banyakdata * sizebig * sizeof(uint));
// Each of the banyakdata blocks yields two ciphertext halves of up to
// 2*sizebig limbs each, stored at a fixed stride.
resval = (uint*) malloc(2 * banyakdata * 2 * sizebig * sizeof(uint));
ressize = (char*) malloc(2 * banyakdata * sizeof(char));
init(p,g,y,m,k);
// printf("Encrypting...\n");
//========================================================//
CUDAenk(p,g,y,m,k,resval,ressize);
// for (int i = 0; i < 5; i++)
// {
// printf("Cipher %d size %d : %u\n",i, ressize[i], resval[i*2*sizebig]);
// }
// printf("Cipher ... : ...\n");
// printf("Cipher %d size %d : %u\n",banyakdata*2-2, ressize[banyakdata*2-2], resval[(banyakdata*2-2) * 2 * sizebig]);
// printf("Cipher %d size %d : %u\n",banyakdata*2-1, ressize[banyakdata*2-1], resval[(banyakdata*2-1) * 2 * sizebig]);
free(p);
free(g);
free(y);
free(m);
free(k);
free(resval);
free(ressize);
return 0;
}
|
52d7e1b67e296a7d75b5dab67c8f27964ef4fd27.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "kernel.h"
#include "CU_geometry.h"
// Smoke-test kernel: prints a greeting and constructs a CGeoPoint on the
// device, printing its coordinates to verify device-side class methods work.
__global__ void kernel()
{
	printf("hello world!\n");
	CGeoPoint point = CGeoPoint(450, 0);
	printf("%.2f, %.2f\n", point.x(), point.y());
}
// Launches the smoke-test kernel once and reports its wall time using HIP
// events. Fix from review: the two events were created but never destroyed,
// leaking a pair of event objects on every call.
void test() {
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);
	kernel << <1, 1 >> >();
	hipEventRecord(stop, 0);
	//confirm that all things have been done before "stop event"
	hipEventSynchronize(stop);
	float elapseTime;
	hipEventElapsedTime(&elapseTime, start, stop);
	printf("Time for I/O : %.5f ms\n", elapseTime);
	// Release the timing events (previously leaked).
	hipEventDestroy(start);
	hipEventDestroy(stop);
}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "kernel.h"
#include "CU_geometry.h"
// Smoke-test kernel: prints a greeting and constructs a CGeoPoint on the
// device, printing its coordinates to verify device-side class methods work.
__global__ void kernel()
{
	printf("hello world!\n");
	CGeoPoint point = CGeoPoint(450, 0);
	printf("%.2f, %.2f\n", point.x(), point.y());
}
// Launches the smoke-test kernel once and reports its wall time using CUDA
// events. Fix from review: the two events were created but never destroyed,
// leaking a pair of event objects on every call.
void test() {
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	kernel << <1, 1 >> >();
	cudaEventRecord(stop, 0);
	//confirm that all things have been done before "stop event"
	cudaEventSynchronize(stop);
	float elapseTime;
	cudaEventElapsedTime(&elapseTime, start, stop);
	printf("Time for I/O : %.5f ms\n", elapseTime);
	// Release the timing events (previously leaked).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
}
8d007b86db386744d976b543aafaa4e9845b9246.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layers/func/eltwise_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
static __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
int* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype maxval = -FLT_MAX;
int maxidx = -1;
if (bottom_data_a[index] > bottom_data_b[index]) {
// only update for very first bottom_data blob (blob_idx == 0)
if (blob_idx == 0) {
maxval = bottom_data_a[index];
top_data[index] = maxval;
maxidx = blob_idx;
mask[index] = maxidx;
}
} else {
maxval = bottom_data_b[index];
top_data[index] = maxval;
maxidx = blob_idx + 1;
mask[index] = maxidx;
}
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
int* mask = NULL;
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
if (op_ == "prod")
{
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
for (int i = 2; i < bottom.size(); ++i) {
caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
}
}
else if (op_ == "sum")
{
caffe_gpu_set(count, Dtype(0.), top_data);
// TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
for (int i = 0; i < bottom.size(); ++i) {
caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
}
}
else if (op_ == "max")
{
mask = max_idx_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
for (int i = 2; i < bottom.size(); ++i) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
}
}
else
LOG(FATAL) << "Unknown elementwise operation.";
}
// Backward kernel for the element-wise max: the gradient flows only to the
// bottom blob that produced the maximum in the forward pass.  `mask` holds
// the winning blob index per element (written by MaxForward); positions
// where this blob did not win receive a zero gradient.
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
    const int blob_idx, const int* mask, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const bool won_forward_max = (mask[index] == blob_idx);
    bottom_diff[index] = won_forward_max ? top_diff[index] : Dtype(0);
  }
}
// GPU backward pass for the element-wise layer.  For each bottom blob i the
// gradient is:
//   "prod": top_diff times the product of all *other* bottoms.  With
//           stable_prod_grad_ the product is formed explicitly, avoiding the
//           top/bottom division (which misbehaves when bottom_data has
//           zeros); otherwise top_data / bottom_data is used.
//   "sum" : coeffs_[i] * top_diff.
//   "max" : top_diff routed through max_idx_ via the MaxBackward kernel.
// Blobs with backwards_[i] == false have their diff zeroed afterwards.
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
  const int* mask = NULL;
  const int count = top[0]->count();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  for (int i = 0; i < bottom.size(); ++i)
  {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
    if (op_ == "prod")
    {
      if (stable_prod_grad_) {
        // Multiply all the other bottoms together into bottom_diff.
        bool initialized = false;
        for (int j = 0; j < bottom.size(); ++j) {
          if (i == j) { continue; }
          if (!initialized) {
            caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
            initialized = true;
          } else {
            caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
                bottom_diff);
          }
        }
      } else {
        caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
      }
      caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
    }
    else if (op_ == "sum")
    {
      if (coeffs_[i] == Dtype(1.)) {
        caffe_copy(count, top_diff, bottom_diff);
      } else {
        caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
      }
    }
    else if (op_ == "max")
    {
      mask = max_idx_.gpu_data();
      // BUGFIX: the hipify conversion had spliced the hipLaunchKernelGGL
      // macro text into the NOLINT comment, leaving a malformed launch
      // (a bare "MaxBackward<Dtype>" followed by a dangling argument
      // list).  Restore a proper kernel launch.
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          count, top_diff, i, mask, bottom_diff);
    }
    else
      LOG(FATAL) << "Unknown elementwise operation.";
    // Gradient flow into this bottom can be disabled per-blob.
    if (backwards_[i] == false)
      caffe_gpu_set(bottom[i]->count(),Dtype(0),bottom[i]->mutable_gpu_diff());
  }
}
// Second-order forward pass: intentionally a no-op for this layer.
template <typename Dtype>
void EltwiseLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
}
INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer);
} // namespace caffe
| 8d007b86db386744d976b543aafaa4e9845b9246.cu | #include <cfloat>
#include <vector>
#include "caffe/layers/func/eltwise_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Element-wise max forward kernel, applied once per additional bottom blob.
// `bottom_data_a` holds the running maximum accumulated so far (bottom[0] on
// the first invocation, the partially written top afterwards) and
// `bottom_data_b` is the next bottom blob; `blob_idx` identifies the blob
// behind `a` in the chain of calls (see Forward_gpu, which passes 0 then i-1).
// For each element, top_data receives max(a, b) and mask receives the index
// of the winning bottom blob (consumed later by MaxBackward to route
// gradients).  When `a` wins on a later call (blob_idx != 0) nothing is
// written: top_data/mask already hold the correct values from the previous
// invocation.
template <typename Dtype>
static __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
    const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
    int* mask) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    Dtype maxval = -FLT_MAX;
    int maxidx = -1;
    if (bottom_data_a[index] > bottom_data_b[index]) {
      // only update for very first bottom_data blob (blob_idx == 0)
      if (blob_idx == 0) {
        maxval = bottom_data_a[index];
        top_data[index] = maxval;
        maxidx = blob_idx;
        mask[index] = maxidx;
      }
    } else {
      // b wins (ties included): overwrite the running max and record the
      // winner as blob_idx + 1, i.e. the index of the blob behind `b`.
      maxval = bottom_data_b[index];
      top_data[index] = maxval;
      maxidx = blob_idx + 1;
      mask[index] = maxidx;
    }
  }
}
// GPU forward pass for the element-wise layer.
// op_ selects how the bottom blobs are combined into top[0]:
//   "prod": element-wise product of all bottom blobs.
//   "sum" : weighted sum; per-blob weights come from coeffs_.
//   "max" : element-wise maximum; max_idx_ records which bottom blob won at
//           each position so Backward_gpu can route gradients.
// All bottom blobs are assumed to have the same count as top[0] (not checked
// here -- presumably validated in the layer's Reshape; confirm).
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
  int* mask = NULL;
  const int count = top[0]->count();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (op_ == "prod")
  {
    // Multiply the first two blobs, then fold in the rest one at a time.
    caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
        top_data);
    for (int i = 2; i < bottom.size(); ++i) {
      caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
    }
  }
  else if (op_ == "sum")
  {
    // Zero the output, then accumulate coeff[i] * bottom[i] with axpy.
    caffe_gpu_set(count, Dtype(0.), top_data);
    // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
    for (int i = 0; i < bottom.size(); ++i) {
      caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
    }
  }
  else if (op_ == "max")
  {
    mask = max_idx_.mutable_gpu_data();
    // Seed with max(bottom[0], bottom[1]) ...
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
    for (int i = 2; i < bottom.size(); ++i) {
      // ... then compare the running max in top_data against each
      // remaining bottom blob.
      // NOLINT_NEXT_LINE(whitespace/operators)
      MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
    }
  }
  else
    LOG(FATAL) << "Unknown elementwise operation.";
}
// Backward kernel for the element-wise max: the gradient flows only to the
// bottom blob that produced the maximum in the forward pass.  `mask` holds
// the winning blob index per element (written by MaxForward); positions
// where this blob did not win receive a zero gradient.
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
    const int blob_idx, const int* mask, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const bool won_forward_max = (mask[index] == blob_idx);
    bottom_diff[index] = won_forward_max ? top_diff[index] : Dtype(0);
  }
}
// GPU backward pass for the element-wise layer.  For each bottom blob i the
// gradient is:
//   "prod": top_diff times the product of all *other* bottoms.  With
//           stable_prod_grad_ the product is formed explicitly, avoiding the
//           top/bottom division (which misbehaves when bottom_data has
//           zeros); otherwise top_data / bottom_data is used.
//   "sum" : coeffs_[i] * top_diff.
//   "max" : top_diff routed through max_idx_ via the MaxBackward kernel.
// Blobs with backwards_[i] == false have their diff zeroed afterwards.
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
  const int* mask = NULL;
  const int count = top[0]->count();
  const Dtype* top_data = top[0]->gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  for (int i = 0; i < bottom.size(); ++i)
  {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
    if (op_ == "prod")
    {
      if (stable_prod_grad_) {
        // Multiply all the other bottoms together into bottom_diff.
        bool initialized = false;
        for (int j = 0; j < bottom.size(); ++j) {
          if (i == j) { continue; }
          if (!initialized) {
            caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
            initialized = true;
          } else {
            caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
                bottom_diff);
          }
        }
      } else {
        caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
      }
      caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
    }
    else if (op_ == "sum")
    {
      if (coeffs_[i] == Dtype(1.)) {
        caffe_copy(count, top_diff, bottom_diff);
      } else {
        caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
      }
    }
    else if (op_ == "max")
    {
      mask = max_idx_.gpu_data();
      MaxBackward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
          <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, top_diff, i, mask, bottom_diff);
    }
    else
      LOG(FATAL) << "Unknown elementwise operation.";
    // Gradient flow into this bottom can be disabled per-blob.
    if (backwards_[i] == false)
      caffe_gpu_set(bottom[i]->count(),Dtype(0),bottom[i]->mutable_gpu_diff());
  }
}
// Second-order forward pass: intentionally a no-op for this layer.
template <typename Dtype>
void EltwiseLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
}
INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer);
} // namespace caffe
|
c86b6713dabff9e41782fa670c35a91e215d1704.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void add_1024(long* a, long* b, long* c, long N) { //more simple and probably faster core but works only with 1024 or less elements in vector in this example
// Computes the dot product of a and b into c[0]: c is first filled with the
// element-wise products, then reduced in place by a halving tree reduction.
// NOTE(review): despite the name "add" and the comment above, this is a
// multiply-then-reduce (dot product).  The halving loop is only correct when
// N is a power of two (otherwise the element between the two halves is
// dropped each pass), and the kernel must be launched with a single block of
// exactly N threads -- confirm with the caller.
c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads();
long step = N / 2;
while (step != 0) {
if (threadIdx.x < step)
{
c[threadIdx.x] += c[threadIdx.x + step];
}
step /= 2;
// Barrier is outside the divergent if and `step` is uniform across the
// block, so every thread reaches it the same number of times.
__syncthreads();
}
} | c86b6713dabff9e41782fa670c35a91e215d1704.cu | #include "includes.h"
// Block-wide dot product of a and b, result left in c[0].
// Must be launched with a single block of at least N threads (N <= 1024);
// c is used as scratch and is overwritten.
// FIX: the original halving loop (step = N/2; step /= 2) silently dropped
// the element between the two halves whenever N was not a power of two.
// This version folds the upper ceil(active/2) half onto the lower half each
// pass and is correct for any N >= 1; for power-of-two N it performs exactly
// the same additions as before.
__global__ void add_1024(long* a, long* b, long* c, long N) {
	const long tid = threadIdx.x;
	if (tid < N) c[tid] = a[tid] * b[tid];
	__syncthreads();
	for (long active = N; active > 1; ) {
		const long half = (active + 1) / 2;  // ceil(active / 2)
		if (tid + half < active)
			c[tid] += c[tid + half];
		// `active` is uniform across the block, so all threads reach
		// this barrier the same number of times.
		__syncthreads();
		active = half;
	}
}
13cd039123448d9b102bf58a5ef5681fdb25cbb0.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA function for backrpojection using FDK weigts for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by Robert Bryll
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection2.hpp"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
// if (__err != hipSuccess) { \
// printf("%s \n", msg);\
// printf("%s \n", hipGetErrorString(__err));\
// } \
// TODO: Error logging
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, hipTextureType3D , hipReadModeElementType> tex;
// Scales every voxel of `image` (nVoxelX * nVoxelY * nVoxelZ floats) by a
// single constant factor.  Grid-stride loop: correct for any launch
// configuration, each thread walks the volume with stride gridDim*blockDim.
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
    const size_t nVoxels = geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ;
    for (size_t voxel = threadIdx.x + blockIdx.x * blockDim.x;
         voxel < nVoxels;
         voxel += gridDim.x*blockDim.x) {
        image[voxel] *= constant;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by Robert Bryll on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
Point3D projParamsArray2Host[7*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection)
__constant__ float projSinCosArray2Dev[3*PROJ_PER_KERNEL];
float projSinCosArray2Host[3*PROJ_PER_KERNEL];
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
// Main FDK backprojection kernel.
// Each thread owns one (X,Y) position and a vertical column of
// VOXELS_PER_THREAD voxels starting at startIndZ, and accumulates into that
// column the contribution of up to PROJ_PER_KERNEL projections per launch.
// Per-projection parameters are read from constant memory
// (projParamsArray2Dev / projSinCosArray2Dev), filled by the host before
// every launch.  currProjSetNumber is the index of the projection set being
// processed; totalNoOfProjections bounds the absolute projection index for
// the final, possibly partial, set.
__global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections)
{
    // Old kernel call signature:
    //hipLaunchKernelGGL(( kernelPixelBackprojection), dim3(grid),dim3(block), 0, 0, geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
    // We just read in most of the params from the constant memory instead of getting them from the param list.
    // This is because we now have MANY params, since single kernel processes more than one projection!
    /* __global__ void kernelPixelBackprojectionFDK(const Geometry geo,
     * float* image,
     * const int indAlpha,
     * const Point3D deltaX ,
     * const Point3D deltaY,
     * const Point3D deltaZ,
     * const Point3D xyzOrigin,
     * const Point3D xyzOffset,
     * const Point3D uv0Offset,
     * const float sinalpha,
     * const float cosalpha){
     */
    unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
    // unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
    unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
    //Make sure we dont go out of bounds
    // (bitwise | instead of || -- evaluates all three comparisons, avoiding
    // divergent short-circuit branches; the result is the same.)
    if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ)
        return;
    // We'll keep a local auxiliary array of values of a column of voxels that this thread will update
    float voxelColumn[VOXELS_PER_THREAD];
    // First we need to copy the curent 3D volume values from the column to our auxiliary array so that we can then
    // work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
    int colIdx;
    for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long indZ = startIndZ + colIdx;
        // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
        // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
        if(indZ>=geo.nVoxelZ)
            break; // break the loop.
        unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
        voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
        // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
    } // END copy 3D volume voxels to local array
    // Now iterate through projections
    for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
    {
        // Get the current parameters from parameter arrays in constant memory.
        int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
        // Our currImageVal will be updated by hovewer many projections we had left in the "remainder" - that's OK.
        if(indAlpha>=totalNoOfProjections)
            break;
        // 7*projNumber: SEVEN Point3D parameters are stored per projection
        // (deltaX, deltaY, deltaZ, xyzOrigin, xyzOffset, uv0Offset, source).
        Point3D deltaX = projParamsArray2Dev[7*projNumber];
        Point3D deltaY = projParamsArray2Dev[7*projNumber+1];
        Point3D deltaZ = projParamsArray2Dev[7*projNumber+2];
        Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3];
        Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4];
        Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5];
        Point3D S = projParamsArray2Dev[7*projNumber+6];
        // 3*projNumber: THREE floats are stored per projection (sin, cos, COR).
        float sinalpha = projSinCosArray2Dev[3*projNumber];
        float cosalpha = projSinCosArray2Dev[3*projNumber+1];
        float COR = projSinCosArray2Dev[3*projNumber+2];
        // Geometric trasnformations:
        //Source, scaled XYZ coordinates
        // Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
        for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
        {
            unsigned long indZ = startIndZ + colIdx;
            // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
            // be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
            if(indZ>=geo.nVoxelZ)
                break; // break the loop.
            // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
            Point3D P;
            P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
            P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
            P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
            // This is the vector defining the line from the source to the Voxel
            float vectX,vectY,vectZ;
            vectX=(P.x -S.x);
            vectY=(P.y -S.y);
            vectZ=(P.z -S.z);
            // Get the coordinates in the detector UV where the mid point of the voxel is projected.
            float t=(geo.DSO-geo.DSD /*-DOD*/ - S.x)/vectX;
            float y,z;
            y=vectY*t+S.y;
            z=vectZ*t+S.z;
            float u,v;
            u=y+geo.nDetecU/2-0.5;
            v=z+geo.nDetecV/2-0.5;
            float weigth;
            //
            //
            //Real coordinates of Voxel. Instead of reverting the tranformation, its less math (faster) to compute it from the indexes.
            Point3D realvoxel;
            realvoxel.x=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
            realvoxel.y=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y;
            realvoxel.z=-geo.sVoxelZ/2+geo.dVoxelZ/2 +indZ*geo.dVoxelZ +xyzOffset.z;
            //Real coords of Source
            // We already have S.x (geo.DSO), and S.y and S.z are always zero. we just need to rotate
            Point3D realS;
            realS.x= geo.DSO*cosalpha;
            realS.y=-geo.DSO*sinalpha;
            realS.z=0;
            // Real XYZ coordinates of Detector.
            Point3D realD, realDaux;
            // We know the index of the detector (u,v). Start from there.
            realDaux.x=-(geo.DSD-geo.DSO);
            realDaux.y=-geo.sDetecU/2+geo.dDetecU/2 + u*geo.dDetecU +uv0Offset.x;
            realD.z =-geo.sDetecV/2+geo.dDetecV/2 + v*geo.dDetecV +uv0Offset.y;
            //rotate the detector
            realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
            realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
            // FDK distance weight: L = source-to-detector-pixel distance,
            // l = source-to-voxel distance.
            float L,l;
            L = sqrt( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
            l = sqrt( (realS.x-realvoxel.x)*(realS.x-realvoxel.x)
                    + (realS.y-realvoxel.y)*(realS.y-realvoxel.y)
                    + (realS.z-realvoxel.z)*(realS.z-realvoxel.z));
            weigth=L*L*L/(geo.DSD*l*l);
            // Get Value in the computed (U,V) and multiply by the corresponding weigth.
            // indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
            voxelColumn[colIdx]+=tex3D(tex, u +0.5 ,
                    v +0.5 ,
                    indAlpha+0.5)* weigth;
        } // END iterating through column of voxels
    } // END iterating through multiple projections
    // And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
    for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long indZ = startIndZ + colIdx;
        // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
        // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
        if(indZ>=geo.nVoxelZ)
            break; // break the loop.
        unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
        image[idx] = voxelColumn[colIdx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
        // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
        // According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
        // We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
        // better for avoiding memory congestion.
    } // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojectionFDK
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
// Main host entry point for the FDK backprojection.
//  projections : nalpha projections of nDetecU x nDetecV floats (host).
//  geo         : acquisition geometry.
//  result      : output volume, nVoxelX*nVoxelY*nVoxelZ floats (host).
//  alphas      : per-projection rotation angles.
// Uploads the projections to a 3D texture, then launches the
// multi-projection backprojection kernel once per set of PROJ_PER_KERNEL
// projections, feeding each launch its parameters through constant memory.
// NOTE(review): cudaCheckErrors is currently a no-op macro (errors are
// swallowed) -- see the TODO at its definition.
int voxel_backprojection2(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
    /*
     * Allocate texture memory on the device
     */
    // copy data to CUDA memory
    hipArray *d_projectiondata = 0;
    const hipExtent extent = make_hipExtent(geo.nDetecU,geo.nDetecV,nalpha);
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
    hipMalloc3DArray(&d_projectiondata, &channelDesc, extent);
    cudaCheckErrors("hipMalloc3D error 3D tex");
    hipMemcpy3DParms copyParams = { 0 };
    copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
    copyParams.dstArray = d_projectiondata;
    copyParams.extent = extent;
    copyParams.kind = hipMemcpyHostToDevice;
    // BUGFIX: "&copyParams" had been corrupted into the mojibake
    // "©Params" (an HTML-entity mangling of "&copy"), which does not
    // compile.  Restore the address-of expression.
    hipMemcpy3D(&copyParams);
    cudaCheckErrors("hipMemcpy3D fail");
    // Configure texture options: unnormalized coords, bilinear filtering,
    // zero outside the projection borders.
    tex.normalized = false;
    tex.filterMode = hipFilterModeLinear;
    tex.addressMode[0] = hipAddressModeBorder;
    tex.addressMode[1] = hipAddressModeBorder;
    tex.addressMode[2] = hipAddressModeBorder;
    hipBindTextureToArray(tex, d_projectiondata, channelDesc);
    cudaCheckErrors("3D texture memory bind fail");
    // Allocate result image memory
    size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
    float* dimage;
    hipMalloc((void**)&dimage, num_bytes);
    hipMemset(dimage,0,num_bytes);
    cudaCheckErrors("hipMalloc fail");
    // If we are going to time
    bool timekernel=false;
    hipEvent_t start, stop;
    float elapsedTime;
    if (timekernel){
        hipEventCreate(&start);
        hipEventRecord(start,0);
    }
    int divx,divy,divz;
    // RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
    // I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect trhoughput, so
    // let's stick with the values from Zinsser and Keck.
    divx=16;
    divy=32;
    divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
    dim3 grid((geo.nVoxelX+divx-1)/divx,
            (geo.nVoxelY+divy-1)/divy,
            (geo.nVoxelZ+divz-1)/divz);
    dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
    //////////////////////////////////////////////////////////////////////////////////////
    // Main reconstruction loop: go through projections (rotation angles) and backproject
    //////////////////////////////////////////////////////////////////////////////////////
    // Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
    // kernel calls we'll need altogether.
    int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
    for (unsigned int i=0; i<noOfKernelCalls; i++)
    {
        // Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
        int j;
        for(j=0; j<PROJ_PER_KERNEL; j++)
        {
            int currProjNumber=i*PROJ_PER_KERNEL+j;
            if(currProjNumber>=nalpha)
                break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
            Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
            float sinalpha,cosalpha;
            geo.alpha=-alphas[currProjNumber];
            sinalpha=sin(geo.alpha);
            cosalpha=cos(geo.alpha);
            // 3*j: THREE floats are stored per projection (sin, cos, COR).
            projSinCosArray2Host[3*j]=sinalpha;
            projSinCosArray2Host[3*j+1]=cosalpha;
            projSinCosArray2Host[3*j+2]=geo.COR[currProjNumber];
            computeDeltasCube(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
            offOrig.x=geo.offOrigX[currProjNumber];
            offOrig.y=geo.offOrigY[currProjNumber];
            offDetec.x=geo.offDetecU[currProjNumber];
            offDetec.y=geo.offDetecV[currProjNumber];
            projParamsArray2Host[7*j]=deltaX; // 7*j because we have 7 Point3D values per projection
            projParamsArray2Host[7*j+1]=deltaY;
            projParamsArray2Host[7*j+2]=deltaZ;
            projParamsArray2Host[7*j+3]=xyzOrigin;
            projParamsArray2Host[7*j+4]=offOrig;
            projParamsArray2Host[7*j+5]=offDetec;
            projParamsArray2Host[7*j+6]=source;
        } // END for (preparing params for kernel call)
        // Copy the prepared parameter arrays to constant memory to make it available for the kernel
        hipMemcpyToSymbol(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*3*PROJ_PER_KERNEL);
        hipMemcpyToSymbol(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL);
        hipLaunchKernelGGL(( kernelPixelBackprojection), dim3(grid),dim3(block), 0, 0, geo,dimage,i,nalpha);
        cudaCheckErrors("Kernel fail");
    } // END for
    // Scale by the voxel/detector-element volume ratio.
    hipLaunchKernelGGL(( matrixConstantMultiply), dim3(60),dim3(MAXTREADS), 0, 0, geo,dimage,geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
    //////////////////////////////////////////////////////////////////////////////////////
    // END Main reconstruction loop: go through projections (rotation angles) and backproject
    //////////////////////////////////////////////////////////////////////////////////////
    if (timekernel)
    {
        hipEventCreate(&stop);
        hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        hipEventElapsedTime(&elapsedTime, start,stop);
        //TODO: replace this
        // mexPrintf("%f\n" ,elapsedTime);
        cudaCheckErrors("cuda Timing fail");
    }
    // Blocking copy: also synchronizes on the kernels above.
    hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost);
    cudaCheckErrors("hipMemcpy result fail");
    hipUnbindTexture(tex);
    cudaCheckErrors("Unbind fail");
    hipFree(dimage);
    hipFreeArray(d_projectiondata);
    cudaCheckErrors("hipFree d_imagedata fail");
    //hipDeviceReset();
    return 0;
} // END voxel_backprojection
//______________________________________________________________________________
//
// Function: computeDeltasCube
//
// Description: Computes relative increments for each projection (volume rotation).
// Increments get passed to the backprojection kernel.
//______________________________________________________________________________
#ifndef BACKPROJECTION_HPP
// Computes, for projection i at angle alpha, the per-voxel coordinate
// increments used by the backprojection kernel:
//   *xyzorigin            : position of voxel (0,0,0) in the rotated,
//                           detector-pixel-scaled coordinate system;
//   *deltaX/*deltaY/*deltaZ: change in that position when moving one voxel
//                           along each axis;
//   *S                    : the X-ray source in the same system.
// All points are rotated by alpha (equivalent to rotating source+detector),
// shifted by the detector offsets, corrected for detector roll/pitch/yaw via
// rollPitchYawT, and finally scaled so detector pixels are 1x1.
void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
    Point3D P0, Px0,Py0,Pz0, source;
    // Get coords of Img(0,0,0)
    P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
    P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
    P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
    // Get coors from next voxel in each direction
    Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x;
    Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y;
    Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ;
    // Rotate image (this is equivalent of rotating the source and detector)
    Point3D P, Px,Py,Pz; // We need other auxiliar variables to be able to perform the rotation, or we would overwrite values!
    P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z;
    Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z;
    Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z;
    Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z;
    //detector offset
    P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
    Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
    Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
    Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
    //Detector Roll pitch Yaw
    //
    //
    // first, we need to offset everything so (0,0,0) is the center of the detector
    // Only X is required for that
    P.x=P.x+(geo.DSD-geo.DSO);
    Px.x=Px.x+(geo.DSD-geo.DSO);
    Py.x=Py.x+(geo.DSD-geo.DSO);
    Pz.x=Pz.x+(geo.DSD-geo.DSO);
    rollPitchYawT(geo,i,&P);
    rollPitchYawT(geo,i,&Px);
    rollPitchYawT(geo,i,&Py);
    rollPitchYawT(geo,i,&Pz);
    // Undo the detector-center offset after the misalignment rotation.
    P.x=P.x-(geo.DSD-geo.DSO);
    Px.x=Px.x-(geo.DSD-geo.DSO);
    Py.x=Py.x-(geo.DSD-geo.DSO);
    Pz.x=Pz.x-(geo.DSD-geo.DSO);
    //Done for P, now source
    source.x=geo.DSD; //allready offseted for rotation of teh detector
    source.y=-geo.offDetecU[i];
    source.z=-geo.offDetecV[i];
    rollPitchYawT(geo,i,&source);
    source.x=source.x-(geo.DSD-geo.DSO);
    // Scale coords so detector pixels are 1x1
    P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
    Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
    Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
    Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
    source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
    // get deltas of the changes in voxels
    deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
    deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
    deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
    *xyzorigin=P;
    *S=source;
} // END computeDeltasCube
// Applies the detector roll-pitch-yaw misalignment rotation (the "T" in the
// name suggests this is the transposed/inverse form -- confirm against a
// forward rollPitchYaw if one exists) to `point`, in place, using the
// per-projection angles geo.dRoll/dPitch/dYaw[i].
void rollPitchYawT(Geometry geo,int i, Point3D* point){
    // Precompute the six trig terms once; each product below is otherwise
    // identical to evaluating cos()/sin() inline.
    const double cr = cos(geo.dRoll[i]),  sr = sin(geo.dRoll[i]);
    const double cp = cos(geo.dPitch[i]), sp = sin(geo.dPitch[i]);
    const double cy = cos(geo.dYaw[i]),   sy = sin(geo.dYaw[i]);
    const Point3D in = *point;  // snapshot before overwriting in place
    point->x = cr*cp*in.x
             + sr*cp*in.y
             - sp*in.z;
    point->y = (cr*sp*sy - sr*cy)*in.x
             + (sr*sp*sy + cr*cy)*in.y
             + cp*sy*in.z;
    point->z = (cr*sp*cy + sr*sy)*in.x
             + (sr*sp*cy - cr*sy)*in.y
             + cp*cy*in.z;
}
#endif | 13cd039123448d9b102bf58a5ef5681fdb25cbb0.cu | /*-------------------------------------------------------------------------
*
* CUDA function for backrpojection using FDK weigts for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by Robert Bryll
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection2.hpp"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
// if (__err != cudaSuccess) { \
// printf("%s \n", msg);\
// printf("%s \n", cudaGetErrorString(__err));\
// } \
// TODO: Error logging
// NOTE(review): this macro fetches (and thereby CLEARS) the last CUDA error
// but never reports or acts on it -- the "TODO: Error logging" above still
// applies. `msg` and `__err` are intentionally unused; calls to
// cudaCheckErrors() therefore only reset the sticky error state.
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
// Multiplies every voxel of the reconstructed volume by `constant` (the
// FDK scaling factor). Uses a grid-stride loop, so any <<<grid,block>>>
// 1-D launch configuration covers the whole volume.
// Fix: the voxel-count product and the stride are now accumulated in
// size_t, so nVoxelX*nVoxelY*nVoxelZ cannot overflow 32-bit integer
// arithmetic for large volumes (e.g. >= 1290^3).
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
    const size_t total_voxels = static_cast<size_t>(geo.nVoxelX) *
                                geo.nVoxelY * geo.nVoxelZ;
    const size_t stride = static_cast<size_t>(gridDim.x) * blockDim.x;
    for (size_t idx = threadIdx.x + static_cast<size_t>(blockIdx.x) * blockDim.x;
         idx < total_voxels; idx += stride) {
        image[idx] *= constant;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by Robert Bryll on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec
// So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
Point3D projParamsArray2Host[7*PROJ_PER_KERNEL]; // Host means it is host memory
// Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection)
__constant__ float projSinCosArray2Dev[3*PROJ_PER_KERNEL];
float projSinCosArray2Host[3*PROJ_PER_KERNEL];
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
// Main FDK backprojection kernel.
//
// Backprojects up to PROJ_PER_KERNEL consecutive projections into the
// volume in a single launch. Per-projection geometry is read from constant
// memory (projParamsArray2Dev: 7 Point3D values per projection;
// projSinCosArray2Dev: sin, cos and COR per projection), filled by the host
// before each launch.
//
// Thread layout: one thread per (x,y) position; each thread updates a
// column of VOXELS_PER_THREAD voxels along z (the host launches a "flat"
// block of depth 1, see voxel_backprojection2).
__global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections)
{
    unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
    // STARTING z index of the column of voxels that this thread handles.
    unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z;
    // Make sure we dont go out of bounds.
    // (note: bitwise | on the comparisons, not || -- evaluates all three,
    // which is harmless here and avoids short-circuit branching)
    if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ)
        return;
    // Local (register) copy of the column of voxels this thread updates;
    // all reads from / writes to global memory happen only in the first and
    // last loop, avoiding a read-modify-write per projection.
    float voxelColumn[VOXELS_PER_THREAD];
    // Copy the current 3D volume values of the column into the local array.
    int colIdx;
    for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long indZ = startIndZ + colIdx;
        // Out of bounds: the array stays partially filled, which is fine --
        // the same bounds check guards the final write-back loop.
        if(indZ>=geo.nVoxelZ)
            break; // break the loop.
        unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
        voxelColumn[colIdx] = image[idx];
    } // END copy 3D volume voxels to local array
    // Now iterate through the projections assigned to this kernel call.
    for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
    {
        // ABSOLUTE projection number in the projection array.
        int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber;
        // Last kernel call may cover fewer than PROJ_PER_KERNEL projections.
        if(indAlpha>=totalNoOfProjections)
            break;
        // 7 Point3D values per projection in constant memory.
        Point3D deltaX = projParamsArray2Dev[7*projNumber];
        Point3D deltaY = projParamsArray2Dev[7*projNumber+1];
        Point3D deltaZ = projParamsArray2Dev[7*projNumber+2];
        Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3];
        Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4];
        Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5];
        Point3D S = projParamsArray2Dev[7*projNumber+6];
        // 3 floats (sin, cos, COR) per projection in constant memory.
        float sinalpha = projSinCosArray2Dev[3*projNumber];
        float cosalpha = projSinCosArray2Dev[3*projNumber+1];
        float COR = projSinCosArray2Dev[3*projNumber+2];
        // Iterate through Z in our voxel column FOR A GIVEN PROJECTION.
        for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
        {
            unsigned long indZ = startIndZ + colIdx;
            if(indZ>=geo.nVoxelZ)
                break; // break the loop.
            // "XYZ" of the current voxel in the scaled coordinate system
            // (the image is rotated with the projection angle).
            Point3D P;
            P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
            P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
            P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
            // Vector from the source to the voxel.
            float vectX,vectY,vectZ;
            vectX=(P.x -S.x);
            vectY=(P.y -S.y);
            vectZ=(P.z -S.z);
            // Parameter t where the source->voxel ray hits the detector
            // plane, then the detector (u,v) where the voxel projects.
            float t=(geo.DSO-geo.DSD /*-DOD*/ - S.x)/vectX;
            float y,z;
            y=vectY*t+S.y;
            z=vectZ*t+S.z;
            float u,v;
            u=y+geo.nDetecU/2-0.5;
            v=z+geo.nDetecV/2-0.5;
            float weigth;
            // Real-world coordinates of the voxel: computed directly from
            // the indices (less math than inverting the transformation).
            Point3D realvoxel;
            realvoxel.x=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
            realvoxel.y=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y;
            realvoxel.z=-geo.sVoxelZ/2+geo.dVoxelZ/2 +indZ*geo.dVoxelZ +xyzOffset.z;
            // Real-world coordinates of the source: rotate (DSO, 0, 0).
            Point3D realS;
            realS.x= geo.DSO*cosalpha;
            realS.y=-geo.DSO*sinalpha;
            realS.z=0;
            // Real-world coordinates of the detector point (u,v), rotated
            // by the projection angle (sin(-x)=-sin(x), cos(-x)=cos(x)).
            Point3D realD, realDaux;
            realDaux.x=-(geo.DSD-geo.DSO);
            realDaux.y=-geo.sDetecU/2+geo.dDetecU/2 + u*geo.dDetecU +uv0Offset.x;
            realD.z =-geo.sDetecV/2+geo.dDetecV/2 + v*geo.dDetecV +uv0Offset.y;
            realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha;
            realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha;
            // FDK distance weight: L = source-detector distance along the
            // ray, l = source-voxel distance (Sz = 0 always).
            float L,l;
            L = sqrt( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z));
            l = sqrt( (realS.x-realvoxel.x)*(realS.x-realvoxel.x)
            + (realS.y-realvoxel.y)*(realS.y-realvoxel.y)
            + (realS.z-realvoxel.z)*(realS.z-realvoxel.z));
            weigth=L*L*L/(geo.DSD*l*l);
            // Sample the projection texture at (u,v) (hardware bilinear
            // interpolation; +0.5 for texel-center addressing) and
            // accumulate the weighted value into the local column.
            voxelColumn[colIdx]+=tex3D(tex, u +0.5 ,
                    v +0.5 ,
                    indAlpha+0.5)* weigth;
        }  // END iterating through column of voxels
    }  // END iterating through multiple projections
    // Finally copy the updated local column back to the 3D volume.
    for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long indZ = startIndZ + colIdx;
        if(indZ>=geo.nVoxelZ)
            break; // break the loop.
        unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
        // Plain store (=, not +=): the read already happened at the top of
        // the kernel. Per Papenhausen, splitting reads and writes this way
        // reduces memory congestion.
        image[idx] = voxelColumn[colIdx];
    }  // END copy updated voxels from local array to our 3D volume
}  // END kernelPixelBackprojection
//______________________________________________________________________________
//
// Function: voxel_backprojection
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
// Main host function for FDK backprojection.
//
// Copies the projection stack into a 3D texture, backprojects all `nalpha`
// projections into a device volume (PROJ_PER_KERNEL projections per kernel
// launch, parameters staged through constant memory), applies the FDK
// scaling factor, and copies the volume back into `result`.
// Returns 0 on completion.
//
// Fixes relative to the previous revision:
//  - cudaMemcpy3D was called with the garbled argument "©Params"
//    (a mangled HTML entity for "&copyParams"); restored.
//  - the timing events were never destroyed (resource leak when
//    timekernel is enabled); cudaEventDestroy added.
//  - loop index made `int` to match `noOfKernelCalls` (removes the
//    signed/unsigned comparison).
//  - stale comments corrected (3 floats and 7 Point3D per projection).
int voxel_backprojection2(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha)
{
    /*
     * Allocate texture memory on the device and copy projection data to it.
     */
    cudaArray *d_projectiondata = 0;
    const cudaExtent extent = make_cudaExtent(geo.nDetecU,geo.nDetecV,nalpha);
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
    cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent);
    cudaCheckErrors("cudaMalloc3D error 3D tex");
    cudaMemcpy3DParms copyParams = { 0 };
    copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
    copyParams.dstArray = d_projectiondata;
    copyParams.extent = extent;
    copyParams.kind = cudaMemcpyHostToDevice;
    cudaMemcpy3D(&copyParams);
    cudaCheckErrors("cudaMemcpy3D fail");
    // Configure texture options: unnormalized coords, hardware bilinear
    // interpolation, zero outside the detector.
    tex.normalized = false;
    tex.filterMode = cudaFilterModeLinear;
    tex.addressMode[0] = cudaAddressModeBorder;
    tex.addressMode[1] = cudaAddressModeBorder;
    tex.addressMode[2] = cudaAddressModeBorder;
    cudaBindTextureToArray(tex, d_projectiondata, channelDesc);
    cudaCheckErrors("3D texture memory bind fail");
    // Allocate (zero-initialized) result image memory on the device.
    size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
    float* dimage;
    cudaMalloc((void**)&dimage, num_bytes);
    cudaMemset(dimage,0,num_bytes);
    cudaCheckErrors("cudaMalloc fail");
    // Optional kernel timing.
    bool timekernel=false;
    cudaEvent_t start, stop;
    float elapsedTime;
    if (timekernel){
        cudaEventCreate(&start);
        cudaEventRecord(start,0);
    }
    // RB: use the block size from Zinsser & Keck (16 x 32); other shapes did
    // not significantly change throughput. Each thread works on a Z column
    // of VOXELS_PER_THREAD voxels, so the block depth is 1 and the grid's Z
    // dimension shrinks accordingly.
    int divx,divy,divz;
    divx=16;
    divy=32;
    divz=VOXELS_PER_THREAD;
    dim3 grid((geo.nVoxelX+divx-1)/divx,
              (geo.nVoxelY+divy-1)/divy,
              (geo.nVoxelZ+divz-1)/divz);
    dim3 block(divx,divy,1);
    //////////////////////////////////////////////////////////////////////////
    // Main reconstruction loop: each kernel call processes PROJ_PER_KERNEL
    // projections; bounds are checked inside the loop/kernel when nalpha is
    // not divisible by PROJ_PER_KERNEL.
    //////////////////////////////////////////////////////////////////////////
    int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
    for (int i=0; i<noOfKernelCalls; i++)
    {
        // Stage the per-projection parameters for this batch into the host
        // arrays, then copy them to constant memory for the kernel.
        int j;
        for(j=0; j<PROJ_PER_KERNEL; j++)
        {
            int currProjNumber=i*PROJ_PER_KERNEL+j;
            if(currProjNumber>=nalpha)
                break; // Partially filled arrays are fine; the kernel checks bounds.
            Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
            float sinalpha,cosalpha;
            geo.alpha=-alphas[currProjNumber];
            sinalpha=sin(geo.alpha);
            cosalpha=cos(geo.alpha);
            projSinCosArray2Host[3*j]=sinalpha; // 3 floats (sin, cos, COR) per projection
            projSinCosArray2Host[3*j+1]=cosalpha;
            projSinCosArray2Host[3*j+2]=geo.COR[currProjNumber];
            computeDeltasCube(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
            offOrig.x=geo.offOrigX[currProjNumber];
            offOrig.y=geo.offOrigY[currProjNumber];
            offDetec.x=geo.offDetecU[currProjNumber];
            offDetec.y=geo.offDetecV[currProjNumber];
            projParamsArray2Host[7*j]=deltaX; // 7 Point3D values per projection
            projParamsArray2Host[7*j+1]=deltaY;
            projParamsArray2Host[7*j+2]=deltaZ;
            projParamsArray2Host[7*j+3]=xyzOrigin;
            projParamsArray2Host[7*j+4]=offOrig;
            projParamsArray2Host[7*j+5]=offDetec;
            projParamsArray2Host[7*j+6]=source;
        } // END for (preparing params for kernel call)
        cudaMemcpyToSymbol(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*3*PROJ_PER_KERNEL);
        cudaMemcpyToSymbol(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL);
        kernelPixelBackprojection<<<grid,block>>>(geo,dimage,i,nalpha);
        cudaCheckErrors("Kernel fail");
    } // END for
    // Apply the FDK scaling factor to the whole volume.
    matrixConstantMultiply<<<60,MAXTREADS>>>( geo,dimage,geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
    if (timekernel)
    {
        cudaEventCreate(&stop);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTime, start,stop);
        //TODO: replace this
        // mexPrintf("%f\n" ,elapsedTime);
        cudaCheckErrors("cuda Timing fail");
        // Release the timing events (previously leaked).
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }
    // Copy the reconstructed volume back and release device resources.
    cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost);
    cudaCheckErrors("cudaMemcpy result fail");
    cudaUnbindTexture(tex);
    cudaCheckErrors("Unbind fail");
    cudaFree(dimage);
    cudaFreeArray(d_projectiondata);
    cudaCheckErrors("cudaFree d_imagedata fail");
    //cudaDeviceReset();
    return 0;
}   // END voxel_backprojection
//______________________________________________________________________________
//
// Function: computeDeltasCube
//
// Description: Computes relative increments for each projection (volume rotation).
// Increments get passed to the backprojection kernel.
//______________________________________________________________________________
#ifndef BACKPROJECTION_HPP
// Computes, for projection i at angle `alpha`, the scaled/rotated position
// of voxel (0,0,0) (*xyzorigin), the per-voxel increments along each volume
// axis (*deltaX/Y/Z) and the source position (*S), all in the detector-
// aligned coordinate system used by the backprojection kernel.
//
// Improvement: cos(alpha)/sin(alpha) were each evaluated 8 times and
// (geo.DSD-geo.DSO) 9 times; they are now hoisted into locals. `auto`
// preserves the exact types of the original expressions, so the arithmetic
// (and its rounding) is unchanged.
void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
    const auto cosalpha = cos(alpha);
    const auto sinalpha = sin(alpha);
    const auto DOD = geo.DSD - geo.DSO;  // origin-to-detector distance
    Point3D P0, Px0,Py0,Pz0, source;
    // Coordinates of voxel Img(0,0,0), including the per-projection offset.
    P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
    P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
    P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
    // Coordinates of the next voxel along each axis (used for the deltas).
    Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x;
    Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y;
    Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ;
    // Rotate the image (equivalent to rotating source and detector).
    // Auxiliary variables so the rotation does not overwrite its inputs.
    Point3D P, Px,Py,Pz;
    P.x =P0.x *cosalpha-P0.y *sinalpha; P.y =P0.x *sinalpha+P0.y *cosalpha; P.z =P0.z;
    Px.x=Px0.x*cosalpha-Px0.y*sinalpha; Px.y=Px0.x*sinalpha+Px0.y*cosalpha; Px.z=Px0.z;
    Py.x=Py0.x*cosalpha-Py0.y*sinalpha; Py.y=Py0.x*sinalpha+Py0.y*cosalpha; Py.z=Py0.z;
    Pz.x=Pz0.x*cosalpha-Pz0.y*sinalpha; Pz.y=Pz0.x*sinalpha+Pz0.y*cosalpha; Pz.z=Pz0.z;
    // Detector offset.
    P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
    Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
    Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
    Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
    // Detector roll/pitch/yaw: first offset everything so (0,0,0) is the
    // center of the detector (only X is required for that), rotate, undo.
    P.x=P.x+DOD;
    Px.x=Px.x+DOD;
    Py.x=Py.x+DOD;
    Pz.x=Pz.x+DOD;
    rollPitchYawT(geo,i,&P);
    rollPitchYawT(geo,i,&Px);
    rollPitchYawT(geo,i,&Py);
    rollPitchYawT(geo,i,&Pz);
    P.x=P.x-DOD;
    Px.x=Px.x-DOD;
    Py.x=Py.x-DOD;
    Pz.x=Pz.x-DOD;
    // Done for P, now the source.
    source.x=geo.DSD; // already offset for the rotation of the detector
    source.y=-geo.offDetecU[i];
    source.z=-geo.offDetecV[i];
    rollPitchYawT(geo,i,&source);
    source.x=source.x-DOD;
    // Scale coordinates so detector pixels are 1x1.
    P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
    Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
    Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
    Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
    source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
    // Per-voxel increments along each volume axis.
    deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
    deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
    deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
    *xyzorigin=P;
    *S=source;
}  // END computeDeltasCube
// Applies the transpose (inverse) of the detector roll/pitch/yaw rotation
// of projection i to `point`, in place (ZYX intrinsic rotation matrix,
// transposed). Reads geo.dRoll/dPitch/dYaw[i].
void rollPitchYawT(Geometry geo,int i, Point3D* point){
    // Hoist the six trigonometric evaluations: each was recomputed up to
    // four times below, and sin/cos dominate the cost of this routine.
    // `auto` keeps the exact type (and thus rounding) of the original calls.
    const auto cR = cos(geo.dRoll[i]);
    const auto sR = sin(geo.dRoll[i]);
    const auto cP = cos(geo.dPitch[i]);
    const auto sP = sin(geo.dPitch[i]);
    const auto cY = cos(geo.dYaw[i]);
    const auto sY = sin(geo.dYaw[i]);
    // Copy the input first so the three output components are all computed
    // from the un-rotated point.
    Point3D auxPoint;
    auxPoint.x=point->x;
    auxPoint.y=point->y;
    auxPoint.z=point->z;
    point->x= cR*cP*auxPoint.x
            + sR*cP*auxPoint.y
            - sP*auxPoint.z;
    point->y=(cR*sP*sY - sR*cY)*auxPoint.x
            +(sR*sP*sY + cR*cY)*auxPoint.y
            + cP*sY*auxPoint.z;
    point->z=(cR*sP*cY + sR*sY)*auxPoint.x
            +(sR*sP*cY - cR*sY)*auxPoint.y
            + cP*cY*auxPoint.z;
}
#endif |
a8e417aa9639c8a22082fc51a760896bea6ce0be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/dense_kernels.hpp"
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/base/range_accessors.hpp>
#include <ginkgo/core/matrix/coo.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/ell.hpp>
#include <ginkgo/core/matrix/sellp.hpp>
#include "cuda/base/cublas_bindings.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/prefix_sum.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/uninitialized_array.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Dense matrix format namespace.
*
* @ingroup dense
*/
namespace dense {
constexpr auto default_block_size = 512;
template <typename ValueType>
void simple_apply(std::shared_ptr<const CudaExecutor> exec,
                  const matrix::Dense<ValueType> *a,
                  const matrix::Dense<ValueType> *b,
                  matrix::Dense<ValueType> *c)
{
    // Computes c = a * b through the BLAS gemm routine.
    if (cublas::is_supported<ValueType>::value) {
        auto handle = exec->get_cublas_handle();
        // The scalar factors live in host memory for this call, so switch
        // the pointer mode and restore device mode afterwards.
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST));
        auto one_scalar = one<ValueType>();
        auto zero_scalar = zero<ValueType>();
        // note: operands are passed in (b, a) order with swapped sizes --
        // presumably the row-major <-> column-major transposition trick;
        // keep in sync with apply() below.
        cublas::gemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, c->get_size()[1],
                     c->get_size()[0], a->get_size()[1], &one_scalar,
                     b->get_const_values(), b->get_stride(),
                     a->get_const_values(), a->get_stride(), &zero_scalar,
                     c->get_values(), c->get_stride());
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_DEVICE));
    } else {
        GKO_NOT_IMPLEMENTED;
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_SIMPLE_APPLY_KERNEL);
template <typename ValueType>
void apply(std::shared_ptr<const CudaExecutor> exec,
           const matrix::Dense<ValueType> *alpha,
           const matrix::Dense<ValueType> *a, const matrix::Dense<ValueType> *b,
           const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *c)
{
    // Computes c = alpha * a * b + beta * c through the BLAS gemm routine,
    // with alpha/beta read from device memory (default pointer mode).
    if (!cublas::is_supported<ValueType>::value) {
        GKO_NOT_IMPLEMENTED;
    } else {
        auto handle = exec->get_cublas_handle();
        // Operand order matches simple_apply() above (b before a, swapped
        // sizes).
        cublas::gemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
                     c->get_size()[1], c->get_size()[0], a->get_size()[1],
                     alpha->get_const_values(), b->get_const_values(),
                     b->get_stride(), a->get_const_values(), a->get_stride(),
                     beta->get_const_values(), c->get_values(),
                     c->get_stride());
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_APPLY_KERNEL);
namespace kernel {
template <size_type block_size, typename ValueType>
__global__ __launch_bounds__(block_size) void scale(
    size_type num_rows, size_type num_cols, size_type num_alpha_cols,
    const ValueType *__restrict__ alpha, ValueType *__restrict__ x,
    size_type stride_x)
{
    // x = alpha .* x, one thread per matrix entry. alpha holds a single
    // scalar when num_alpha_cols == 1, otherwise one scalar per column.
    constexpr auto warps_per_block = block_size / cuda_config::warp_size;
    const auto tidx =
        thread::get_thread_id<cuda_config::warp_size, warps_per_block>();
    const auto row = tidx / num_cols;
    const auto col = tidx % num_cols;
    if (row >= num_rows) {
        return;
    }
    const auto factor = alpha[num_alpha_cols == 1 ? 0 : col];
    // A zero factor writes an exact zero instead of performing the multiply.
    x[row * stride_x + col] = factor == zero<ValueType>()
                                  ? zero<ValueType>()
                                  : x[row * stride_x + col] * factor;
}
} // namespace kernel
template <typename ValueType>
void scale(std::shared_ptr<const CudaExecutor> exec,
           const matrix::Dense<ValueType> *alpha, matrix::Dense<ValueType> *x)
{
    // Scales x in place, either by a single scalar or column-wise.
    const auto num_rows = x->get_size()[0];
    const auto num_cols = x->get_size()[1];
    if (cublas::is_supported<ValueType>::value && num_cols == 1) {
        // Single-column case: defer to the BLAS scal routine.
        cublas::scal(exec->get_cublas_handle(), num_rows,
                     alpha->get_const_values(), x->get_values(),
                     x->get_stride());
    } else {
        // Generic fallback kernel, one thread per matrix entry.
        // TODO: tune this parameter
        constexpr auto block_size = default_block_size;
        const dim3 grid_dim = ceildiv(num_rows * num_cols, block_size);
        const dim3 block_dim{cuda_config::warp_size, 1,
                             block_size / cuda_config::warp_size};
        hipLaunchKernelGGL(( kernel::scale<block_size>), dim3(grid_dim), dim3(block_dim), 0, 0,
            num_rows, num_cols, alpha->get_size()[1],
            as_cuda_type(alpha->get_const_values()),
            as_cuda_type(x->get_values()), x->get_stride());
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_SCALE_KERNEL);
namespace kernel {
template <size_type block_size, typename ValueType>
__global__ __launch_bounds__(block_size) void add_scaled(
    size_type num_rows, size_type num_cols, size_type num_alpha_cols,
    const ValueType *__restrict__ alpha, const ValueType *__restrict__ x,
    size_type stride_x, ValueType *__restrict__ y, size_type stride_y)
{
    // y += alpha .* x, one thread per matrix entry. alpha holds a single
    // scalar when num_alpha_cols == 1, otherwise one scalar per column.
    constexpr auto warps_per_block = block_size / cuda_config::warp_size;
    const auto tidx =
        thread::get_thread_id<cuda_config::warp_size, warps_per_block>();
    const auto row = tidx / num_cols;
    const auto col = tidx % num_cols;
    if (row >= num_rows) {
        return;
    }
    const auto factor = alpha[num_alpha_cols == 1 ? 0 : col];
    // Zero factors are skipped entirely (no read-modify-write of y).
    if (factor != zero<ValueType>()) {
        y[row * stride_y + col] += x[row * stride_x + col] * factor;
    }
}
} // namespace kernel
template <typename ValueType>
void add_scaled(std::shared_ptr<const CudaExecutor> exec,
                const matrix::Dense<ValueType> *alpha,
                const matrix::Dense<ValueType> *x, matrix::Dense<ValueType> *y)
{
    // y += alpha .* x; single-column inputs go to BLAS axpy, everything
    // else uses the generic element-wise kernel.
    const auto num_rows = x->get_size()[0];
    const auto num_cols = x->get_size()[1];
    if (cublas::is_supported<ValueType>::value && num_cols == 1) {
        cublas::axpy(exec->get_cublas_handle(), num_rows,
                     alpha->get_const_values(), x->get_const_values(),
                     x->get_stride(), y->get_values(), y->get_stride());
    } else {
        // TODO: tune this parameter
        constexpr auto block_size = default_block_size;
        const dim3 grid_dim = ceildiv(num_rows * num_cols, block_size);
        const dim3 block_dim{cuda_config::warp_size, 1,
                             block_size / cuda_config::warp_size};
        hipLaunchKernelGGL(( kernel::add_scaled<block_size>), dim3(grid_dim), dim3(block_dim), 0, 0,
            num_rows, num_cols, alpha->get_size()[1],
            as_cuda_type(alpha->get_const_values()),
            as_cuda_type(x->get_const_values()), x->get_stride(),
            as_cuda_type(y->get_values()), y->get_stride());
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_ADD_SCALED_KERNEL);
namespace kernel {
template <size_type block_size, typename ValueType>
__global__ __launch_bounds__(block_size) void compute_partial_dot(
    size_type num_rows, const ValueType *__restrict__ x, size_type stride_x,
    const ValueType *__restrict__ y, size_type stride_y,
    ValueType *__restrict__ work)
{
    // Stage 1 of the dot product: each block reduces its grid-strided share
    // of the products x[i]*y[i] into a single partial sum, stored in
    // work[block_id]. finalize_dot_computation reduces `work` afterwards.
    constexpr auto warps_per_block = block_size / cuda_config::warp_size;
    const auto num_blocks = gridDim.x;
    const auto local_id = thread::get_local_thread_id<cuda_config::warp_size>();
    const auto global_id =
        thread::get_thread_id<cuda_config::warp_size, warps_per_block>();
    // Grid-stride accumulation of this thread's products.
    auto tmp = zero<ValueType>();
    for (auto i = global_id; i < num_rows; i += block_size * num_blocks) {
        tmp += x[i * stride_x] * y[i * stride_y];
    }
    // Block-wide reduction in shared memory: every thread writes its partial
    // sum before reduce() combines them.
    __shared__ UninitializedArray<ValueType, block_size> tmp_work;
    tmp_work[local_id] = tmp;
    reduce(group::this_thread_block(), static_cast<ValueType *>(tmp_work),
           [](const ValueType &x, const ValueType &y) { return x + y; });
    // One thread per block publishes the block's partial result.
    if (local_id == 0) {
        work[thread::get_block_id()] = tmp_work[0];
    }
}
template <size_type block_size, typename ValueType>
__global__ __launch_bounds__(block_size) void finalize_dot_computation(
size_type size, const ValueType *work, ValueType *result)
{
const auto local_id = thread::get_local_thread_id<cuda_config::warp_size>();
ValueType tmp = zero<ValueType>();
for (auto i = local_id; i < size; i += block_size) {
tmp += work[i];
}
__shared__ UninitializedArray<ValueType, block_size> tmp_work;
tmp_work[local_id] = tmp;
reduce(group::this_thread_block(), static_cast<ValueType *>(tmp_work),
[](const ValueType &x, const ValueType &y) { return x + y; });
if (local_id == 0) {
*result = tmp_work[0];
}
}
} // namespace kernel
template <typename ValueType>
void compute_dot(std::shared_ptr<const CudaExecutor> exec,
const matrix::Dense<ValueType> *x,
const matrix::Dense<ValueType> *y,
matrix::Dense<ValueType> *result)
{
if (cublas::is_supported<ValueType>::value) {
// TODO: write a custom kernel which does this more efficiently
for (size_type col = 0; col < x->get_size()[1]; ++col) {
cublas::dot(exec->get_cublas_handle(), x->get_size()[0],
x->get_const_values() + col, x->get_stride(),
y->get_const_values() + col, y->get_stride(),
result->get_values() + col);
}
} else {
// TODO: these are tuning parameters obtained experimentally, once
// we decide how to handle this uniformly, they should be modified
// appropriately
constexpr auto work_per_thread = 32;
constexpr auto block_size = 1024;
constexpr auto work_per_block = work_per_thread * block_size;
const dim3 grid_dim = ceildiv(x->get_size()[0], work_per_block);
const dim3 block_dim{cuda_config::warp_size, 1,
block_size / cuda_config::warp_size};
Array<ValueType> work(exec, grid_dim.x);
// TODO: write a kernel which does this more efficiently
for (size_type col = 0; col < x->get_size()[1]; ++col) {
hipLaunchKernelGGL(( kernel::compute_partial_dot<block_size>), dim3(grid_dim), dim3(block_dim), 0, 0,
x->get_size()[0], as_cuda_type(x->get_const_values() + col),
x->get_stride(), as_cuda_type(y->get_const_values() + col),
y->get_stride(), as_cuda_type(work.get_data()));
hipLaunchKernelGGL(( kernel::finalize_dot_computation<block_size>), dim3(1), dim3(block_dim), 0, 0,
grid_dim.x, as_cuda_type(work.get_const_data()),
as_cuda_type(result->get_values() + col));
}
}
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COMPUTE_DOT_KERNEL);
namespace kernel {
// Replaces each entry of `work` (one per column) by sqrt(|entry|), turning
// column-wise dot products into Euclidean norms.
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void compute_sqrt(
    size_type num_cols, ValueType *__restrict__ work)
{
    const auto col =
        static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (col >= num_cols) {
        return;
    }
    work[col] = sqrt(abs(work[col]));
}
}  // namespace kernel
// Computes the Euclidean norm of each column of x into the single-row
// matrix `result`.
template <typename ValueType>
void compute_norm2(std::shared_ptr<const CudaExecutor> exec,
                   const matrix::Dense<ValueType> *x,
                   matrix::Dense<ValueType> *result)
{
    if (cublas::is_supported<ValueType>::value) {
        // one BLAS nrm2 call per column
        for (size_type col = 0; col < x->get_size()[1]; ++col) {
            cublas::norm2(exec->get_cublas_handle(), x->get_size()[0],
                          x->get_const_values() + col, x->get_stride(),
                          result->get_values() + col);
        }
    } else {
        // norm2(x(:, j)) = sqrt(dot(x(:, j), x(:, j)))
        compute_dot(exec, x, x, result);
        const dim3 block_size(default_block_size, 1, 1);
        const dim3 grid_size(ceildiv(result->get_size()[1], block_size.x), 1,
                             1);
        hipLaunchKernelGGL(( kernel::compute_sqrt), dim3(grid_size), dim3(block_size), 0, 0,
            result->get_size()[1], as_cuda_type(result->get_values()));
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COMPUTE_NORM2_KERNEL);
namespace kernel {
// One thread per row: compacts the row's nonzeros into the COO arrays,
// starting at the row's precomputed offset row_ptrs[row].
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_coo(
    size_type num_rows, size_type num_cols, size_type stride,
    const size_type *__restrict__ row_ptrs,
    const ValueType *__restrict__ source, IndexType *__restrict__ row_idxs,
    IndexType *__restrict__ col_idxs, ValueType *__restrict__ values)
{
    const auto tidx = threadIdx.x + blockDim.x * blockIdx.x;
    if (tidx < num_rows) {
        size_type write_to = row_ptrs[tidx];
        for (size_type i = 0; i < num_cols; i++) {
            if (source[stride * tidx + i] != zero<ValueType>()) {
                values[write_to] = source[stride * tidx + i];
                col_idxs[write_to] = i;
                row_idxs[write_to] = tidx;
                write_to++;
            }
        }
    }
}
}  // namespace kernel
// Converts the dense matrix `source` into the (pre-sized) COO matrix
// `result`.
template <typename ValueType, typename IndexType>
void convert_to_coo(std::shared_ptr<const CudaExecutor> exec,
                    matrix::Coo<ValueType, IndexType> *result,
                    const matrix::Dense<ValueType> *source)
{
    auto num_rows = result->get_size()[0];
    auto num_cols = result->get_size()[1];
    auto row_idxs = result->get_row_idxs();
    auto col_idxs = result->get_col_idxs();
    auto values = result->get_values();
    auto stride = source->get_stride();
    // per-row nonzero counts, prefix-summed in place (two-pass helpers
    // defined elsewhere) to obtain each row's write offset
    auto nnz_prefix_sum = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_prefix_sum);
    const size_type grid_dim = ceildiv(num_rows, default_block_size);
    auto add_values = Array<size_type>(exec, grid_dim);
    hipLaunchKernelGGL(( start_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0,
        num_rows, as_cuda_type(nnz_prefix_sum.get_data()),
        as_cuda_type(add_values.get_data()));
    hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0,
        num_rows, as_cuda_type(nnz_prefix_sum.get_data()),
        as_cuda_type(add_values.get_data()));
    hipLaunchKernelGGL(( kernel::fill_in_coo), dim3(grid_dim), dim3(default_block_size), 0, 0,
        num_rows, num_cols, stride,
        as_cuda_type(nnz_prefix_sum.get_const_data()),
        as_cuda_type(source->get_const_values()), as_cuda_type(row_idxs),
        as_cuda_type(col_idxs), as_cuda_type(values));
    nnz_prefix_sum.clear();
    add_values.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_DENSE_CONVERT_TO_COO_KERNEL);
namespace kernel {
// One warp per row: each lane counts nonzeros in a warp-strided section of
// the row, then a warp-level reduction yields the row's nonzero count.
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void count_nnz_per_row(
    size_type num_rows, size_type num_cols, size_type stride,
    const ValueType *__restrict__ work, IndexType *__restrict__ result)
{
    constexpr auto warp_size = cuda_config::warp_size;
    const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
    const auto row_idx = tidx / warp_size;
    if (row_idx < num_rows) {
        IndexType part_result{};
        for (auto i = threadIdx.x % warp_size; i < num_cols; i += warp_size) {
            if (work[stride * row_idx + i] != zero<ValueType>()) {
                part_result += 1;
            }
        }
        auto warp_tile =
            group::tiled_partition<warp_size>(group::this_thread_block());
        result[row_idx] = reduce(
            warp_tile, part_result,
            [](const size_type &a, const size_type &b) { return a + b; });
    }
}
// One thread per row: compacts the row's nonzeros into the CSR value and
// column arrays, starting at the row's offset row_ptrs[row].
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_csr(
    size_type num_rows, size_type num_cols, size_type stride,
    const ValueType *__restrict__ source, IndexType *__restrict__ row_ptrs,
    IndexType *__restrict__ col_idxs, ValueType *__restrict__ values)
{
    const auto tidx = threadIdx.x + blockDim.x * blockIdx.x;
    if (tidx < num_rows) {
        auto write_to = row_ptrs[tidx];
        // size_type (instead of the original plain int) keeps the loop index
        // consistent with num_cols and with fill_in_coo above, avoiding
        // signed/unsigned mixing and overflow for very wide matrices
        for (size_type i = 0; i < num_cols; i++) {
            if (source[stride * tidx + i] != zero<ValueType>()) {
                values[write_to] = source[stride * tidx + i];
                col_idxs[write_to] = i;
                write_to++;
            }
        }
    }
}
}  // namespace kernel
// Converts the dense matrix `source` into the (pre-sized) CSR matrix
// `result`.
template <typename ValueType, typename IndexType>
void convert_to_csr(std::shared_ptr<const CudaExecutor> exec,
                    matrix::Csr<ValueType, IndexType> *result,
                    const matrix::Dense<ValueType> *source)
{
    auto num_rows = result->get_size()[0];
    auto num_cols = result->get_size()[1];
    auto row_ptrs = result->get_row_ptrs();
    auto col_idxs = result->get_col_idxs();
    auto values = result->get_values();
    auto stride = source->get_stride();
    // count nonzeros per row (one warp per row) directly into row_ptrs ...
    const auto rows_per_block =
        ceildiv(default_block_size, cuda_config::warp_size);
    const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block);
    hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim_nnz), dim3(default_block_size), 0, 0,
        num_rows, num_cols, stride, as_cuda_type(source->get_const_values()),
        as_cuda_type(row_ptrs));
    // ... then prefix-sum the counts in place to obtain the row pointers
    size_type grid_dim = ceildiv(num_rows + 1, default_block_size);
    auto add_values = Array<IndexType>(exec, grid_dim);
    hipLaunchKernelGGL(( start_prefix_sum<default_block_size>)
        , dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows + 1, as_cuda_type(row_ptrs),
        as_cuda_type(add_values.get_data()));
    hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0,
        num_rows + 1, as_cuda_type(row_ptrs),
        as_cuda_type(add_values.get_const_data()));
    // finally scatter the values (one thread per row)
    hipLaunchKernelGGL(( kernel::fill_in_csr), dim3(grid_dim), dim3(default_block_size), 0, 0,
        num_rows, num_cols, stride, as_cuda_type(source->get_const_values()),
        as_cuda_type(row_ptrs), as_cuda_type(col_idxs), as_cuda_type(values));
    add_values.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_DENSE_CONVERT_TO_CSR_KERNEL);
namespace kernel {
// One thread per row of the (padded) ELL storage: scatters the row's
// nonzeros into the column-major ELL arrays and zero-fills the remainder of
// the row's strip up to max_nnz_per_row. Threads with
// num_rows <= tidx < result_stride only zero-fill padding rows.
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_ell(
    size_type num_rows, size_type num_cols, size_type source_stride,
    const ValueType *__restrict__ source, size_type max_nnz_per_row,
    size_type result_stride, IndexType *__restrict__ col_ptrs,
    ValueType *__restrict__ values)
{
    const auto tidx = threadIdx.x + blockDim.x * blockIdx.x;
    if (tidx < num_rows) {
        IndexType col_idx = 0;
        for (size_type col = 0; col < num_cols; col++) {
            if (source[tidx * source_stride + col] != zero<ValueType>()) {
                col_ptrs[col_idx * result_stride + tidx] = col;
                values[col_idx * result_stride + tidx] =
                    source[tidx * source_stride + col];
                col_idx++;
            }
        }
        // zero-pad the rest of this row's strip
        for (size_type j = col_idx; j < max_nnz_per_row; j++) {
            col_ptrs[j * result_stride + tidx] = 0;
            values[j * result_stride + tidx] = zero<ValueType>();
        }
    } else if (tidx < result_stride) {
        // padding rows beyond the matrix: fully zero-filled
        for (size_type j = 0; j < max_nnz_per_row; j++) {
            col_ptrs[j * result_stride + tidx] = 0;
            values[j * result_stride + tidx] = zero<ValueType>();
        }
    }
}
}  // namespace kernel
// Converts the dense matrix `source` into the ELL matrix `result` (whose
// sizes and stored-elements-per-row are already set up).
template <typename ValueType, typename IndexType>
void convert_to_ell(std::shared_ptr<const CudaExecutor> exec,
                    matrix::Ell<ValueType, IndexType> *result,
                    const matrix::Dense<ValueType> *source)
{
    auto num_rows = result->get_size()[0];
    auto num_cols = result->get_size()[1];
    auto max_nnz_per_row = result->get_num_stored_elements_per_row();
    auto col_ptrs = result->get_col_idxs();
    auto values = result->get_values();
    auto source_stride = source->get_stride();
    auto result_stride = result->get_stride();
    // one thread per (padded) result row
    auto grid_dim = ceildiv(result_stride, default_block_size);
    hipLaunchKernelGGL(( kernel::fill_in_ell), dim3(grid_dim), dim3(default_block_size), 0, 0,
        num_rows, num_cols, source_stride,
        as_cuda_type(source->get_const_values()), max_nnz_per_row,
        result_stride, as_cuda_type(col_ptrs), as_cuda_type(values));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_DENSE_CONVERT_TO_ELL_KERNEL);
// Dense -> Hybrid conversion is not implemented for this backend yet.
template <typename ValueType, typename IndexType>
void convert_to_hybrid(std::shared_ptr<const CudaExecutor> exec,
                       matrix::Hybrid<ValueType, IndexType> *result,
                       const matrix::Dense<ValueType> *source)
    GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_DENSE_CONVERT_TO_HYBRID_KERNEL);
namespace kernel {
// One warp (= one block of warp_size threads) per slice: computes the
// slice's length (slice-wide maximum of nnz_per_row, rounded up to a
// multiple of stride_factor) and stores it into both slice_lengths and
// slice_sets; the caller turns slice_sets into offsets via a prefix sum.
__global__
    __launch_bounds__(cuda_config::warp_size) void calculate_slice_lengths(
        size_type num_rows, size_type slice_size, int slice_num,
        size_type stride_factor, const size_type *__restrict__ nnz_per_row,
        size_type *__restrict__ slice_lengths,
        size_type *__restrict__ slice_sets)
{
    constexpr auto warp_size = cuda_config::warp_size;
    const auto sliceid = blockIdx.x;
    const auto tid_in_warp = threadIdx.x;
    if (sliceid * slice_size + tid_in_warp < num_rows) {
        size_type thread_result = 0;
        for (auto i = tid_in_warp; i < slice_size; i += warp_size) {
            // the last slice may be partial, hence the bounds check
            thread_result =
                (i + slice_size * sliceid < num_rows)
                    ? max(thread_result, nnz_per_row[sliceid * slice_size + i])
                    : thread_result;
        }
        auto warp_tile =
            group::tiled_partition<warp_size>(group::this_thread_block());
        auto warp_result = reduce(
            warp_tile, thread_result,
            [](const size_type &a, const size_type &b) { return max(a, b); });
        if (tid_in_warp == 0) {
            auto slice_length =
                ceildiv(warp_result, stride_factor) * stride_factor;
            slice_lengths[sliceid] = slice_length;
            slice_sets[sliceid] = slice_length;
        }
    }
}
// One thread per row: scatters the row's nonzeros into the SELL-P storage
// (consecutive entries of a row are slice_size apart within the slice) and
// zero-fills the remainder of the row's strip.
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_sellp(
    size_type num_rows, size_type num_cols, size_type slice_size,
    size_type stride, const ValueType *__restrict__ source,
    size_type *__restrict__ slice_lengths, size_type *__restrict__ slice_sets,
    IndexType *__restrict__ col_idxs, ValueType *__restrict__ vals)
{
    const auto global_row = threadIdx.x + blockIdx.x * blockDim.x;
    const auto row = global_row % slice_size;
    const auto sliceid = global_row / slice_size;
    if (global_row < num_rows) {
        // starting position of this row inside its slice's storage
        size_type sellp_ind = slice_sets[sliceid] * slice_size + row;
        for (size_type col = 0; col < num_cols; col++) {
            auto val = source[global_row * stride + col];
            if (val != zero<ValueType>()) {
                col_idxs[sellp_ind] = col;
                vals[sellp_ind] = val;
                sellp_ind += slice_size;
            }
        }
        // zero-pad up to the slice length
        for (size_type i = sellp_ind;
             i <
             (slice_sets[sliceid] + slice_lengths[sliceid]) * slice_size + row;
             i += slice_size) {
            col_idxs[i] = 0;
            vals[i] = zero<ValueType>();
        }
    }
}
}  // namespace kernel
// Converts the dense matrix `source` into the SELL-P matrix `result`.
template <typename ValueType, typename IndexType>
void convert_to_sellp(std::shared_ptr<const CudaExecutor> exec,
                      matrix::Sellp<ValueType, IndexType> *result,
                      const matrix::Dense<ValueType> *source)
{
    const auto stride = source->get_stride();
    const auto num_rows = result->get_size()[0];
    const auto num_cols = result->get_size()[1];
    auto vals = result->get_values();
    auto col_idxs = result->get_col_idxs();
    auto slice_lengths = result->get_slice_lengths();
    auto slice_sets = result->get_slice_sets();
    // fall back to the format defaults if the result has none set
    const auto slice_size = (result->get_slice_size() == 0)
                                ? matrix::default_slice_size
                                : result->get_slice_size();
    const auto stride_factor = (result->get_stride_factor() == 0)
                                   ? matrix::default_stride_factor
                                   : result->get_stride_factor();
    const int slice_num = ceildiv(num_rows, slice_size);
    auto nnz_per_row = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_per_row);
    // one warp-sized block per slice
    auto grid_dim = slice_num;
    hipLaunchKernelGGL(( kernel::calculate_slice_lengths), dim3(grid_dim), dim3(cuda_config::warp_size), 0, 0,
        num_rows, slice_size, slice_num, stride_factor,
        as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(slice_lengths),
        as_cuda_type(slice_sets));
    // prefix-sum slice_sets in place so it holds each slice's offset
    auto add_values =
        Array<size_type>(exec, ceildiv(slice_num + 1, default_block_size));
    grid_dim = ceildiv(slice_num + 1, default_block_size);
    hipLaunchKernelGGL(( start_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0,
        slice_num + 1, as_cuda_type(slice_sets),
        as_cuda_type(add_values.get_data()));
    hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0,
        slice_num + 1, as_cuda_type(slice_sets),
        as_cuda_type(add_values.get_const_data()));
    grid_dim = ceildiv(num_rows, default_block_size);
    hipLaunchKernelGGL(( kernel::fill_in_sellp), dim3(grid_dim), dim3(default_block_size), 0, 0,
        num_rows, num_cols, slice_size, stride,
        as_cuda_type(source->get_const_values()), as_cuda_type(slice_lengths),
        as_cuda_type(slice_sets), as_cuda_type(col_idxs), as_cuda_type(vals));
    add_values.clear();
    nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
    GKO_DECLARE_DENSE_CONVERT_TO_SELLP_KERNEL);
namespace kernel {
// Block-wise sum reduction of nnz_per_row; writes one partial sum per block.
__global__ __launch_bounds__(default_block_size) void reduce_nnz(
    size_type size, const size_type *__restrict__ nnz_per_row,
    size_type *__restrict__ result)
{
    // dynamically sized shared scratch (default_block_size elements,
    // supplied at launch)
    extern __shared__ size_type block_sum[];
    reduce_array(size, nnz_per_row, block_sum,
                 [](const size_type &x, const size_type &y) { return x + y; });
    if (threadIdx.x == 0) {
        result[blockIdx.x] = block_sum[0];
    }
}
}  // namespace kernel
// Counts the total number of nonzero entries of `source` into *result
// (a host-side scalar).
template <typename ValueType>
void count_nonzeros(std::shared_ptr<const CudaExecutor> exec,
                    const matrix::Dense<ValueType> *source, size_type *result)
{
    const auto num_rows = source->get_size()[0];
    auto nnz_per_row = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_per_row);
    const auto n = ceildiv(num_rows, default_block_size);
    // cap the grid so the second, single-block reduction can cover all
    // partial results
    const size_type grid_dim =
        (n <= default_block_size) ? n : default_block_size;
    auto block_results = Array<size_type>(exec, grid_dim);
    hipLaunchKernelGGL(( kernel::reduce_nnz), dim3(grid_dim), dim3(default_block_size),
                          default_block_size * sizeof(size_type), 0,
        num_rows, as_cuda_type(nnz_per_row.get_const_data()),
        as_cuda_type(block_results.get_data()));
    auto d_result = Array<size_type>(exec, 1);
    hipLaunchKernelGGL(( kernel::reduce_nnz), dim3(1), dim3(default_block_size),
                          default_block_size * sizeof(size_type), 0,
        grid_dim, as_cuda_type(block_results.get_const_data()),
        as_cuda_type(d_result.get_data()));
    // copy the scalar result back to the host
    exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(),
                                  result);
    d_result.clear();
    block_results.clear();
    nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COUNT_NONZEROS_KERNEL);
namespace kernel {
// Block-wise max reduction of nnz_per_row; writes one partial max per block.
__global__ __launch_bounds__(default_block_size) void reduce_max_nnz(
    size_type size, const size_type *__restrict__ nnz_per_row,
    size_type *__restrict__ result)
{
    // dynamically sized shared scratch (default_block_size elements,
    // supplied at launch)
    extern __shared__ size_type block_max[];
    reduce_array(
        size, nnz_per_row, block_max,
        [](const size_type &x, const size_type &y) { return max(x, y); });
    if (threadIdx.x == 0) {
        result[blockIdx.x] = block_max[0];
    }
}
}  // namespace kernel
// Computes the maximum number of nonzeros of any row of `source` into
// *result (a host-side scalar).
template <typename ValueType>
void calculate_max_nnz_per_row(std::shared_ptr<const CudaExecutor> exec,
                               const matrix::Dense<ValueType> *source,
                               size_type *result)
{
    const auto num_rows = source->get_size()[0];
    auto nnz_per_row = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_per_row);
    const auto n = ceildiv(num_rows, default_block_size);
    // cap the grid so the second, single-block reduction can cover all
    // partial results
    const size_type grid_dim =
        (n <= default_block_size) ? n : default_block_size;
    auto block_results = Array<size_type>(exec, grid_dim);
    hipLaunchKernelGGL(( kernel::reduce_max_nnz), dim3(grid_dim), dim3(default_block_size),
                          default_block_size * sizeof(size_type), 0,
        num_rows, as_cuda_type(nnz_per_row.get_const_data()),
        as_cuda_type(block_results.get_data()));
    auto d_result = Array<size_type>(exec, 1);
    hipLaunchKernelGGL(( kernel::reduce_max_nnz), dim3(1), dim3(default_block_size),
                          default_block_size * sizeof(size_type), 0,
        grid_dim, as_cuda_type(block_results.get_const_data()),
        as_cuda_type(d_result.get_data()));
    // copy the scalar result back to the host
    exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(),
                                  result);
    d_result.clear();
    block_results.clear();
    nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(
    GKO_DECLARE_DENSE_CALCULATE_MAX_NNZ_PER_ROW_KERNEL);
// Fills `result` with the number of nonzero entries of each row of `source`.
template <typename ValueType>
void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec,
                                const matrix::Dense<ValueType> *source,
                                Array<size_type> *result)
{
    // count_nnz_per_row processes one row per warp, so a block of
    // default_block_size threads covers (block size / warp size) rows
    const auto num_rows = source->get_size()[0];
    const auto rows_covered_per_block =
        ceildiv(default_block_size, cuda_config::warp_size);
    const dim3 threads(default_block_size, 1, 1);
    const dim3 blocks(ceildiv(num_rows, rows_covered_per_block), 1, 1);
    hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(blocks), dim3(threads), 0, 0,
        num_rows, source->get_size()[1], source->get_stride(),
        as_cuda_type(source->get_const_values()),
        as_cuda_type(result->get_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(
GKO_DECLARE_DENSE_CALCULATE_NONZEROS_PER_ROW_KERNEL);
namespace kernel {
// One warp per slice: computes the maximum of nnz_per_row over the slice's
// rows, rounds it up to a multiple of stride_factor, and writes it to
// result[warpid] (warpid doubles as the slice id).
__global__ __launch_bounds__(default_block_size) void reduce_max_nnz_per_slice(
    size_type num_rows, size_type slice_size, size_type stride_factor,
    const size_type *__restrict__ nnz_per_row, size_type *__restrict__ result)
{
    const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
    constexpr auto warp_size = cuda_config::warp_size;
    const auto warpid = tidx / warp_size;
    const auto tid_in_warp = tidx % warp_size;
    const auto slice_num = ceildiv(num_rows, slice_size);
    size_type thread_result = 0;
    for (auto i = tid_in_warp; i < slice_size; i += warp_size) {
        // Rows of slice `warpid` are warpid * slice_size + i. The original
        // erroneously used warp_size as the row stride here, reading the
        // wrong rows whenever slice_size != warp_size.
        if (warpid * slice_size + i < num_rows) {
            thread_result =
                max(thread_result, nnz_per_row[warpid * slice_size + i]);
        }
    }
    auto warp_tile =
        group::tiled_partition<warp_size>(group::this_thread_block());
    auto warp_result = reduce(
        warp_tile, thread_result,
        [](const size_type &a, const size_type &b) { return max(a, b); });
    // guard warps beyond the last slice against out-of-bounds writes
    if (tid_in_warp == 0 && warpid < slice_num) {
        result[warpid] = ceildiv(warp_result, stride_factor) * stride_factor;
    }
}
// Block-wise sum of the per-slice column counts; one partial per block.
__global__ __launch_bounds__(default_block_size) void reduce_total_cols(
    size_type num_slices, const size_type *__restrict__ max_nnz_per_slice,
    size_type *__restrict__ result)
{
    // dynamically sized shared scratch, supplied at launch
    extern __shared__ size_type block_result[];
    reduce_array(num_slices, max_nnz_per_slice, block_result,
                 [](const size_type &x, const size_type &y) { return x + y; });
    if (threadIdx.x == 0) {
        result[blockIdx.x] = block_result[0];
    }
}
}  // namespace kernel
// Computes the total number of storage columns of the SELL-P representation
// of `source` (sum over all slices of the stride_factor-padded slice
// lengths) into *result, a host-side scalar.
template <typename ValueType>
void calculate_total_cols(std::shared_ptr<const CudaExecutor> exec,
                          const matrix::Dense<ValueType> *source,
                          size_type *result, size_type stride_factor,
                          size_type slice_size)
{
    const auto num_rows = source->get_size()[0];
    const auto num_cols = source->get_size()[1];
    const auto slice_num = ceildiv(num_rows, slice_size);
    auto nnz_per_row = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_per_row);
    // reduce_max_nnz_per_slice processes one slice per warp, so the grid
    // must provide at least slice_num warps. (The original launched only
    // ceildiv(slice_num, default_block_size) blocks, leaving most slices'
    // maxima uncomputed.) The per-slice array is padded to a whole number
    // of blocks so trailing warps of the last block stay in bounds; only
    // the first slice_num entries are reduced below.
    constexpr auto warps_per_block =
        default_block_size / cuda_config::warp_size;
    const auto grid_dim = ceildiv(slice_num, warps_per_block);
    auto max_nnz_per_slice =
        Array<size_type>(exec, grid_dim * warps_per_block);
    hipLaunchKernelGGL(( kernel::reduce_max_nnz_per_slice), dim3(grid_dim), dim3(default_block_size), 0, 0,
        num_rows, slice_size, stride_factor,
        as_cuda_type(nnz_per_row.get_const_data()),
        as_cuda_type(max_nnz_per_slice.get_data()));
    // two-stage sum reduction of the per-slice maxima
    auto block_results = Array<size_type>(exec, grid_dim);
    hipLaunchKernelGGL(( kernel::reduce_total_cols), dim3(grid_dim), dim3(default_block_size),
                          default_block_size * sizeof(size_type), 0,
        slice_num, as_cuda_type(max_nnz_per_slice.get_const_data()),
        as_cuda_type(block_results.get_data()));
    auto d_result = Array<size_type>(exec, 1);
    hipLaunchKernelGGL(( kernel::reduce_total_cols), dim3(1), dim3(default_block_size),
                          default_block_size * sizeof(size_type), 0,
        grid_dim, as_cuda_type(block_results.get_const_data()),
        as_cuda_type(d_result.get_data()));
    // copy the scalar result back to the host
    exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(),
                                  result);
    block_results.clear();
    nnz_per_row.clear();
    max_nnz_per_slice.clear();
    d_result.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(
GKO_DECLARE_DENSE_CALCULATE_TOTAL_COLS_KERNEL);
// Writes the transpose of `orig` into `trans` via BLAS geam
// (trans = 1 * orig^T + 0 * B).
template <typename ValueType>
void transpose(std::shared_ptr<const CudaExecutor> exec,
               matrix::Dense<ValueType> *trans,
               const matrix::Dense<ValueType> *orig)
{
    if (cublas::is_supported<ValueType>::value) {
        auto handle = exec->get_cublas_handle();
        // alpha/beta live on the host here, so temporarily switch the
        // handle to host pointer mode and restore device mode afterwards
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST));
        auto alpha = one<ValueType>();
        auto beta = zero<ValueType>();
        // beta == 0, so the B operand is never read (hence the nullptr)
        cublas::geam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, orig->get_size()[0],
                     orig->get_size()[1], &alpha, orig->get_const_values(),
                     orig->get_stride(), &beta,
                     static_cast<ValueType *>(nullptr), trans->get_size()[1],
                     trans->get_values(), trans->get_stride());
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_DEVICE));
    } else {
        GKO_NOT_IMPLEMENTED;
    }
};
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_TRANSPOSE_KERNEL);
// Writes the conjugate transpose of `orig` into `trans` via BLAS geam
// (trans = 1 * orig^H + 0 * B).
template <typename ValueType>
void conj_transpose(std::shared_ptr<const CudaExecutor> exec,
                    matrix::Dense<ValueType> *trans,
                    const matrix::Dense<ValueType> *orig)
{
    if (cublas::is_supported<ValueType>::value) {
        auto handle = exec->get_cublas_handle();
        // alpha/beta live on the host here, so temporarily switch the
        // handle to host pointer mode
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST));
        auto alpha = one<ValueType>();
        auto beta = zero<ValueType>();
        // beta == 0, so the B operand is never read (hence the nullptr)
        cublas::geam(handle, HIPBLAS_OP_C, HIPBLAS_OP_N, orig->get_size()[0],
                     orig->get_size()[1], &alpha, orig->get_const_values(),
                     orig->get_stride(), &beta,
                     static_cast<ValueType *>(nullptr), trans->get_size()[1],
                     trans->get_values(), trans->get_stride());
        // Restore DEVICE pointer mode like transpose() above does. The
        // original reset the handle to HOST mode again, leaving it in the
        // wrong mode for subsequent calls that pass device-side scalars.
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_DEVICE));
    } else {
        GKO_NOT_IMPLEMENTED;
    }
};
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_CONJ_TRANSPOSE_KERNEL);
} // namespace dense
} // namespace cuda
} // namespace kernels
} // namespace gko
| a8e417aa9639c8a22082fc51a760896bea6ce0be.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2019, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/matrix/dense_kernels.hpp"
#include <ginkgo/core/base/math.hpp>
#include <ginkgo/core/base/range_accessors.hpp>
#include <ginkgo/core/matrix/coo.hpp>
#include <ginkgo/core/matrix/csr.hpp>
#include <ginkgo/core/matrix/ell.hpp>
#include <ginkgo/core/matrix/sellp.hpp>
#include "cuda/base/cublas_bindings.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/prefix_sum.cuh"
#include "cuda/components/reduction.cuh"
#include "cuda/components/uninitialized_array.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Dense matrix format namespace.
*
* @ingroup dense
*/
namespace dense {
constexpr auto default_block_size = 512;
// Computes c = a * b via cublas gemm with host-side alpha = 1, beta = 0.
template <typename ValueType>
void simple_apply(std::shared_ptr<const CudaExecutor> exec,
                  const matrix::Dense<ValueType> *a,
                  const matrix::Dense<ValueType> *b,
                  matrix::Dense<ValueType> *c)
{
    if (cublas::is_supported<ValueType>::value) {
        auto handle = exec->get_cublas_handle();
        // alpha/beta are host-side scalars here, so temporarily switch the
        // handle to host pointer mode and restore device mode afterwards
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST));
        auto alpha = one<ValueType>();
        auto beta = zero<ValueType>();
        // operands are passed swapped (b before a) with swapped dimensions,
        // so the column-major BLAS computes the row-major product
        cublas::gemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, c->get_size()[1],
                     c->get_size()[0], a->get_size()[1], &alpha,
                     b->get_const_values(), b->get_stride(),
                     a->get_const_values(), a->get_stride(), &beta,
                     c->get_values(), c->get_stride());
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE));
    } else {
        GKO_NOT_IMPLEMENTED;
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_SIMPLE_APPLY_KERNEL);
// Computes c = alpha * a * b + beta * c via cublas gemm; alpha and beta are
// 1x1 device-side matrices (the handle stays in device pointer mode).
template <typename ValueType>
void apply(std::shared_ptr<const CudaExecutor> exec,
           const matrix::Dense<ValueType> *alpha,
           const matrix::Dense<ValueType> *a, const matrix::Dense<ValueType> *b,
           const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *c)
{
    if (cublas::is_supported<ValueType>::value) {
        // operands are passed swapped (b before a) with swapped dimensions,
        // so the column-major BLAS computes the row-major product
        cublas::gemm(exec->get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N,
                     c->get_size()[1], c->get_size()[0], a->get_size()[1],
                     alpha->get_const_values(), b->get_const_values(),
                     b->get_stride(), a->get_const_values(), a->get_stride(),
                     beta->get_const_values(), c->get_values(),
                     c->get_stride());
    } else {
        GKO_NOT_IMPLEMENTED;
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_APPLY_KERNEL);
namespace kernel {
// One thread per matrix entry: x(row, col) *= alpha(0, col) (or a single
// alpha for all columns). alpha == 0 is special-cased to write an exact
// zero — presumably so non-finite entries of x do not turn into NaN.
template <size_type block_size, typename ValueType>
__global__ __launch_bounds__(block_size) void scale(
    size_type num_rows, size_type num_cols, size_type num_alpha_cols,
    const ValueType *__restrict__ alpha, ValueType *__restrict__ x,
    size_type stride_x)
{
    constexpr auto warps_per_block = block_size / cuda_config::warp_size;
    const auto global_id =
        thread::get_thread_id<cuda_config::warp_size, warps_per_block>();
    const auto row_id = global_id / num_cols;
    const auto col_id = global_id % num_cols;
    // a single-column alpha scales all columns uniformly
    const auto alpha_id = num_alpha_cols == 1 ? 0 : col_id;
    if (row_id < num_rows) {
        x[row_id * stride_x + col_id] =
            alpha[alpha_id] == zero<ValueType>()
                ? zero<ValueType>()
                : x[row_id * stride_x + col_id] * alpha[alpha_id];
    }
}
}  // namespace kernel
// Computes x *= alpha (column-wise alpha if alpha has several columns).
template <typename ValueType>
void scale(std::shared_ptr<const CudaExecutor> exec,
           const matrix::Dense<ValueType> *alpha, matrix::Dense<ValueType> *x)
{
    // the single-column case maps directly onto BLAS scal
    if (cublas::is_supported<ValueType>::value && x->get_size()[1] == 1) {
        cublas::scal(exec->get_cublas_handle(), x->get_size()[0],
                     alpha->get_const_values(), x->get_values(),
                     x->get_stride());
    } else {
        // TODO: tune this parameter
        constexpr auto block_size = default_block_size;
        // one thread per matrix entry
        const dim3 grid_dim =
            ceildiv(x->get_size()[0] * x->get_size()[1], block_size);
        const dim3 block_dim{cuda_config::warp_size, 1,
                             block_size / cuda_config::warp_size};
        kernel::scale<block_size><<<grid_dim, block_dim>>>(
            x->get_size()[0], x->get_size()[1], alpha->get_size()[1],
            as_cuda_type(alpha->get_const_values()),
            as_cuda_type(x->get_values()), x->get_stride());
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_SCALE_KERNEL);
namespace kernel {
// One thread per matrix entry: y(row, col) += alpha(0, col) * x(row, col)
// (or a single alpha for all columns). alpha == 0 skips the update entirely,
// leaving y unchanged.
template <size_type block_size, typename ValueType>
__global__ __launch_bounds__(block_size) void add_scaled(
    size_type num_rows, size_type num_cols, size_type num_alpha_cols,
    const ValueType *__restrict__ alpha, const ValueType *__restrict__ x,
    size_type stride_x, ValueType *__restrict__ y, size_type stride_y)
{
    constexpr auto warps_per_block = block_size / cuda_config::warp_size;
    const auto global_id =
        thread::get_thread_id<cuda_config::warp_size, warps_per_block>();
    const auto row_id = global_id / num_cols;
    const auto col_id = global_id % num_cols;
    // a single-column alpha scales all columns uniformly
    const auto alpha_id = num_alpha_cols == 1 ? 0 : col_id;
    if (row_id < num_rows && alpha[alpha_id] != zero<ValueType>()) {
        y[row_id * stride_y + col_id] +=
            x[row_id * stride_x + col_id] * alpha[alpha_id];
    }
}
}  // namespace kernel
// Computes y += alpha * x (column-wise alpha if alpha has several columns).
template <typename ValueType>
void add_scaled(std::shared_ptr<const CudaExecutor> exec,
                const matrix::Dense<ValueType> *alpha,
                const matrix::Dense<ValueType> *x, matrix::Dense<ValueType> *y)
{
    // the single-column case maps directly onto BLAS axpy
    if (cublas::is_supported<ValueType>::value && x->get_size()[1] == 1) {
        cublas::axpy(exec->get_cublas_handle(), x->get_size()[0],
                     alpha->get_const_values(), x->get_const_values(),
                     x->get_stride(), y->get_values(), y->get_stride());
    } else {
        // TODO: tune this parameter
        constexpr auto block_size = default_block_size;
        // one thread per matrix entry
        const dim3 grid_dim =
            ceildiv(x->get_size()[0] * x->get_size()[1], block_size);
        const dim3 block_dim{cuda_config::warp_size, 1,
                             block_size / cuda_config::warp_size};
        kernel::add_scaled<block_size><<<grid_dim, block_dim>>>(
            x->get_size()[0], x->get_size()[1], alpha->get_size()[1],
            as_cuda_type(alpha->get_const_values()),
            as_cuda_type(x->get_const_values()), x->get_stride(),
            as_cuda_type(y->get_values()), y->get_stride());
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_ADD_SCALED_KERNEL);
namespace kernel {
// First stage of the dot product: each thread accumulates a strided partial
// sum over the rows, the block reduces the partials in shared memory, and
// each block writes its result to work[block_id].
template <size_type block_size, typename ValueType>
__global__ __launch_bounds__(block_size) void compute_partial_dot(
    size_type num_rows, const ValueType *__restrict__ x, size_type stride_x,
    const ValueType *__restrict__ y, size_type stride_y,
    ValueType *__restrict__ work)
{
    constexpr auto warps_per_block = block_size / cuda_config::warp_size;
    const auto num_blocks = gridDim.x;
    const auto local_id = thread::get_local_thread_id<cuda_config::warp_size>();
    const auto global_id =
        thread::get_thread_id<cuda_config::warp_size, warps_per_block>();
    auto tmp = zero<ValueType>();
    // grid-stride loop: any grid size covers all rows
    for (auto i = global_id; i < num_rows; i += block_size * num_blocks) {
        tmp += x[i * stride_x] * y[i * stride_y];
    }
    __shared__ UninitializedArray<ValueType, block_size> tmp_work;
    tmp_work[local_id] = tmp;
    // block-wide reduction of the per-thread partials
    reduce(group::this_thread_block(), static_cast<ValueType *>(tmp_work),
           [](const ValueType &x, const ValueType &y) { return x + y; });
    if (local_id == 0) {
        work[thread::get_block_id()] = tmp_work[0];
    }
}
// Second stage of the dot product: a single block sums the per-block
// partial results in `work` (length `size`) and stores the final value
// into *result.
template <size_type block_size, typename ValueType>
__global__ __launch_bounds__(block_size) void finalize_dot_computation(
    size_type size, const ValueType *work, ValueType *result)
{
    const auto local_id = thread::get_local_thread_id<cuda_config::warp_size>();
    ValueType tmp = zero<ValueType>();
    // block-stride accumulation over the partial results
    for (auto i = local_id; i < size; i += block_size) {
        tmp += work[i];
    }
    __shared__ UninitializedArray<ValueType, block_size> tmp_work;
    tmp_work[local_id] = tmp;
    reduce(group::this_thread_block(), static_cast<ValueType *>(tmp_work),
           [](const ValueType &x, const ValueType &y) { return x + y; });
    if (local_id == 0) {
        *result = tmp_work[0];
    }
}
} // namespace kernel
// Columnwise dot product of x and y: result[0, col] = dot(x[:, col], y[:, col]).
// Uses cuBLAS dot when supported, otherwise a two-stage custom reduction.
template <typename ValueType>
void compute_dot(std::shared_ptr<const CudaExecutor> exec,
                 const matrix::Dense<ValueType> *x,
                 const matrix::Dense<ValueType> *y,
                 matrix::Dense<ValueType> *result)
{
    if (cublas::is_supported<ValueType>::value) {
        // TODO: write a custom kernel which does this more efficiently
        for (size_type col = 0; col < x->get_size()[1]; ++col) {
            cublas::dot(exec->get_cublas_handle(), x->get_size()[0],
                        x->get_const_values() + col, x->get_stride(),
                        y->get_const_values() + col, y->get_stride(),
                        result->get_values() + col);
        }
    } else {
        // TODO: these are tuning parameters obtained experimentally, once
        // we decide how to handle this uniformly, they should be modified
        // appropriately
        constexpr auto work_per_thread = 32;
        constexpr auto block_size = 1024;
        constexpr auto work_per_block = work_per_thread * block_size;
        const dim3 grid_dim = ceildiv(x->get_size()[0], work_per_block);
        const dim3 block_dim{cuda_config::warp_size, 1,
                             block_size / cuda_config::warp_size};
        // one partial result per block; reused for every column
        Array<ValueType> work(exec, grid_dim.x);
        // TODO: write a kernel which does this more efficiently
        for (size_type col = 0; col < x->get_size()[1]; ++col) {
            kernel::compute_partial_dot<block_size><<<grid_dim, block_dim>>>(
                x->get_size()[0], as_cuda_type(x->get_const_values() + col),
                x->get_stride(), as_cuda_type(y->get_const_values() + col),
                y->get_stride(), as_cuda_type(work.get_data()));
            // single block folds the grid_dim.x partials into the result
            kernel::finalize_dot_computation<block_size><<<1, block_dim>>>(
                grid_dim.x, as_cuda_type(work.get_const_data()),
                as_cuda_type(result->get_values() + col));
        }
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COMPUTE_DOT_KERNEL);
namespace kernel {
// In-place elementwise work[i] = sqrt(|work[i]|); used by compute_norm2 to
// turn the squared column norms produced by compute_dot into norms.
template <typename ValueType>
__global__ __launch_bounds__(default_block_size) void compute_sqrt(
    size_type num_cols, ValueType *__restrict__ work)
{
    const auto col =
        static_cast<size_type>(blockDim.x) * blockIdx.x + threadIdx.x;
    if (col >= num_cols) {
        return;
    }
    work[col] = sqrt(abs(work[col]));
}
} // namespace kernel
// Columnwise Euclidean norm of x. Uses cuBLAS nrm2 when supported,
// otherwise computes dot(x, x) per column and takes sqrt on the device.
template <typename ValueType>
void compute_norm2(std::shared_ptr<const CudaExecutor> exec,
                   const matrix::Dense<ValueType> *x,
                   matrix::Dense<ValueType> *result)
{
    if (cublas::is_supported<ValueType>::value) {
        // one nrm2 call per column
        for (size_type col = 0; col < x->get_size()[1]; ++col) {
            cublas::norm2(exec->get_cublas_handle(), x->get_size()[0],
                          x->get_const_values() + col, x->get_stride(),
                          result->get_values() + col);
        }
    } else {
        // fallback: norm2 = sqrt(|dot(x, x)|), done columnwise in result
        compute_dot(exec, x, x, result);
        const dim3 block_size(default_block_size, 1, 1);
        const dim3 grid_size(ceildiv(result->get_size()[1], block_size.x), 1,
                             1);
        kernel::compute_sqrt<<<grid_size, block_size, 0, 0>>>(
            result->get_size()[1], as_cuda_type(result->get_values()));
    }
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COMPUTE_NORM2_KERNEL);
namespace kernel {
// One thread per row: scatters the row's nonzeros into the COO arrays,
// starting at the write offset precomputed in row_ptrs (prefix sum of the
// per-row nonzero counts).
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_coo(
    size_type num_rows, size_type num_cols, size_type stride,
    const size_type *__restrict__ row_ptrs,
    const ValueType *__restrict__ source, IndexType *__restrict__ row_idxs,
    IndexType *__restrict__ col_idxs, ValueType *__restrict__ values)
{
    const auto row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= num_rows) {
        return;
    }
    auto out_idx = row_ptrs[row];
    for (size_type col = 0; col < num_cols; col++) {
        const auto val = source[stride * row + col];
        if (val != zero<ValueType>()) {
            values[out_idx] = val;
            col_idxs[out_idx] = col;
            row_idxs[out_idx] = row;
            ++out_idx;
        }
    }
}
} // namespace kernel
// Converts a dense matrix to COO format: count nonzeros per row, prefix-sum
// the counts into write offsets, then scatter values/indices.
template <typename ValueType, typename IndexType>
void convert_to_coo(std::shared_ptr<const CudaExecutor> exec,
                    matrix::Coo<ValueType, IndexType> *result,
                    const matrix::Dense<ValueType> *source)
{
    auto num_rows = result->get_size()[0];
    auto num_cols = result->get_size()[1];
    auto row_idxs = result->get_row_idxs();
    auto col_idxs = result->get_col_idxs();
    auto values = result->get_values();
    auto stride = source->get_stride();
    // initially holds per-row nonzero counts, turned into write offsets by
    // the two-stage prefix sum below (presumably exclusive — fill_in_coo
    // reads row_ptrs[row] as the row's first write position)
    auto nnz_prefix_sum = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_prefix_sum);
    const size_type grid_dim = ceildiv(num_rows, default_block_size);
    // per-block carry values for the second prefix-sum stage
    auto add_values = Array<size_type>(exec, grid_dim);
    start_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>(
        num_rows, as_cuda_type(nnz_prefix_sum.get_data()),
        as_cuda_type(add_values.get_data()));
    finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>(
        num_rows, as_cuda_type(nnz_prefix_sum.get_data()),
        as_cuda_type(add_values.get_data()))
;
    kernel::fill_in_coo<<<grid_dim, default_block_size>>>(
        num_rows, num_cols, stride,
        as_cuda_type(nnz_prefix_sum.get_const_data()),
        as_cuda_type(source->get_const_values()), as_cuda_type(row_idxs),
        as_cuda_type(col_idxs), as_cuda_type(values));
    nnz_prefix_sum.clear();
    add_values.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_DENSE_CONVERT_TO_COO_KERNEL);
namespace kernel {
// One warp per row: lanes stride across the columns counting nonzeros,
// then a warp-level reduction combines the partial counts into result[row].
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void count_nnz_per_row(
    size_type num_rows, size_type num_cols, size_type stride,
    const ValueType *__restrict__ work, IndexType *__restrict__ result)
{
    constexpr auto warp_size = cuda_config::warp_size;
    const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
    const auto row_idx = tidx / warp_size;
    // row_idx is uniform within a warp, so the whole warp takes the same
    // branch and all lanes reach the reduce together
    if (row_idx < num_rows) {
        IndexType part_result{};
        for (auto i = threadIdx.x % warp_size; i < num_cols; i += warp_size) {
            if (work[stride * row_idx + i] != zero<ValueType>()) {
                part_result += 1;
            }
        }
        auto warp_tile =
            group::tiled_partition<warp_size>(group::this_thread_block());
        result[row_idx] = reduce(
            warp_tile, part_result,
            [](const size_type &a, const size_type &b) { return a + b; });
    }
}
// One thread per row: packs the row's nonzero values and column indices
// into the CSR arrays, starting at the offset in row_ptrs[row].
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_csr(
    size_type num_rows, size_type num_cols, size_type stride,
    const ValueType *__restrict__ source, IndexType *__restrict__ row_ptrs,
    IndexType *__restrict__ col_idxs, ValueType *__restrict__ values)
{
    const auto tidx = threadIdx.x + blockDim.x * blockIdx.x;
    if (tidx < num_rows) {
        auto write_to = row_ptrs[tidx];
        // fix: use size_type for the column index (as fill_in_coo does);
        // `auto i = 0` deduced int, mixing signed/unsigned in `i < num_cols`
        // and overflowing for matrices with more than INT_MAX columns
        for (size_type i = 0; i < num_cols; i++) {
            if (source[stride * tidx + i] != zero<ValueType>()) {
                values[write_to] = source[stride * tidx + i];
                col_idxs[write_to] = i;
                write_to++;
            }
        }
    }
}
} // namespace kernel
// Converts a dense matrix to CSR: count nonzeros per row into row_ptrs,
// prefix-sum the num_rows + 1 entries into row offsets, then scatter.
template <typename ValueType, typename IndexType>
void convert_to_csr(std::shared_ptr<const CudaExecutor> exec,
                    matrix::Csr<ValueType, IndexType> *result,
                    const matrix::Dense<ValueType> *source)
{
    auto num_rows = result->get_size()[0];
    auto num_cols = result->get_size()[1];
    auto row_ptrs = result->get_row_ptrs();
    auto col_idxs = result->get_col_idxs();
    auto values = result->get_values();
    auto stride = source->get_stride();
    // count_nnz_per_row uses one warp per row, so each block covers
    // block_size / warp_size rows
    const auto rows_per_block =
        ceildiv(default_block_size, cuda_config::warp_size);
    const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block);
    kernel::count_nnz_per_row<<<grid_dim_nnz, default_block_size>>>(
        num_rows, num_cols, stride, as_cuda_type(source->get_const_values()),
        as_cuda_type(row_ptrs));
    size_type grid_dim = ceildiv(num_rows + 1, default_block_size);
    auto add_values = Array<IndexType>(exec, grid_dim);
    start_prefix_sum<default_block_size>
        <<<grid_dim, default_block_size>>>(num_rows + 1, as_cuda_type(row_ptrs),
                                           as_cuda_type(add_values.get_data()));
    // NOTE(review): convert_to_coo passes get_data() to finalize_prefix_sum
    // while this passes get_const_data() — confirm finalize_prefix_sum only
    // reads the carry array so both are equivalent
    finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>(
        num_rows + 1, as_cuda_type(row_ptrs),
        as_cuda_type(add_values.get_const_data()));
    kernel::fill_in_csr<<<grid_dim, default_block_size>>>(
        num_rows, num_cols, stride, as_cuda_type(source->get_const_values()),
        as_cuda_type(row_ptrs), as_cuda_type(col_idxs), as_cuda_type(values));
    add_values.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_DENSE_CONVERT_TO_CSR_KERNEL);
namespace kernel {
// One thread per ELL storage row (up to result_stride): packs each dense
// row's nonzeros to the front of the column-major ELL storage and pads the
// remaining slots with zeros.
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_ell(
    size_type num_rows, size_type num_cols, size_type source_stride,
    const ValueType *__restrict__ source, size_type max_nnz_per_row,
    size_type result_stride, IndexType *__restrict__ col_ptrs,
    ValueType *__restrict__ values)
{
    const auto tidx = threadIdx.x + blockDim.x * blockIdx.x;
    if (tidx < num_rows) {
        IndexType col_idx = 0;
        for (size_type col = 0; col < num_cols; col++) {
            if (source[tidx * source_stride + col] != zero<ValueType>()) {
                col_ptrs[col_idx * result_stride + tidx] = col;
                values[col_idx * result_stride + tidx] =
                    source[tidx * source_stride + col];
                col_idx++;
            }
        }
        // pad the unused slots of this row
        for (size_type j = col_idx; j < max_nnz_per_row; j++) {
            col_ptrs[j * result_stride + tidx] = 0;
            values[j * result_stride + tidx] = zero<ValueType>();
        }
    } else if (tidx < result_stride) {
        // stride-padding rows beyond num_rows are fully zero-filled
        for (size_type j = 0; j < max_nnz_per_row; j++) {
            col_ptrs[j * result_stride + tidx] = 0;
            values[j * result_stride + tidx] = zero<ValueType>();
        }
    }
}
} // namespace kernel
// Converts a dense matrix to ELL format; the result's stride and
// num_stored_elements_per_row are assumed to be set up by the caller.
template <typename ValueType, typename IndexType>
void convert_to_ell(std::shared_ptr<const CudaExecutor> exec,
                    matrix::Ell<ValueType, IndexType> *result,
                    const matrix::Dense<ValueType> *source)
{
    auto num_rows = result->get_size()[0];
    auto num_cols = result->get_size()[1];
    auto max_nnz_per_row = result->get_num_stored_elements_per_row();
    auto col_ptrs = result->get_col_idxs();
    auto values = result->get_values();
    auto source_stride = source->get_stride();
    auto result_stride = result->get_stride();
    // one thread per storage row, including the stride-padding rows
    auto grid_dim = ceildiv(result_stride, default_block_size);
    kernel::fill_in_ell<<<grid_dim, default_block_size>>>(
        num_rows, num_cols, source_stride,
        as_cuda_type(source->get_const_values()), max_nnz_per_row,
        result_stride, as_cuda_type(col_ptrs), as_cuda_type(values));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_DENSE_CONVERT_TO_ELL_KERNEL);
// Dense -> Hybrid conversion is not yet implemented for the CUDA executor.
template <typename ValueType, typename IndexType>
void convert_to_hybrid(std::shared_ptr<const CudaExecutor> exec,
                       matrix::Hybrid<ValueType, IndexType> *result,
                       const matrix::Dense<ValueType> *source)
    GKO_NOT_IMPLEMENTED;
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_DENSE_CONVERT_TO_HYBRID_KERNEL);
namespace kernel {
// One warp (= one block of warp_size threads) per SELL-P slice: computes the
// maximum nnz over the slice's rows, rounds it up to a multiple of
// stride_factor, and stores it in both slice_lengths and slice_sets (the
// latter is prefix-summed afterwards by the caller).
__global__
    __launch_bounds__(cuda_config::warp_size) void calculate_slice_lengths(
        size_type num_rows, size_type slice_size, int slice_num,
        size_type stride_factor, const size_type *__restrict__ nnz_per_row,
        size_type *__restrict__ slice_lengths,
        size_type *__restrict__ slice_sets)
{
    constexpr auto warp_size = cuda_config::warp_size;
    const auto sliceid = blockIdx.x;
    const auto tid_in_warp = threadIdx.x;
    // NOTE(review): in the last, partially-filled slice, lanes whose first
    // candidate row is out of range skip this branch entirely and do not
    // join the warp reduce — confirm tiled_partition::reduce tolerates
    // partial participation here
    if (sliceid * slice_size + tid_in_warp < num_rows) {
        size_type thread_result = 0;
        for (auto i = tid_in_warp; i < slice_size; i += warp_size) {
            thread_result =
                (i + slice_size * sliceid < num_rows)
                    ? max(thread_result, nnz_per_row[sliceid * slice_size + i])
                    : thread_result;
        }
        auto warp_tile =
            group::tiled_partition<warp_size>(group::this_thread_block());
        auto warp_result = reduce(
            warp_tile, thread_result,
            [](const size_type &a, const size_type &b) { return max(a, b); });
        if (tid_in_warp == 0) {
            // round up to the stride factor required by the format
            auto slice_length =
                ceildiv(warp_result, stride_factor) * stride_factor;
            slice_lengths[sliceid] = slice_length;
            slice_sets[sliceid] = slice_length;
        }
    }
}
// One thread per row: packs the row's nonzeros into its slice of the SELL-P
// storage (values of one row are slice_size apart) and zero-pads the rest of
// the row's slots up to the slice length.
template <typename ValueType, typename IndexType>
__global__ __launch_bounds__(default_block_size) void fill_in_sellp(
    size_type num_rows, size_type num_cols, size_type slice_size,
    size_type stride, const ValueType *__restrict__ source,
    size_type *__restrict__ slice_lengths, size_type *__restrict__ slice_sets,
    IndexType *__restrict__ col_idxs, ValueType *__restrict__ vals)
{
    const auto global_row = threadIdx.x + blockIdx.x * blockDim.x;
    const auto row = global_row % slice_size;
    const auto sliceid = global_row / slice_size;
    if (global_row < num_rows) {
        // slice_sets[sliceid] is the slice's starting offset (in units of
        // slice_size entries) after the caller's prefix sum
        size_type sellp_ind = slice_sets[sliceid] * slice_size + row;
        for (size_type col = 0; col < num_cols; col++) {
            auto val = source[global_row * stride + col];
            if (val != zero<ValueType>()) {
                col_idxs[sellp_ind] = col;
                vals[sellp_ind] = val;
                sellp_ind += slice_size;
            }
        }
        // zero-fill the remaining slots of this row within the slice
        for (size_type i = sellp_ind;
             i <
             (slice_sets[sliceid] + slice_lengths[sliceid]) * slice_size + row;
             i += slice_size) {
            col_idxs[i] = 0;
            vals[i] = zero<ValueType>();
        }
    }
}
} // namespace kernel
// Converts a dense matrix to SELL-P: per-row nnz counts -> per-slice lengths
// -> prefix sum of slice offsets -> scatter of values/column indices.
template <typename ValueType, typename IndexType>
void convert_to_sellp(std::shared_ptr<const CudaExecutor> exec,
                      matrix::Sellp<ValueType, IndexType> *result,
                      const matrix::Dense<ValueType> *source)
{
    const auto stride = source->get_stride();
    const auto num_rows = result->get_size()[0];
    const auto num_cols = result->get_size()[1];
    auto vals = result->get_values();
    auto col_idxs = result->get_col_idxs();
    auto slice_lengths = result->get_slice_lengths();
    auto slice_sets = result->get_slice_sets();
    // fall back to the format defaults when the result is not configured
    const auto slice_size = (result->get_slice_size() == 0)
                                ? matrix::default_slice_size
                                : result->get_slice_size();
    const auto stride_factor = (result->get_stride_factor() == 0)
                                   ? matrix::default_stride_factor
                                   : result->get_stride_factor();
    const int slice_num = ceildiv(num_rows, slice_size);
    auto nnz_per_row = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_per_row);
    // one warp-sized block per slice
    auto grid_dim = slice_num;
    kernel::calculate_slice_lengths<<<grid_dim, cuda_config::warp_size>>>(
        num_rows, slice_size, slice_num, stride_factor,
        as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(slice_lengths),
        as_cuda_type(slice_sets));
    // turn the per-slice lengths in slice_sets into starting offsets
    auto add_values =
        Array<size_type>(exec, ceildiv(slice_num + 1, default_block_size));
    grid_dim = ceildiv(slice_num + 1, default_block_size);
    start_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>(
        slice_num + 1, as_cuda_type(slice_sets),
        as_cuda_type(add_values.get_data()));
    finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>(
        slice_num + 1, as_cuda_type(slice_sets),
        as_cuda_type(add_values.get_const_data()));
    grid_dim = ceildiv(num_rows, default_block_size);
    kernel::fill_in_sellp<<<grid_dim, default_block_size>>>(
        num_rows, num_cols, slice_size, stride,
        as_cuda_type(source->get_const_values()), as_cuda_type(slice_lengths),
        as_cuda_type(slice_sets), as_cuda_type(col_idxs), as_cuda_type(vals));
    add_values.clear();
    nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
GKO_DECLARE_DENSE_CONVERT_TO_SELLP_KERNEL);
namespace kernel {
// Sums `nnz_per_row` into one partial result per block; the shared buffer
// is dynamically sized (default_block_size * sizeof(size_type)) by the
// caller. Launched a second time with one block to produce the total.
__global__ __launch_bounds__(default_block_size) void reduce_nnz(
    size_type size, const size_type *__restrict__ nnz_per_row,
    size_type *__restrict__ result)
{
    extern __shared__ size_type block_sum[];
    reduce_array(size, nnz_per_row, block_sum,
                 [](const size_type &x, const size_type &y) { return x + y; });
    if (threadIdx.x == 0) {
        result[blockIdx.x] = block_sum[0];
    }
}
} // namespace kernel
// Counts the total number of nonzeros of a dense matrix via a two-pass
// block reduction of the per-row counts, then copies the total to the host.
template <typename ValueType>
void count_nonzeros(std::shared_ptr<const CudaExecutor> exec,
                    const matrix::Dense<ValueType> *source, size_type *result)
{
    const auto num_rows = source->get_size()[0];
    auto nnz_per_row = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_per_row);
    // cap the grid so the second pass fits in a single block
    const auto n = ceildiv(num_rows, default_block_size);
    const size_type grid_dim =
        (n <= default_block_size) ? n : default_block_size;
    auto block_results = Array<size_type>(exec, grid_dim);
    kernel::reduce_nnz<<<grid_dim, default_block_size,
                         default_block_size * sizeof(size_type)>>>(
        num_rows, as_cuda_type(nnz_per_row.get_const_data()),
        as_cuda_type(block_results.get_data()));
    auto d_result = Array<size_type>(exec, 1);
    // second pass: one block folds the per-block partials into one value
    kernel::reduce_nnz<<<1, default_block_size,
                         default_block_size * sizeof(size_type)>>>(
        grid_dim, as_cuda_type(block_results.get_const_data()),
        as_cuda_type(d_result.get_data()));
    exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(),
                                  result);
    d_result.clear();
    block_results.clear();
    nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_DENSE_COUNT_NONZEROS_KERNEL);
namespace kernel {
// Reduces `nnz_per_row` with max into one partial result per block; shared
// buffer is dynamically sized by the caller. Launched twice (grid, then a
// single block) to obtain the global maximum.
__global__ __launch_bounds__(default_block_size) void reduce_max_nnz(
    size_type size, const size_type *__restrict__ nnz_per_row,
    size_type *__restrict__ result)
{
    extern __shared__ size_type block_max[];
    reduce_array(
        size, nnz_per_row, block_max,
        [](const size_type &x, const size_type &y) { return max(x, y); });
    if (threadIdx.x == 0) {
        result[blockIdx.x] = block_max[0];
    }
}
} // namespace kernel
// Computes the maximum number of nonzeros of any row via a two-pass block
// reduction, then copies the result to the host.
template <typename ValueType>
void calculate_max_nnz_per_row(std::shared_ptr<const CudaExecutor> exec,
                               const matrix::Dense<ValueType> *source,
                               size_type *result)
{
    const auto num_rows = source->get_size()[0];
    auto nnz_per_row = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_per_row);
    // cap the grid so the second pass fits in a single block
    const auto n = ceildiv(num_rows, default_block_size);
    const size_type grid_dim =
        (n <= default_block_size) ? n : default_block_size;
    auto block_results = Array<size_type>(exec, grid_dim);
    kernel::reduce_max_nnz<<<grid_dim, default_block_size,
                             default_block_size * sizeof(size_type)>>>(
        num_rows, as_cuda_type(nnz_per_row.get_const_data()),
        as_cuda_type(block_results.get_data()));
    auto d_result = Array<size_type>(exec, 1);
    kernel::reduce_max_nnz<<<1, default_block_size,
                             default_block_size * sizeof(size_type)>>>(
        grid_dim, as_cuda_type(block_results.get_const_data()),
        as_cuda_type(d_result.get_data()));
    exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(),
                                  result);
    d_result.clear();
    block_results.clear();
    nnz_per_row.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(
GKO_DECLARE_DENSE_CALCULATE_MAX_NNZ_PER_ROW_KERNEL);
// Fills `result` with the nonzero count of each row of `source`, using the
// warp-per-row count_nnz_per_row kernel.
template <typename ValueType>
void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec,
                                const matrix::Dense<ValueType> *source,
                                Array<size_type> *result)
{
    const dim3 block_size(default_block_size, 1, 1);
    // one warp per row, so each block processes block_size/warp_size rows
    auto rows_per_block = ceildiv(default_block_size, cuda_config::warp_size);
    const size_t grid_x = ceildiv(source->get_size()[0], rows_per_block);
    const dim3 grid_size(grid_x, 1, 1);
    kernel::count_nnz_per_row<<<grid_size, block_size>>>(
        source->get_size()[0], source->get_size()[1], source->get_stride(),
        as_cuda_type(source->get_const_values()),
        as_cuda_type(result->get_data()));
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(
GKO_DECLARE_DENSE_CALCULATE_NONZEROS_PER_ROW_KERNEL);
namespace kernel {
// One warp per slice: computes the maximum nnz over the slice's rows and
// rounds it up to a multiple of stride_factor into result[slice].
__global__ __launch_bounds__(default_block_size) void reduce_max_nnz_per_slice(
    size_type num_rows, size_type slice_size, size_type stride_factor,
    const size_type *__restrict__ nnz_per_row, size_type *__restrict__ result)
{
    const auto tidx = threadIdx.x + blockIdx.x * blockDim.x;
    constexpr auto warp_size = cuda_config::warp_size;
    const auto warpid = tidx / warp_size;
    const auto tid_in_warp = tidx % warp_size;
    size_type thread_result = 0;
    for (auto i = tid_in_warp; i < slice_size; i += warp_size) {
        // fix: slice `warpid` covers rows [warpid * slice_size,
        // warpid * slice_size + slice_size); the old code indexed with
        // warpid * warp_size, reading the wrong rows whenever
        // slice_size != warp_size (e.g. default slice size vs. warp size)
        if (warpid * slice_size + i < num_rows) {
            thread_result =
                max(thread_result, nnz_per_row[warpid * slice_size + i]);
        }
    }
    auto warp_tile =
        group::tiled_partition<warp_size>(group::this_thread_block());
    auto warp_result = reduce(
        warp_tile, thread_result,
        [](const size_type &a, const size_type &b) { return max(a, b); });
    // only write for warps that map to an actual slice (first row in range)
    if (tid_in_warp == 0 && warpid * slice_size < num_rows) {
        result[warpid] = ceildiv(warp_result, stride_factor) * stride_factor;
    }
}
// Sums the per-slice maxima into one partial result per block; shared
// buffer is dynamically sized by the caller. Launched twice to obtain the
// total number of stored columns.
__global__ __launch_bounds__(default_block_size) void reduce_total_cols(
    size_type num_slices, const size_type *__restrict__ max_nnz_per_slice,
    size_type *__restrict__ result)
{
    extern __shared__ size_type block_result[];
    reduce_array(num_slices, max_nnz_per_slice, block_result,
                 [](const size_type &x, const size_type &y) { return x + y; });
    if (threadIdx.x == 0) {
        result[blockIdx.x] = block_result[0];
    }
}
} // namespace kernel
// Computes the total number of columns stored by a SELL-P representation:
// per-row nnz -> per-slice rounded maxima -> sum over slices -> host copy.
template <typename ValueType>
void calculate_total_cols(std::shared_ptr<const CudaExecutor> exec,
                          const matrix::Dense<ValueType> *source,
                          size_type *result, size_type stride_factor,
                          size_type slice_size)
{
    const auto num_rows = source->get_size()[0];
    const auto num_cols = source->get_size()[1];
    const auto slice_num = ceildiv(num_rows, slice_size);
    auto nnz_per_row = Array<size_type>(exec, num_rows);
    calculate_nonzeros_per_row(exec, source, &nnz_per_row);
    auto max_nnz_per_slice = Array<size_type>(exec, slice_num);
    // NOTE(review): reduce_max_nnz_per_slice assigns one *warp* per slice,
    // but this grid provides only ~one *thread* per slice
    // (ceildiv(slice_num, block) blocks) — confirm the intended grid is
    // ceildiv(slice_num * warp_size, default_block_size)
    const auto grid_dim = ceildiv(slice_num, default_block_size);
    kernel::reduce_max_nnz_per_slice<<<grid_dim, default_block_size>>>(
        num_rows, slice_size, stride_factor,
        as_cuda_type(nnz_per_row.get_const_data()),
        as_cuda_type(max_nnz_per_slice.get_data()));
    auto block_results = Array<size_type>(exec, grid_dim);
    kernel::reduce_total_cols<<<grid_dim, default_block_size,
                                default_block_size * sizeof(size_type)>>>(
        slice_num, as_cuda_type(max_nnz_per_slice.get_const_data()),
        as_cuda_type(block_results.get_data()));
    auto d_result = Array<size_type>(exec, 1);
    // second pass: one block folds the per-block partials into the total
    kernel::reduce_total_cols<<<1, default_block_size,
                                default_block_size * sizeof(size_type)>>>(
        grid_dim, as_cuda_type(block_results.get_const_data()),
        as_cuda_type(d_result.get_data()));
    exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(),
                                  result);
    block_results.clear();
    nnz_per_row.clear();
    max_nnz_per_slice.clear();
    d_result.clear();
}
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(
GKO_DECLARE_DENSE_CALCULATE_TOTAL_COLS_KERNEL);
// Out-of-place transpose via cuBLAS geam: trans = 1 * orig^T + 0 * B.
// Since beta == 0, B may legally be a null pointer.
template <typename ValueType>
void transpose(std::shared_ptr<const CudaExecutor> exec,
               matrix::Dense<ValueType> *trans,
               const matrix::Dense<ValueType> *orig)
{
    if (cublas::is_supported<ValueType>::value) {
        auto handle = exec->get_cublas_handle();
        // alpha/beta live on the host stack for this call, so temporarily
        // switch the handle to host pointer mode
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST));
        auto alpha = one<ValueType>();
        auto beta = zero<ValueType>();
        cublas::geam(handle, CUBLAS_OP_T, CUBLAS_OP_N, orig->get_size()[0],
                     orig->get_size()[1], &alpha, orig->get_const_values(),
                     orig->get_stride(), &beta,
                     static_cast<ValueType *>(nullptr), trans->get_size()[1],
                     trans->get_values(), trans->get_stride());
        // restore the handle's device pointer mode for subsequent calls
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE));
    } else {
        GKO_NOT_IMPLEMENTED;
    }
};
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_TRANSPOSE_KERNEL);
// Out-of-place conjugate transpose via cuBLAS geam with op C:
// trans = 1 * conj(orig^T) + 0 * B (B may be null since beta == 0).
template <typename ValueType>
void conj_transpose(std::shared_ptr<const CudaExecutor> exec,
                    matrix::Dense<ValueType> *trans,
                    const matrix::Dense<ValueType> *orig)
{
    if (cublas::is_supported<ValueType>::value) {
        auto handle = exec->get_cublas_handle();
        // alpha/beta live on the host stack for this call, so temporarily
        // switch the handle to host pointer mode
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST));
        auto alpha = one<ValueType>();
        auto beta = zero<ValueType>();
        cublas::geam(handle, CUBLAS_OP_C, CUBLAS_OP_N, orig->get_size()[0],
                     orig->get_size()[1], &alpha, orig->get_const_values(),
                     orig->get_stride(), &beta,
                     static_cast<ValueType *>(nullptr), trans->get_size()[1],
                     trans->get_values(), trans->get_stride());
        // fix: restore DEVICE pointer mode (as transpose() does); the old
        // code re-set HOST mode here, leaving the shared cuBLAS handle
        // misconfigured for later calls that pass device-side scalars
        GKO_ASSERT_NO_CUBLAS_ERRORS(
            cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE));
    } else {
        GKO_NOT_IMPLEMENTED;
    }
};
GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_CONJ_TRANSPOSE_KERNEL);
} // namespace dense
} // namespace cuda
} // namespace kernels
} // namespace gko
|
c5d1dc67af53012824e40e525ae00b31d11788be.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
// total number of thread blocks launched for the CFD kernel
#define cfd_NBLOCKS 16*6*2
//#define cfd_SUPER_BLOCKS_PER_SM 5
#define cfd_BLOCK_SIZE 256
//const int cfd_BLOCK_SIZE = 256;
// number of thread blocks whose elements share one neighbor "super block"
const int cfd_nBlksPerCluster = 16;
// problem size: one element per thread
const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS;
// fixed neighbor count per element
const int cfd_maxNeighbors = 8;
// read-only inputs bound to 1D textures (momentum components, energy,
// density, neighbor indices, face normals)
texture<float,1,hipReadModeElementType> tex_mx;
texture<float,1,hipReadModeElementType> tex_my;
texture<float,1,hipReadModeElementType> tex_mz;
texture<float,1,hipReadModeElementType> tex_energy;
texture<float,1,hipReadModeElementType> tex_density;
texture<int,1,hipReadModeElementType> tex_neighbor;
texture<float,1,hipReadModeElementType> tex_normals;
// Builds a randomized neighbor list in which each element's neighbors are
// drawn only from its own "super block" (cfd_nBlksPerCluster consecutive
// thread blocks), and returns a shuffled block-schedule order.
// NOTE: the returned array is malloc'd here — the caller owns it and must
// free() it. Uses rand(); seed beforehand for reproducibility.
// NOTE(review): the same neighbor j can be drawn multiple times for one i —
// confirm duplicate neighbors are acceptable to the consumer.
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
		int* neighborList, int blockSz)
{
	//create non-uniform data sharing
	//but avoid that tasks sharing the same data are neighbor tasks by randomization
	vector<int> atomInds(nAtom);
	vector<int> blkInds((nAtom+blockSz-1)/blockSz);
	for(int i=0; i<blkInds.size(); ++i)
		blkInds[i] = i;
	random_shuffle(blkInds.begin(), blkInds.end());
	// copy of the shuffled block order, returned to the caller
	int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
	for(int i=0; i<blkInds.size(); ++i)
		blkOrder[i] = blkInds[i];
	int j=0;
	// atomInds maps shuffled-schedule position -> original element index
	for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
	{
		int blkInd = *it;
		for(int i=0; i<blockSz; ++i)
			atomInds[j++] = blkInd*blockSz + i;
	}
	int superBlockSz = blockSz * cfd_nBlksPerCluster;
	// Build Neighbor List
	for (int i = 0; i < nAtom; i++)
	{
		int start = i - i%superBlockSz; //difference is here
		//int end = i + (superBlockSz - i%superBlockSz)-1;
		int nNeighbors = 0;
		do {
			// NOTE: this `j` shadows the outer loop counter above
			int j = start + rand() % superBlockSz;
			if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
			neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
			nNeighbors ++;
		} while(nNeighbors<cfd_maxNeighbors);
	}
	return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
// Velocity = momentum / density, componentwise (output written to velocity).
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
	velocity = make_float3(momentum.x / density, momentum.y / density,
	                       momentum.z / density);
}
// Squared magnitude of the velocity vector, |v|^2.
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
	const float vx = velocity.x;
	const float vy = velocity.y;
	const float vz = velocity.z;
	return vx * vx + vy * vy + vz * vz;
}
// Ideal-gas pressure: p = (gamma - 1) * (E - 0.5 * rho * |v|^2).
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
	const float gamma_minus_one = float(GAMMA) - float(1.0f);
	const float kinetic = float(0.5f) * density * speed_sqd;
	return gamma_minus_one * (density_energy - kinetic);
}
// Speed of sound: c = sqrt(gamma * p / rho).
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
	const float ratio = float(GAMMA) * pressure / density;
	return sqrtf(ratio);
}
// Computes the inviscid (Euler) flux contribution of one cell state:
// the three momentum flux rows (symmetric off-diagonal terms are reused)
// and the density-energy flux v * (E + p). Results are written to the
// fc_* output parameters.
// fix: the original declared `__host__ __device__ __host__` — the
// duplicated __host__ qualifier is redundant (and rejected by some
// compilers), so it is dropped.
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
	fc_momentum_x.x = velocity.x*momentum.x + pressure;
	fc_momentum_x.y = velocity.x*momentum.y;
	fc_momentum_x.z = velocity.x*momentum.z;
	// the momentum flux tensor is symmetric; reuse the computed terms
	fc_momentum_y.x = fc_momentum_x.y;
	fc_momentum_y.y = velocity.y*momentum.y + pressure;
	fc_momentum_y.z = velocity.y*momentum.z;
	fc_momentum_z.x = fc_momentum_x.z;
	fc_momentum_z.y = fc_momentum_y.z;
	fc_momentum_z.z = velocity.z*momentum.z + pressure;
	float de_p = density_energy+pressure;
	fc_density_energy.x = velocity.x*de_p;
	fc_density_energy.y = velocity.y*de_p;
	fc_density_energy.z = velocity.z*de_p;
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
__global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float*
normals, const float* __restrict__ density, float* mx, float* my, float* mz, float* density_energy, float* fluxes,int p0,int p1,
int p2,int p3, int p4,int p5,int p6)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float _temp1,_temp2,_temp3;
if(p0==0)
_temp1=density[i];
else if(p0==1)
_temp1=tex1Dfetch(tex_density,i);
else if(p0==3)
_temp1=__ldg(&density[i]);
float density_i = _temp1;//density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
if(p1==0)
_temp1=mx[i];
else if(p1==1)
_temp1=tex1Dfetch(tex_mx,i);
else if(p1==3)
_temp1=__ldg(&mx[i]);
momentum_i.x = _temp1;//tex1Dfetch(tex_mx,i);//mx[i];
if(p2==0)
_temp1=my[i];
else if(p2==1)
_temp1=tex1Dfetch(tex_my,i);
else if(p2==3)
_temp1=__ldg(&my[i]);
momentum_i.y = _temp1;//tex1Dfetch(tex_my,i);//my[i];
if(p3==0)
_temp1=mz[i];
else if(p3==1)
_temp1=tex1Dfetch(tex_mz,i);
else if(p3==3)
_temp1=__ldg(&mz[i]);
momentum_i.z = _temp1;//tex1Dfetch(tex_mz,i); //mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
if(p4==0)
_temp1=density_energy[i];
else if(p4==1)
_temp1=tex1Dfetch(tex_energy,i);
else if(p4==3)
_temp1=__ldg(&density_energy[i]);
float density_energy_i = _temp1;//tex1Dfetch(tex_energy,i);//density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
if(p5==0)
_temp1=elements_surrounding_elements[i + j*nelr];
else if(p5==1)
_temp1=tex1Dfetch(tex_neighbor,i+j*nelr);
else if(p5==3)
_temp1=__ldg(&elements_surrounding_elements[i + j*nelr]);
nb = _temp1;//tex1Dfetch(tex_neighbor,i+j*nelr);//elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
if(p6==0)
{_temp1=normals[i + (j + 0*cfd_maxNeighbors)*nelr]; _temp2=normals[i + (j + 1*cfd_maxNeighbors)*nelr];_temp3=normals[i + (j + 2*cfd_maxNeighbors)*nelr];}
else if(p6==1)
{_temp1=tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);_temp2=tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);_temp3=tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);}
else if(p6==3)
{_temp1=__ldg(&normals[i + (j + 0*cfd_maxNeighbors)*nelr]);_temp2=__ldg(&normals[i + (j + 1*cfd_maxNeighbors)*nelr]);_temp3=__ldg(&normals[i + (j + 2*cfd_maxNeighbors)*nelr]);}
normal.x = _temp1;//tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);//normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = _temp2;//tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);//normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = _temp3;//tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);//normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
if(p0==0)
_temp1=density[nb];
else if(p0==1)
_temp1=tex1Dfetch(tex_density,nb);
else if(p0==3)
_temp1=__ldg(&density[nb]);
density_nb = _temp1;//density[nb];
if(p1==0)
_temp1=mx[nb];
else if(p1==1)
_temp1=tex1Dfetch(tex_mx,nb);
else if(p1==3)
_temp1=__ldg(&mx[nb]);
momentum_nb.x = _temp1;//tex1Dfetch(tex_mx,nb);//mx[nb];
if(p2==0)
_temp1=my[nb];
else if(p2==1)
_temp1=tex1Dfetch(tex_my,nb);
else if(p2==3)
_temp1=__ldg(&my[nb]);
momentum_nb.y = _temp1;//tex1Dfetch(tex_my,nb);//my[nb];
if(p3==0)
_temp1=mz[nb];
else if(p3==1)
_temp1=tex1Dfetch(tex_mz,nb);
else if(p3==3)
_temp1=__ldg(&mz[nb]);
momentum_nb.z = _temp1;//tex1Dfetch(tex_mz,nb);//mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
if(p4==0)
_temp1=density_energy[nb];
else if(p4==1)
_temp1=tex1Dfetch(tex_energy,nb);
else if(p4==3)
_temp1=__ldg(&density_energy[nb]);
density_energy_nb =_temp1;// tex1Dfetch(tex_energy,nb);//density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
hipSetDevice(2);
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE);
hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice);
// Copy data to GPU
hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice);
hipSetDeviceFlags(hipDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped);
hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
hipBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_density,d_density,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_neighbor,d_cfd_neighborList,cfd_maxNeighbors*cfd_nAtom*sizeof(int));
hipBindTexture(0,tex_normals,d_normals,cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
int cfd_gridSize = (cfd_nAtom-1+cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE;
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(cfd_BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,3,1,1,1,1,1,1);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost);
check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
| c5d1dc67af53012824e40e525ae00b31d11788be.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define cfd_NBLOCKS 16*6*2
//#define cfd_SUPER_BLOCKS_PER_SM 5
#define cfd_BLOCK_SIZE 256
//const int cfd_BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS;
const int cfd_maxNeighbors = 8;
texture<float,1,cudaReadModeElementType> tex_mx;
texture<float,1,cudaReadModeElementType> tex_my;
texture<float,1,cudaReadModeElementType> tex_mz;
texture<float,1,cudaReadModeElementType> tex_energy;
texture<float,1,cudaReadModeElementType> tex_density;
texture<int,1,cudaReadModeElementType> tex_neighbor;
texture<float,1,cudaReadModeElementType> tex_normals;
// Builds a randomized neighbor list that creates non-uniform data sharing
// between thread blocks, while randomizing block order so that tasks sharing
// the same data are not scheduled as neighboring blocks.
//
// Parameters:
//   nAtom        - total number of atoms; code assumes it is a multiple of
//                  blockSz (true for cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS)
//   neighborList - output, cfd_maxNeighbors*nAtom ints, laid out
//                  [neighbor][atom] (neighbor-major, SoA style)
//   blockSz      - thread-block size used to group atoms
//
// Returns a malloc'd array with the shuffled block order (one entry per
// block). The caller owns this allocation; it is never freed in this file.
// NOTE(review): depends on the global C RNG (srand in main, rand here and
// implicitly in random_shuffle), so results are tied to the exact sequence
// of RNG calls.
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
        int* neighborList, int blockSz)
{
	//create non-uniform data sharing
	//but avoid that tasks sharing the same data are neighbor tasks by randomization
	vector<int> atomInds(nAtom);
	vector<int> blkInds((nAtom+blockSz-1)/blockSz);
	for(int i=0; i<blkInds.size(); ++i)
		blkInds[i] = i;
	// Randomly permute the block order (std::random_shuffle uses the global RNG).
	random_shuffle(blkInds.begin(), blkInds.end());
	int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
	for(int i=0; i<blkInds.size(); ++i)
		blkOrder[i] = blkInds[i];
	// Expand the shuffled block order into a per-atom permutation: atoms of a
	// block stay contiguous, but whole blocks appear in shuffled order.
	int j=0;
	for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
	{
		int blkInd = *it;
		for(int i=0; i<blockSz; ++i)
			atomInds[j++] = blkInd*blockSz + i;
	}
	int superBlockSz = blockSz * cfd_nBlksPerCluster;
	// Build Neighbor List: each atom receives cfd_maxNeighbors neighbors drawn
	// uniformly at random from its own super-block (cluster of
	// cfd_nBlksPerCluster blocks). Duplicate neighbors are permitted.
	for (int i = 0; i < nAtom; i++)
	{
		int start = i - i%superBlockSz; // first index of this atom's super-block
		//int end = i + (superBlockSz - i%superBlockSz)-1;
		int nNeighbors = 0;
		do {
			int j = start + rand() % superBlockSz;
			if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
			// Store through the permuted indices so the sharing pattern is
			// decoupled from block adjacency.
			neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
			nNeighbors ++;
		} while(nNeighbors<cfd_maxNeighbors);
	}
	return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
// Recover the primitive velocity from the conserved state: v = momentum / density.
// `velocity` is the output; all parameters are passed by reference.
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
	// Three independent IEEE divides, one per axis (assignment order is
	// irrelevant since the components do not alias).
	velocity.z = momentum.z / density;
	velocity.y = momentum.y / density;
	velocity.x = momentum.x / density;
}
// Squared magnitude of a velocity vector: |v|^2 = vx*vx + vy*vy + vz*vz.
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
	// Accumulate left-to-right so the floating-point association matches the
	// original single-expression form (a*a + b*b) + c*c.
	float sum = velocity.x * velocity.x;
	sum += velocity.y * velocity.y;
	sum += velocity.z * velocity.z;
	return sum;
}
// Ideal-gas equation of state: p = (gamma - 1) * (E - 0.5 * rho * |v|^2).
// `speed_sqd` is the squared velocity magnitude from compute_speed_sqd.
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
	const float gamma_minus_one = float(GAMMA) - float(1.0f);
	// Kinetic energy density; same operation order as the original expression.
	const float kinetic = float(0.5f) * density * speed_sqd;
	return gamma_minus_one * (density_energy - kinetic);
}
// Speed of sound for an ideal gas: c = sqrt(gamma * p / rho).
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
	// (GAMMA * pressure) / density — identical evaluation order to the original.
	const float ratio = float(GAMMA) * pressure / density;
	return sqrtf(ratio);
}
// Per-cell Euler flux contributions. Given the conserved state (density,
// momentum, density_energy) plus the derived pressure and primitive velocity,
// fills the three rows of the momentum flux tensor (fc_momentum_x/y/z) and
// the energy flux vector (fc_density_energy). All fc_* parameters are
// outputs, passed by reference.
//
// Fix: the original declaration carried a duplicated execution-space
// qualifier (`__host__ __device__ __host__`); the redundant second
// `__host__` is removed. The body is unchanged bit-for-bit.
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
	fc_momentum_x.x = velocity.x*momentum.x + pressure; // diagonal term: rho*u^2 + p
	fc_momentum_x.y = velocity.x*momentum.y;
	fc_momentum_x.z = velocity.x*momentum.z;
	// The momentum flux tensor is symmetric, so off-diagonal terms are
	// mirrored from the already-computed row instead of being recomputed.
	fc_momentum_y.x = fc_momentum_x.y;
	fc_momentum_y.y = velocity.y*momentum.y + pressure;
	fc_momentum_y.z = velocity.y*momentum.z;
	fc_momentum_z.x = fc_momentum_x.z;
	fc_momentum_z.y = fc_momentum_y.z;
	fc_momentum_z.z = velocity.z*momentum.z + pressure;
	// Energy flux: (E + p) * v, with the scalar factored out once.
	float de_p = density_energy+pressure;
	fc_density_energy.x = velocity.x*de_p;
	fc_density_energy.y = velocity.y*de_p;
	fc_density_energy.z = velocity.z*de_p;
}
// CPU reference check for cfd_kernel: serially recomputes every element's
// fluxes on the host with the same arithmetic as the device kernel, then
// compares against the GPU output in `fluxes`.
//
// Array layouts (nelr = number of elements, structure-of-arrays):
//   elements_surrounding_elements[i + j*nelr]  - j-th neighbor of element i
//                                                (negative value = no neighbor)
//   normals[i + (j + d*cfd_maxNeighbors)*nelr] - component d (0..2) of the
//                                                face normal toward neighbor j
//   fluxes[i + var*nelr]                       - GPU flux for variable plane var
//
// An element is flagged only when BOTH the relative AND the absolute
// difference exceed 0.01 for some variable; the first failure is printed and
// the check returns early. Otherwise "GOOD! passed!" is printed.
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
	const float smoothing_coefficient = float(0.2f); // kept for symmetry with the kernel; unused since the viscosity factor below is hard-coded
	//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
	// Serial loop over the same index range the GPU covers with one thread per element.
	for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){
	int j, nb;
	float3 normal; float normal_len;
	float factor;
	//float density_i = variables[i + VAR_DENSITY*nelr];
	float density_i = density[i];
	float3 momentum_i;
	//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
	//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
	//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
	momentum_i.x = mx[i];
	momentum_i.y = my[i];
	momentum_i.z = mz[i];
	//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
	float density_energy_i = density_energy[i];
	// Derived quantities for element i (velocity, pressure, sound speed, flux rows).
	float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
	float speed_sqd_i = compute_speed_sqd(velocity_i);
	float speed_i = sqrtf(speed_sqd_i);
	float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
	float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
	float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
	float3 flux_contribution_i_density_energy;
	compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
	//float flux_i_density = float(0.0f);
	// Flux accumulators for element i, summed over all neighbors.
	float flux_i_density = 0.0;
	float3 flux_i_momentum;
	flux_i_momentum.x = float(0.0f);
	flux_i_momentum.y = float(0.0f);
	flux_i_momentum.z = float(0.0f);
	float flux_i_density_energy = float(0.0f);
	float3 velocity_nb;
	float density_nb, density_energy_nb;
	float3 momentum_nb;
	float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
	float3 flux_contribution_nb_density_energy;
	float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
	// #pragma unroll is a no-op for this host loop; kept to mirror the kernel.
	#pragma unroll
	for(j = 0; j < cfd_maxNeighbors; j++)
	{
	nb = elements_surrounding_elements[i + j*nelr];
	//optimal layout already
	// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
	// |Z for neighbor 0, Z for neighbor 1, ... |
	normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
	normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
	normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
	normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
	if(nb >= 0) // a legitimate neighbor
	{
	//density_nb = variables[nb + VAR_DENSITY*nelr];
	//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
	//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
	//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
	density_nb = density[nb];
	momentum_nb.x = mx[nb];
	momentum_nb.y = my[nb];
	momentum_nb.z = mz[nb];
	//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
	density_energy_nb = density_energy[nb];
	compute_velocity(density_nb, momentum_nb, velocity_nb);
	speed_sqd_nb = compute_speed_sqd(velocity_nb);
	pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
	speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
	compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
	// artificial viscosity
	//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
	// Hard-coded factor replaces the viscosity formula above; must stay
	// identical to the constant used in cfd_kernel for the check to pass.
	factor = 1.3;
	flux_i_density += factor*(density_i-density_nb);
	flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
	flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
	flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
	flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
	// accumulate cell-centered fluxes
	factor = float(0.5f)*normal.x;
	flux_i_density += factor*(momentum_nb.x+momentum_i.x);
	flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
	flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
	flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
	flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
	factor = float(0.5f)*normal.y;
	flux_i_density += factor*(momentum_nb.y+momentum_i.y);
	flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
	flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
	flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
	flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
	factor = float(0.5f)*normal.z;
	flux_i_density += factor*(momentum_nb.z+momentum_i.z);
	flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
	flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
	flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
	flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
	}
	}
	/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
	((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
	((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
	((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
	((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
	// Mismatch requires both relative error > 1% AND absolute error > 0.01,
	// so near-zero reference values do not trigger spurious failures.
	if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
	((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
	((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
	((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
	((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
	{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
	fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
	fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
	fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
	fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
	return;}
	}
	printf("GOOD! passed!\n");
	return;
}
__global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float*
normals, const float* __restrict__ density, float* mx, float* my, float* mz, float* density_energy, float* fluxes,int p0,int p1,
int p2,int p3, int p4,int p5,int p6)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float _temp1,_temp2,_temp3;
if(p0==0)
_temp1=density[i];
else if(p0==1)
_temp1=tex1Dfetch(tex_density,i);
else if(p0==3)
_temp1=__ldg(&density[i]);
float density_i = _temp1;//density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
if(p1==0)
_temp1=mx[i];
else if(p1==1)
_temp1=tex1Dfetch(tex_mx,i);
else if(p1==3)
_temp1=__ldg(&mx[i]);
momentum_i.x = _temp1;//tex1Dfetch(tex_mx,i);//mx[i];
if(p2==0)
_temp1=my[i];
else if(p2==1)
_temp1=tex1Dfetch(tex_my,i);
else if(p2==3)
_temp1=__ldg(&my[i]);
momentum_i.y = _temp1;//tex1Dfetch(tex_my,i);//my[i];
if(p3==0)
_temp1=mz[i];
else if(p3==1)
_temp1=tex1Dfetch(tex_mz,i);
else if(p3==3)
_temp1=__ldg(&mz[i]);
momentum_i.z = _temp1;//tex1Dfetch(tex_mz,i); //mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
if(p4==0)
_temp1=density_energy[i];
else if(p4==1)
_temp1=tex1Dfetch(tex_energy,i);
else if(p4==3)
_temp1=__ldg(&density_energy[i]);
float density_energy_i = _temp1;//tex1Dfetch(tex_energy,i);//density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
if(p5==0)
_temp1=elements_surrounding_elements[i + j*nelr];
else if(p5==1)
_temp1=tex1Dfetch(tex_neighbor,i+j*nelr);
else if(p5==3)
_temp1=__ldg(&elements_surrounding_elements[i + j*nelr]);
nb = _temp1;//tex1Dfetch(tex_neighbor,i+j*nelr);//elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
if(p6==0)
{_temp1=normals[i + (j + 0*cfd_maxNeighbors)*nelr]; _temp2=normals[i + (j + 1*cfd_maxNeighbors)*nelr];_temp3=normals[i + (j + 2*cfd_maxNeighbors)*nelr];}
else if(p6==1)
{_temp1=tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);_temp2=tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);_temp3=tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);}
else if(p6==3)
{_temp1=__ldg(&normals[i + (j + 0*cfd_maxNeighbors)*nelr]);_temp2=__ldg(&normals[i + (j + 1*cfd_maxNeighbors)*nelr]);_temp3=__ldg(&normals[i + (j + 2*cfd_maxNeighbors)*nelr]);}
normal.x = _temp1;//tex1Dfetch(tex_normals,i + (j + 0*cfd_maxNeighbors)*nelr);//normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = _temp2;//tex1Dfetch(tex_normals,i + (j + 1*cfd_maxNeighbors)*nelr);//normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = _temp3;//tex1Dfetch(tex_normals,i + (j + 2*cfd_maxNeighbors)*nelr);//normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
if(p0==0)
_temp1=density[nb];
else if(p0==1)
_temp1=tex1Dfetch(tex_density,nb);
else if(p0==3)
_temp1=__ldg(&density[nb]);
density_nb = _temp1;//density[nb];
if(p1==0)
_temp1=mx[nb];
else if(p1==1)
_temp1=tex1Dfetch(tex_mx,nb);
else if(p1==3)
_temp1=__ldg(&mx[nb]);
momentum_nb.x = _temp1;//tex1Dfetch(tex_mx,nb);//mx[nb];
if(p2==0)
_temp1=my[nb];
else if(p2==1)
_temp1=tex1Dfetch(tex_my,nb);
else if(p2==3)
_temp1=__ldg(&my[nb]);
momentum_nb.y = _temp1;//tex1Dfetch(tex_my,nb);//my[nb];
if(p3==0)
_temp1=mz[nb];
else if(p3==1)
_temp1=tex1Dfetch(tex_mz,nb);
else if(p3==3)
_temp1=__ldg(&mz[nb]);
momentum_nb.z = _temp1;//tex1Dfetch(tex_mz,nb);//mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
if(p4==0)
_temp1=density_energy[nb];
else if(p4==1)
_temp1=tex1Dfetch(tex_energy,nb);
else if(p4==3)
_temp1=__ldg(&density_energy[nb]);
density_energy_nb =_temp1;// tex1Dfetch(tex_energy,nb);//density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
// CFD flux benchmark driver: builds a random problem on the host, runs
// cfd_kernel once on GPU 2, times it with CUDA events, and validates the
// result against the CPU reference implementation (check_cfd).
//
// Fixes vs. the original:
//  * cudaSetDeviceFlags(cudaDeviceMapHost) must be issued BEFORE the CUDA
//    context is created (i.e. before the first allocation); previously it was
//    called after many cudaMallocHost/cudaMalloc calls, so it failed with
//    cudaErrorSetOnActiveProcess and the mapped allocation was not guaranteed.
//  * All textures, events, device buffers and pinned host buffers are now
//    released before returning.
int main(int argc, char **argv) {
    cudaSetDevice(2);
    // Must precede any allocation so the flag applies to the context we create.
    cudaSetDeviceFlags(cudaDeviceMapHost);
    srand(2013);

    // ---- Pinned host problem data ---------------------------------------
    float *density;
    float *mx;
    float *my;
    float *mz;
    float *density_energy;
    float *normals;
    float *fluxes;
    int* cfd_neighborList;
    cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float));
    cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
    cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
    cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));

    // ---- Device buffers --------------------------------------------------
    float *d_density;
    float *d_mx;
    float *d_my;
    float *d_mz;
    float *d_density_energy;
    float *d_normals;
    float *d_fluxes;
    cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
    cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
    cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
    cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
    int* d_cfd_neighborList;
    cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));

    // ---- Initialize problem state with uniform randoms in [0,1) ----------
    for (int i = 0; i < cfd_nAtom; i++)
    {
        density[i] = (float)(drand48());
        density_energy[i] = (float)(drand48());
        mx[i] = (float)(drand48());
        my[i] = (float)(drand48());
        mz[i] = (float)(drand48());
    }
    for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
        normals[i] = (float)(drand48());
    // Neighbor list is built in the same block schedule the kernel uses.
    cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE);

    // ---- Upload host state to the device ---------------------------------
    cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice);

    // Zero-copy completion flag (the atomicAdd that uses it is commented out
    // in the kernel; kept so instrumented runs still work).
    int *flag_cfd,*d_flag_cfd;
    cudaHostAlloc((void**)&flag_cfd,sizeof( int),cudaHostAllocMapped);
    cudaHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);

    // Bind the read-only inputs to texture references so the kernel's p*==1
    // code paths can fetch through the texture cache.
    cudaBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float));
    cudaBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float));
    cudaBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float));
    cudaBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float));
    cudaBindTexture(0,tex_density,d_density,cfd_nAtom*sizeof(float));
    cudaBindTexture(0,tex_neighbor,d_cfd_neighborList,cfd_maxNeighbors*cfd_nAtom*sizeof(int));
    cudaBindTexture(0,tex_normals,d_normals,cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));

    // ---- Launch and time the kernel --------------------------------------
    cudaEvent_t kernel_start, kernel_stop;
    cudaEventCreate(&kernel_start);
    cudaEventCreate(&kernel_stop);
    float kernel_time = 0.0f;
    cudaEventRecord(kernel_start, 0);
    int cfd_gridSize = (cfd_nAtom-1+cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE;  // ceil(nAtom/B)
    // Trailing integer args select the access path (global/texture/__ldg)
    // per input array inside the kernel.
    cfd_kernel<<<cfd_gridSize, cfd_BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
            d_fluxes,3,1,1,1,1,1,1);
    cudaDeviceSynchronize();
    cudaEventRecord(kernel_stop, 0);
    cudaEventSynchronize(kernel_stop);
    kernel_time = 0.0f;
    cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
    kernel_time *= 1.e-3; // milliseconds -> seconds
    cout << "kernel exe time: " << kernel_time << endl;

    // ---- Fetch results and validate against the CPU reference ------------
    cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost);
    check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);

    // ---- Cleanup (previously leaked) --------------------------------------
    cudaUnbindTexture(tex_mx);
    cudaUnbindTexture(tex_my);
    cudaUnbindTexture(tex_mz);
    cudaUnbindTexture(tex_energy);
    cudaUnbindTexture(tex_density);
    cudaUnbindTexture(tex_neighbor);
    cudaUnbindTexture(tex_normals);
    cudaEventDestroy(kernel_start);
    cudaEventDestroy(kernel_stop);
    cudaFreeHost(flag_cfd);
    cudaFree(d_cfd_neighborList);
    cudaFree(d_density);
    cudaFree(d_mx);
    cudaFree(d_my);
    cudaFree(d_mz);
    cudaFree(d_density_energy);
    cudaFree(d_normals);
    cudaFree(d_fluxes);
    cudaFreeHost(density);
    cudaFreeHost(mx);
    cudaFreeHost(my);
    cudaFreeHost(mz);
    cudaFreeHost(density_energy);
    cudaFreeHost(normals);
    cudaFreeHost(fluxes);
    cudaFreeHost(cfd_neighborList);
    return 0;
}
|
374ed81cc7940341f93cecced22cdcc9afe511f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_cuda.h"
#include "hl_cuda.ph"
#include "hl_aggregate.h"
#include "hl_thread.ph"
#include "hl_matrix_base.cuh"
#include "paddle/utils/Logging.h"
/**
* @brief matrix row operator.
*/
// Row-wise reduction kernel: one thread block per matrix row.  Each of the
// blockSize threads strides across its row of length dimN accumulating with
// the aggregate functor `agg`, then the blockSize partials are combined by a
// shared-memory tree reduction.  Launch contract: blockDim.x == blockSize and
// gridDim.x*gridDim.y == number of rows.
template<class Agg, int blockSize>
__global__ void KeMatrixRowOp(Agg agg,
                              real *E,     // [rows x dimN] row-major input
                              real *Sum,   // [rows] one reduced value per row
                              int dimN) {
  __shared__ real sum_s[blockSize];
  int cnt = (dimN + blockSize -1) / blockSize;   // strides needed to cover a row
  int rowId = blockIdx.x + blockIdx.y*gridDim.x; // flatten 2D grid -> row index
  int index = rowId*dimN;                        // offset of this row in E
  int tid = threadIdx.x;
  int lmt = tid;
  real tmp = agg.init();
  // Thread tid accumulates columns tid, tid+blockSize, tid+2*blockSize, ...
  for (int ii = 0; ii < cnt && lmt < dimN; ii++) {
    tmp = agg(tmp, E[index + lmt]);
    lmt += blockSize;
  }
  sum_s[tid] = tmp;
  __syncthreads();
  // Tree reduction over the blockSize partials in shared memory.
  for (int stride = blockSize/2; stride > 0; stride = stride/2) {
    if (tid < stride) {
      sum_s[tid] = agg(sum_s[tid], sum_s[tid + stride]);
    }
    __syncthreads();
  }
  __syncthreads();
  if (tid == 0) {
    Sum[rowId] = sum_s[0];
  }
}
// Host launcher for KeMatrixRowOp: one 128-thread block per row of the
// dimM x dimN matrix A_d, writing one value per row into C_d.
// NOTE(review): blocksX = dimM, so dimM is limited by the gridDim.x maximum
// of the target architecture — confirm the expected range with callers.
template <class Agg>
void hl_matrix_row_op(Agg agg,
                      real *A_d,   // device input  [dimM x dimN]
                      real *C_d,   // device output [dimM]
                      int dimM,
                      int dimN) {
  int blocksX = dimM;
  int blocksY = 1;
  dim3 threads(128, 1);
  dim3 grid(blocksX, blocksY);
  // Template blockSize (128) must match threads.x above.
  hipLaunchKernelGGL(( KeMatrixRowOp<Agg, 128>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
           agg, A_d, C_d, dimN);
}
// Row-wise sum: C_d[i] = sum over j of A_d[i*dimN + j], for i in [0, dimM).
void hl_matrix_row_sum(real *A_d, real *C_d, int dimM, int dimN) {
  CHECK_NOTNULL(A_d);
  CHECK_NOTNULL(C_d);
  hl_matrix_row_op(aggregate::sum(), A_d, C_d, dimM, dimN);
  CHECK_SYNC("hl_matrix_row_sum failed");
}
// Row-wise maximum: C_d[i] = max over j of A_d[i*dimN + j].
void hl_matrix_row_max(real *A_d, real *C_d, int dimM, int dimN) {
  CHECK_NOTNULL(A_d);
  CHECK_NOTNULL(C_d);
  hl_matrix_row_op(aggregate::max(), A_d, C_d, dimM, dimN);
  CHECK_SYNC("hl_matrix_row_max failed");
}
// Row-wise minimum: C_d[i] = min over j of A_d[i*dimN + j].
void hl_matrix_row_min(real *A_d, real *C_d, int dimM, int dimN) {
  CHECK_NOTNULL(A_d);
  CHECK_NOTNULL(C_d);
  hl_matrix_row_op(aggregate::min(), A_d, C_d, dimM, dimN);
  CHECK_SYNC("hl_matrix_row_min failed");
}
/**
* @brief matrix column operator.
*/
// Column-wise reduction, one thread per column: thread `colIdx` walks down
// its column of the dimM x dimN row-major matrix E, folding values with the
// aggregate functor, and writes the result to Sum[colIdx].
template<class Agg>
__global__ void KeMatrixColumnOp(Agg agg,
                                 real *E,
                                 real *Sum,
                                 int dimM,
                                 int dimN) {
  const int colIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if (colIdx >= dimN) return;  // tail threads past the last column do nothing
  real acc = agg.init();
  for (int row = 0; row < dimM; ++row) {
    acc = agg(acc, E[dimN * row + colIdx]);
  }
  Sum[colIdx] = acc;
}
// Column-wise reduction with a 2D thread block: blockDimX columns are handled
// per block and blockDimY threads cooperate on each column, striding down the
// rows.  Partials land in shared memory and thread row y==0 combines them.
// NOTE: `rowIdx` is actually the COLUMN index (it ranges over dimN).
template<class Agg, int blockDimX, int blockDimY>
__global__ void KeMatrixColumnOp_S(Agg agg,
                                   real *E,
                                   real *Sum,
                                   int dimM,
                                   int dimN) {
  __shared__ real _sum[blockDimX*blockDimY];
  int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
  int index = threadIdx.y;
  real tmp = agg.init();
  if (rowIdx < dimN) {
    // Thread (x, y) folds rows y, y+blockDimY, y+2*blockDimY, ... of column x.
    for (; index < dimM;) {
      tmp = agg(tmp, E[dimN * index + rowIdx]);
      index += blockDimY;
    }
  }
  // Out-of-range threads still store agg.init() and still reach the barrier,
  // so the block-wide __syncthreads() below is safe.
  _sum[threadIdx.x + threadIdx.y*blockDimX] = tmp;
  __syncthreads();
  if (rowIdx < dimN) {
    if (threadIdx.y ==0) {
      // Combine the blockDimY partials for this column.
      real tmp = agg.init();
      for (int i=0; i < blockDimY; i++) {
        tmp = agg(tmp, _sum[threadIdx.x + i*blockDimX]);
      }
      Sum[rowIdx] = tmp;
    }
  }
}
// Dispatch a column reduction over the dimM x dimN matrix A_d into C_d[dimN]:
//  - wide matrices (dimN >= 8192): the simple one-thread-per-column kernel,
//    since the columns alone provide enough parallelism;
//  - narrow matrices: the 32x32 shared-memory kernel so 32 threads cooperate
//    on each column and rows are also read in parallel.
template <class Agg>
void hl_matrix_column_op(Agg agg,
                         real *A_d,
                         real *C_d,
                         int dimM,
                         int dimN) {
  if (dimN >= 8192) {
    int blocksX = (dimN + 128 -1) / 128;   // ceil(dimN/128) blocks of 128 threads
    int blocksY = 1;
    dim3 threads(128, 1);
    dim3 grid(blocksX, blocksY);
    hipLaunchKernelGGL(( KeMatrixColumnOp<Agg>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
             agg, A_d, C_d, dimM, dimN);
  } else {
    int blocksX = (dimN + 32 -1) / 32;     // ceil(dimN/32), 32 columns per block
    int blocksY = 1;
    dim3 threads(32, 32);
    dim3 grid(blocksX, blocksY);
    hipLaunchKernelGGL(( KeMatrixColumnOp_S<Agg, 32, 32>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
             agg, A_d, C_d, dimM, dimN);
  }
  return;
}
// Column-wise sum: C_d[j] = sum over i of A_d[i*dimN + j], for j in [0, dimN).
void hl_matrix_column_sum(real *A_d, real *C_d, int dimM, int dimN) {
  CHECK_NOTNULL(A_d);
  CHECK_NOTNULL(C_d);
  hl_matrix_column_op(aggregate::sum(), A_d, C_d, dimM, dimN);
  CHECK_SYNC("hl_matrix_column_sum failed");
}
// Column-wise maximum: C_d[j] = max over i of A_d[i*dimN + j].
void hl_matrix_column_max(real *A_d, real *C_d, int dimM, int dimN) {
  CHECK_NOTNULL(A_d);
  CHECK_NOTNULL(C_d);
  hl_matrix_column_op(aggregate::max(), A_d, C_d, dimM, dimN);
  CHECK_SYNC("hl_matrix_column_max failed");
}
// Column-wise minimum: C_d[j] = min over i of A_d[i*dimN + j].
void hl_matrix_column_min(real *A_d, real *C_d, int dimM, int dimN) {
  CHECK_NOTNULL(A_d);
  CHECK_NOTNULL(C_d);
  hl_matrix_column_op(aggregate::min(), A_d, C_d, dimM, dimN);
  CHECK_SYNC("hl_matrix_column_min failed");
}
// First pass of a vector sum: each of the gridDim.y blocks produces one
// partial sum in Sum[blockIdx.y].  Accumulation uses a double-precision
// shared array even when `real` is float, to reduce rounding error over
// long vectors.  Launch contract: grid is (1, gridSize), blockDim.x ==
// blockSize; blocks are indexed via blockIdx.y.
template <int blockSize>
__global__ void KeVectorSum(real *E, real *Sum, int dimM) {
  __shared__ double sum_s[blockSize];
  int tid = threadIdx.x;
  int index = blockIdx.y*blockDim.x+threadIdx.x;
  sum_s[tid] = 0.0f;
  // Grid-stride accumulation over the whole vector.
  while (index < dimM) {
    sum_s[tid] += E[index];
    index += blockDim.x*gridDim.y;
  }
  __syncthreads();
  // In-block tree reduction of the blockSize partials.
  for (int stride = blockSize/2; stride > 0; stride = stride/2) {
    if (tid < stride) {
      sum_s[tid] += sum_s[tid + stride];
    }
    __syncthreads();
  }
  __syncthreads();
  if (tid == 0) {
    Sum[blockIdx.y] = sum_s[0];
  }
}
// Sum of the length-dimM device vector A_d, result written to host *C_h.
// Two-pass reduction: 128 blocks each write one partial into the shared
// scratch buffer t_resource.gpu_mem, then a single block reduces those 128
// partials into t_resource.cpu_mem, which is copied back asynchronously and
// then synchronized — so the call is blocking on return.
// The busy-wait on the resource event guards the shared scratch buffers
// against reuse before a previous call's copy has completed.
void hl_vector_sum(real *A_d, real *C_h, int dimM) {
  CHECK_NOTNULL(A_d);
  CHECK_NOTNULL(C_h);
  int blockSize = 128;
  int gridSize = 128;
  int blocksX = 1;
  int blocksY = gridSize;
  dim3 threads(blockSize, 1);
  dim3 grid(blocksX, blocksY);
  struct _hl_event_st hl_event_st = {.cu_event = t_resource.event};
  hl_event_t hl_event = &hl_event_st;
  // Spin until the scratch buffers from any earlier reduction are free.
  while (!hl_cuda_event_is_ready(hl_event)) {}
  // Pass 1: 128 partial sums into t_resource.gpu_mem.
  hipLaunchKernelGGL(( KeVectorSum<128>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
           A_d, t_resource.gpu_mem, dimM);
  // Pass 2: one block reduces the 128 partials into t_resource.cpu_mem.
  hipLaunchKernelGGL(( KeVectorSum<128>), dim3(1), dim3(threads), 0, STREAM_DEFAULT ,
           t_resource.gpu_mem, t_resource.cpu_mem, 128);
  hl_memcpy_async(C_h, t_resource.cpu_mem, sizeof(real), HPPL_STREAM_DEFAULT);
  hl_stream_record_event(HPPL_STREAM_DEFAULT, hl_event);
  hl_stream_synchronize(HPPL_STREAM_DEFAULT);
  // Surface any asynchronous kernel failure from the launches above.
  hipError_t err = (hipError_t)hl_get_device_last_error();
  CHECK_EQ(hipSuccess, err)
    << "CUDA error: " << hl_get_device_error_string((size_t)err);
}
// First pass of an absolute-value sum (L1 norm): identical structure to
// KeVectorSum but accumulates abs(E[i]).  Each of the gridDim.y blocks
// writes one double-accumulated partial into Sum[blockIdx.y].
template <int blockSize>
__global__ void KeVectorAbsSum(real *E, real *Sum, int dimM) {
  __shared__ double sum_s[blockSize];
  int tid = threadIdx.x;
  int index = blockIdx.y*blockDim.x+threadIdx.x;
  sum_s[tid] = 0.0f;
  // Grid-stride accumulation of |E[index]| over the whole vector.
  while (index < dimM) {
    sum_s[tid] += abs(E[index]);
    index += blockDim.x*gridDim.y;
  }
  __syncthreads();
  // In-block tree reduction of the blockSize partials.
  for (int stride = blockSize/2; stride > 0; stride = stride/2) {
    if (tid < stride) {
      sum_s[tid] += sum_s[tid + stride];
    }
    __syncthreads();
  }
  __syncthreads();
  if (tid == 0) {
    Sum[blockIdx.y] = sum_s[0];
  }
}
// L1 norm of the length-dimM device vector A_d, written to host *C_h.
// Same two-pass scheme as hl_vector_sum; note the second pass reduces the
// 128 (already non-negative) partials with KeVectorAbsSum, which is
// equivalent to a plain sum at that stage.  Blocking on return.
void hl_vector_abs_sum(real *A_d, real *C_h, int dimM) {
  CHECK_NOTNULL(A_d);
  CHECK_NOTNULL(C_h);
  int blockSize = 128;
  int gridSize = 128;
  int blocksX = 1;
  int blocksY = gridSize;
  dim3 threads(blockSize, 1);
  dim3 grid(blocksX, blocksY);
  struct _hl_event_st hl_event_st = {.cu_event = t_resource.event};
  hl_event_t hl_event = &hl_event_st;
  // Spin until the shared scratch buffers are free for reuse.
  while (!hl_cuda_event_is_ready(hl_event)) {}
  // Pass 1: 128 partial |.| sums into t_resource.gpu_mem.
  hipLaunchKernelGGL(( KeVectorAbsSum<128>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
           A_d, t_resource.gpu_mem, dimM);
  // Pass 2: one block folds the partials into t_resource.cpu_mem.
  hipLaunchKernelGGL(( KeVectorAbsSum<128>), dim3(1), dim3(threads), 0, STREAM_DEFAULT ,
           t_resource.gpu_mem, t_resource.cpu_mem, 128);
  hl_memcpy_async(C_h, t_resource.cpu_mem, sizeof(real), HPPL_STREAM_DEFAULT);
  hl_stream_record_event(HPPL_STREAM_DEFAULT, hl_event);
  hl_stream_synchronize(HPPL_STREAM_DEFAULT);
  // Surface any asynchronous kernel failure from the launches above.
  hipError_t err = (hipError_t)hl_get_device_last_error();
  CHECK_EQ(hipSuccess, err)
    << "CUDA error: " << hl_get_device_error_string((size_t)err);
}
| 374ed81cc7940341f93cecced22cdcc9afe511f9.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_cuda.h"
#include "hl_cuda.ph"
#include "hl_aggregate.h"
#include "hl_thread.ph"
#include "hl_matrix_base.cuh"
#include "paddle/utils/Logging.h"
/**
* @brief matrix row operator.
*/
template<class Agg, int blockSize>
__global__ void KeMatrixRowOp(Agg agg,
real *E,
real *Sum,
int dimN) {
__shared__ real sum_s[blockSize];
int cnt = (dimN + blockSize -1) / blockSize;
int rowId = blockIdx.x + blockIdx.y*gridDim.x;
int index = rowId*dimN;
int tid = threadIdx.x;
int lmt = tid;
real tmp = agg.init();
for (int ii = 0; ii < cnt && lmt < dimN; ii++) {
tmp = agg(tmp, E[index + lmt]);
lmt += blockSize;
}
sum_s[tid] = tmp;
__syncthreads();
for (int stride = blockSize/2; stride > 0; stride = stride/2) {
if (tid < stride) {
sum_s[tid] = agg(sum_s[tid], sum_s[tid + stride]);
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
Sum[rowId] = sum_s[0];
}
}
template <class Agg>
void hl_matrix_row_op(Agg agg,
real *A_d,
real *C_d,
int dimM,
int dimN) {
int blocksX = dimM;
int blocksY = 1;
dim3 threads(128, 1);
dim3 grid(blocksX, blocksY);
KeMatrixRowOp<Agg, 128><<< grid, threads, 0, STREAM_DEFAULT >>>
(agg, A_d, C_d, dimN);
}
void hl_matrix_row_sum(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_row_op(aggregate::sum(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_row_sum failed");
}
void hl_matrix_row_max(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_row_op(aggregate::max(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_row_max failed");
}
void hl_matrix_row_min(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_row_op(aggregate::min(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_row_min failed");
}
/**
* @brief matrix column operator.
*/
template<class Agg>
__global__ void KeMatrixColumnOp(Agg agg,
real *E,
real *Sum,
int dimM,
int dimN) {
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
real tmp = agg.init();
if (rowIdx < dimN) {
for (int index = 0; index < dimM; index++) {
tmp = agg(tmp, E[dimN * index + rowIdx]);
}
Sum[rowIdx] = tmp;
}
}
template<class Agg, int blockDimX, int blockDimY>
__global__ void KeMatrixColumnOp_S(Agg agg,
real *E,
real *Sum,
int dimM,
int dimN) {
__shared__ real _sum[blockDimX*blockDimY];
int rowIdx = blockIdx.x * blockDim.x + threadIdx.x;
int index = threadIdx.y;
real tmp = agg.init();
if (rowIdx < dimN) {
for (; index < dimM;) {
tmp = agg(tmp, E[dimN * index + rowIdx]);
index += blockDimY;
}
}
_sum[threadIdx.x + threadIdx.y*blockDimX] = tmp;
__syncthreads();
if (rowIdx < dimN) {
if (threadIdx.y ==0) {
real tmp = agg.init();
for (int i=0; i < blockDimY; i++) {
tmp = agg(tmp, _sum[threadIdx.x + i*blockDimX]);
}
Sum[rowIdx] = tmp;
}
}
}
template <class Agg>
void hl_matrix_column_op(Agg agg,
real *A_d,
real *C_d,
int dimM,
int dimN) {
if (dimN >= 8192) {
int blocksX = (dimN + 128 -1) / 128;
int blocksY = 1;
dim3 threads(128, 1);
dim3 grid(blocksX, blocksY);
KeMatrixColumnOp<Agg><<< grid, threads, 0, STREAM_DEFAULT >>>
(agg, A_d, C_d, dimM, dimN);
} else {
int blocksX = (dimN + 32 -1) / 32;
int blocksY = 1;
dim3 threads(32, 32);
dim3 grid(blocksX, blocksY);
KeMatrixColumnOp_S<Agg, 32, 32><<< grid, threads, 0, STREAM_DEFAULT>>>
(agg, A_d, C_d, dimM, dimN);
}
return;
}
void hl_matrix_column_sum(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_column_op(aggregate::sum(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_column_sum failed");
}
void hl_matrix_column_max(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_column_op(aggregate::max(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_column_max failed");
}
void hl_matrix_column_min(real *A_d, real *C_d, int dimM, int dimN) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_d);
hl_matrix_column_op(aggregate::min(),
A_d,
C_d,
dimM,
dimN);
CHECK_SYNC("hl_matrix_column_min failed");
}
template <int blockSize>
__global__ void KeVectorSum(real *E, real *Sum, int dimM) {
__shared__ double sum_s[blockSize];
int tid = threadIdx.x;
int index = blockIdx.y*blockDim.x+threadIdx.x;
sum_s[tid] = 0.0f;
while (index < dimM) {
sum_s[tid] += E[index];
index += blockDim.x*gridDim.y;
}
__syncthreads();
for (int stride = blockSize/2; stride > 0; stride = stride/2) {
if (tid < stride) {
sum_s[tid] += sum_s[tid + stride];
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
Sum[blockIdx.y] = sum_s[0];
}
}
void hl_vector_sum(real *A_d, real *C_h, int dimM) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_h);
int blockSize = 128;
int gridSize = 128;
int blocksX = 1;
int blocksY = gridSize;
dim3 threads(blockSize, 1);
dim3 grid(blocksX, blocksY);
struct _hl_event_st hl_event_st = {.cu_event = t_resource.event};
hl_event_t hl_event = &hl_event_st;
while (!hl_cuda_event_is_ready(hl_event)) {}
KeVectorSum<128><<< grid, threads, 0, STREAM_DEFAULT >>>
(A_d, t_resource.gpu_mem, dimM);
KeVectorSum<128><<< 1, threads, 0, STREAM_DEFAULT >>>
(t_resource.gpu_mem, t_resource.cpu_mem, 128);
hl_memcpy_async(C_h, t_resource.cpu_mem, sizeof(real), HPPL_STREAM_DEFAULT);
hl_stream_record_event(HPPL_STREAM_DEFAULT, hl_event);
hl_stream_synchronize(HPPL_STREAM_DEFAULT);
cudaError_t err = (cudaError_t)hl_get_device_last_error();
CHECK_EQ(cudaSuccess, err)
<< "CUDA error: " << hl_get_device_error_string((size_t)err);
}
template <int blockSize>
__global__ void KeVectorAbsSum(real *E, real *Sum, int dimM) {
__shared__ double sum_s[blockSize];
int tid = threadIdx.x;
int index = blockIdx.y*blockDim.x+threadIdx.x;
sum_s[tid] = 0.0f;
while (index < dimM) {
sum_s[tid] += abs(E[index]);
index += blockDim.x*gridDim.y;
}
__syncthreads();
for (int stride = blockSize/2; stride > 0; stride = stride/2) {
if (tid < stride) {
sum_s[tid] += sum_s[tid + stride];
}
__syncthreads();
}
__syncthreads();
if (tid == 0) {
Sum[blockIdx.y] = sum_s[0];
}
}
void hl_vector_abs_sum(real *A_d, real *C_h, int dimM) {
CHECK_NOTNULL(A_d);
CHECK_NOTNULL(C_h);
int blockSize = 128;
int gridSize = 128;
int blocksX = 1;
int blocksY = gridSize;
dim3 threads(blockSize, 1);
dim3 grid(blocksX, blocksY);
struct _hl_event_st hl_event_st = {.cu_event = t_resource.event};
hl_event_t hl_event = &hl_event_st;
while (!hl_cuda_event_is_ready(hl_event)) {}
KeVectorAbsSum<128><<< grid, threads, 0, STREAM_DEFAULT >>>
(A_d, t_resource.gpu_mem, dimM);
KeVectorAbsSum<128><<< 1, threads, 0, STREAM_DEFAULT >>>
(t_resource.gpu_mem, t_resource.cpu_mem, 128);
hl_memcpy_async(C_h, t_resource.cpu_mem, sizeof(real), HPPL_STREAM_DEFAULT);
hl_stream_record_event(HPPL_STREAM_DEFAULT, hl_event);
hl_stream_synchronize(HPPL_STREAM_DEFAULT);
cudaError_t err = (cudaError_t)hl_get_device_last_error();
CHECK_EQ(cudaSuccess, err)
<< "CUDA error: " << hl_get_device_error_string((size_t)err);
}
|
76f2e19c1c34b61f4ecfe7711fce91a713efac35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/bnll_layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
const float kBNLL_THRESHOLD = 50.;
// Forward BNLL: out = log(1 + exp(in)), computed via the numerically stable
// branch  in + log(1 + exp(-in))  when in > 0 so exp() never overflows.
// NOTE(review): log/exp here are the double-precision overloads even when
// Dtype is float (operands promoted by the `1.` literal) — matches upstream
// Caffe; do not "optimize" to logf/expf without checking tolerances.
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ?
        in[index] + log(1. + exp(-in[index])) :
        log(1. + exp(in[index]));
  }
}
// GPU forward pass: top[0] = BNLL(bottom[0]) applied elementwise over all
// bottom[0]->count() values.
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( BNLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}
// Backward BNLL: d(log(1+e^x))/dx = e^x/(e^x+1) = sigmoid(x), so
// out_diff = in_diff * sigmoid(in_data).  The input to exp() is clamped at
// kBNLL_THRESHOLD (50) to avoid overflow; beyond that the sigmoid is
// numerically 1 anyway.
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
    out_diff[index] = in_diff[index] * expval / (expval + 1.);
  }
}
// GPU backward pass: propagates top[0]'s gradient through the BNLL
// nonlinearity into bottom[0]'s diff.  Skipped entirely when the solver did
// not request gradients for this bottom blob.
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( BNLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, top_diff, bottom_data, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
| 76f2e19c1c34b61f4ecfe7711fce91a713efac35.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/bnll_layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
const float kBNLL_THRESHOLD = 50.;
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
|
df50f31bec24068847f10b114b2fb68743fd0bd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2015 Davis E. King ([email protected])
// License: Boost Software License See LICENSE.txt for the full license.
#include "cuda_utils.h"
#include "cuda_dlib.h"
namespace dlib
{
namespace cuda
{
// -----------------------------------------------------------------------------------
// Select which GPU subsequent runtime calls on this host thread target.
void set_device (int dev)
{
    CHECK_CUDA(hipSetDevice(dev));
}
// Return the index of the GPU currently bound to this host thread.
int get_device ()
{
    int device_idx = 0;
    CHECK_CUDA(hipGetDevice(&device_idx));
    return device_idx;
}
// Human-readable name of the given GPU, taken from its device properties.
std::string get_device_name (int device)
{
    hipDeviceProp_t properties;
    CHECK_CUDA(hipGetDeviceProperties(&properties, device));
    return properties.name;
}
// Make host threads block (rather than busy-spin) when they synchronize
// with the currently selected device.
void set_current_device_blocking_sync ()
{
    CHECK_CUDA(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
}
// Number of GPUs visible to the runtime.
int get_num_devices ()
{
    int count = 0;
    CHECK_CUDA(hipGetDeviceCount(&count));
    return count;
}
// True if device_id can directly address memory residing on peer_device_id.
bool can_access_peer (int device_id, int peer_device_id)
{
    int accessible = 0;
    CHECK_CUDA(hipDeviceCanAccessPeer(&accessible, device_id, peer_device_id));
    return accessible != 0;
}
// Tensor overload: queries peer access between the devices the two tensors
// live on.
bool can_access_peer (const tensor& device, const tensor& peer_device)
{
    const int lhs = device.device_id();
    const int rhs = peer_device.device_id();
    return can_access_peer(lhs, rhs);
}
// Block the host until all pending work on device `dev` completes.  The RAII
// guard temporarily switches the calling thread to `dev` and restores the
// previous device afterwards.
void device_synchronize (int dev)
{
    raii_set_device set_dev(dev);
    CHECK_CUDA(hipDeviceSynchronize());
}
// Convenience overload: synchronize the device a tensor resides on.
void device_synchronize (const tensor& dev) { device_synchronize(dev.device_id()); }
// RAII enabler of peer-to-peer access from `device_id` to `peer_device_id`.
// If access was already enabled elsewhere we leave it in place (and will not
// disable it on destruction), but we must clear the sticky
// hipErrorPeerAccessAlreadyEnabled from the runtime's last-error slot so a
// later CHECK_CUDA on an unrelated call doesn't trip over it.
enable_peer_access::
enable_peer_access(
    int device_id,
    int peer_device_id
) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id)
{
    raii_set_device set_dev(device_id);
    auto err = hipDeviceEnablePeerAccess(peer_device_id, 0);
    if (err == hipSuccess)
    {
        // We enabled it, so our destructor owns disabling it.
        call_disable = true;
    }
    else if (err == hipErrorPeerAccessAlreadyEnabled)
    {
        // call hipGetLastError() to dispose of this error since we don't
        // care.
        auto err2 = hipGetLastError();
        if (err2 != hipErrorPeerAccessAlreadyEnabled)
            CHECK_CUDA(err2);
    }
    else
    {
        CHECK_CUDA(err);
    }
}
// Tears peer access down only if this object was the one that enabled it.
// Declared noexcept(false) deliberately: CHECK_CUDA may throw, and letting
// that escape the destructor is preferred here over silently ignoring a
// failed disable.
enable_peer_access::
~enable_peer_access() noexcept(false)
{
    if (call_disable)
    {
        raii_set_device set_dev(device_id);
        CHECK_CUDA(hipDeviceDisablePeerAccess(peer_device_id));
    }
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// Elementwise product for equal-sized operands: d[i] = s1[i]*s2[i]
// for all i in [0, n), via a grid-stride loop.
__global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto i : grid_stride_range(0, n))
    {
        d[i] = s1[i]*s2[i];
    }
}
// Broadcast multiply into a dest of n elements, OVERWRITING d.  Conceptually
// iterates j over [0, max_size) with each source indexed modulo its own size,
// and folds every position j = i, i+n, i+2n, ... into d[i].  Used when dest
// has fewer samples than the (broadcast) sources, so repeats are summed.
__global__ void _cuda_multiply2(float* d, const float* s1, const float* s2,
    size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
    for (auto i : grid_stride_range(0, n))
    {
        d[i] = 0;
        for (size_t j = i; j < max_size; j += n)
            d[i] += s1[j%s1_n]*s2[j%s2_n];
    }
}
// Broadcast multiply where dest is the largest operand: d[i] is the product
// of the sources cycled modulo their own sizes (no accumulation needed).
__global__ void _cuda_multiply3(float* d, const float* s1, const float* s2,
    size_t n, size_t s1_n, size_t s2_n)
{
    for (auto i : grid_stride_range(0, n))
    {
        d[i] = s1[i%s1_n]*s2[i%s2_n];
    }
}
// Same as _cuda_multiply1 but accumulates: d[i] += s1[i]*s2[i].
__global__ void _cuda_multiply1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto i : grid_stride_range(0, n))
    {
        d[i] += s1[i]*s2[i];
    }
}
// Same broadcast-and-fold scheme as _cuda_multiply2, but accumulates into
// the existing contents of d instead of overwriting them.
__global__ void _cuda_multiply2_add_to(float* d, const float* s1, const float* s2,
    size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
    for (auto i : grid_stride_range(0, n))
    {
        for (size_t j = i; j < max_size; j += n)
            d[i] += s1[j%s1_n]*s2[j%s2_n];
    }
}
// Same as _cuda_multiply3 but accumulates: d[i] += s1[i%s1_n]*s2[i%s2_n].
__global__ void _cuda_multiply3_add_to(float* d, const float* s1, const float* s2,
    size_t n, size_t s1_n, size_t s2_n)
{
    for (auto i : grid_stride_range(0, n))
    {
        d[i] += s1[i%s1_n]*s2[i%s2_n];
    }
}
// Pointwise (broadcast) multiply: dest = src1 .* src2, or dest += src1 .* src2
// when add_to is set.  k/nr/nc must match across all three tensors; the
// num_samples dimension may be 1 on any operand and is broadcast/folded to
// the common maximum MD.  Dispatches to one of three kernels:
//   - equal sizes           -> _cuda_multiply1[_add_to]
//   - dest has 1 sample     -> _cuda_multiply2[_add_to] (fold repeats into dest)
//   - otherwise             -> _cuda_multiply3[_add_to] (cycle the sources)
void multiply (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    // NOTE(review): the DLIB_CASSERT messages are empty strings — consider
    // adding descriptive text so assertion failures are diagnosable.
    DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
        dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
        dest.nc() == src1.nc() && src1.nc() == src2.nc() ,"");
    const long MD = ::max(::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
    DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
        (src1.num_samples()==1 || src1.num_samples()==MD) &&
        (src2.num_samples()==1 || src2.num_samples()==MD) ,"");
    if (dest.size() == 0)
        return;
    const size_t max_size = ::max(::max(dest.size(),src1.size()),src2.size());
    // NOTE(review): d/s1/s2 below are never used — the kernels operate on the
    // .device() pointers.  These .host() calls look like leftovers and likely
    // force unnecessary device<->host synchronization; confirm against
    // upstream dlib before removing.
    const auto d = dest.host();
    const auto s1 = src1.host();
    const auto s2 = src2.host();
    if (dest.size() == src1.size() && src1.size() == src2.size())
    {
        if (add_to)
            launch_kernel(_cuda_multiply1_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
        else
            launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
    }
    else if (dest.num_samples() == 1)
    {
        if (add_to)
            launch_kernel(_cuda_multiply2_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size(), max_size);
        else
            launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size(), max_size);
    }
    else
    {
        if (add_to)
            launch_kernel(_cuda_multiply3_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size());
        else
            launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                dest.size(), src1.size(), src2.size());
    }
}
// ------------------------------------------------------------------------------------
// Per-channel scaling: d[i] = s1[i]*s2[k] where k is the channel index
// recovered from the flat index (bs = elements per image plane, ks = number
// of channels).
__global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto chan = (idx/bs)%ks;
        d[idx] = s1[idx]*s2[chan];
    }
}
// Reduces the elementwise product s1*s2 over each image plane and atomically
// accumulates each plane's sum into the per-channel output d[k].  n is the
// number of planes (num_samples*k), bs the plane size, ks the channel count.
// The launch site uses blockDim.x == 32 because warp_reduce_atomic_add()
// requires a full warp in x.
__global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    // zero initialize d before we begin.
    // NOTE(review): __syncthreads() only orders threads within one block; the
    // zeroing and the atomic accumulation below can presumably interleave
    // across different blocks — verify the launch configuration makes this
    // safe, or zero d on the host before launching.
    for (auto i : grid_stride_range(0, ks))
        d[i] = 0;
    __syncthreads();
    // loop over all the image planes
    for (auto i : grid_stride_range_y(0, n))
    {
        // sum all the elements in the i-th image plane
        float temp = 0;
        for (auto j : grid_stride_range(i*bs, (i+1)*bs))
            temp += s1[j]*s2[j];
        auto k = i%ks;
        // and store the sum into d[k]
        warp_reduce_atomic_add(d[k], temp);
    }
}
// Accumulating per-channel scaling: d[i] += s1[i]*s2[k], with the channel
// index k derived from the flat index (bs = plane size, ks = channel count).
__global__ void _cuda_multiply_conv_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto chan = (idx/bs)%ks;
        d[idx] += s1[idx]*s2[chan];
    }
}
// Same reduction as _cuda_multiply_conv2 but accumulates into d instead of
// overwriting it, so no zero-initialization pass is needed.  Launch with
// blockDim.x == 32 (warp_reduce_atomic_add() needs a full warp in x).
__global__ void _cuda_multiply_conv2_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
    // loop over all the image planes
    for (auto i : grid_stride_range_y(0, n))
    {
        // sum all the elements in the i-th image plane
        float temp = 0;
        for (auto j : grid_stride_range(i*bs, (i+1)*bs))
            temp += s1[j]*s2[j];
        auto k = i%ks;
        // and store the sum into d[k]
        warp_reduce_atomic_add(d[k], temp);
    }
}
// Channel-wise multiply used by convolutional layers.  Two modes:
//  - dest shaped like src1: every element of src1 is scaled by the matching
//    per-channel value in src2 (src2 is 1 x k x 1 x 1).
//  - dest is 1 x k x 1 x 1: the elementwise products of src1 and src2 are
//    reduced into one value per channel.
// When add_to is true the results are accumulated into dest.
void multiply_conv (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    if (have_same_dimensions(dest,src1))
    {
        DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k(),"");
        if (dest.size() == 0)
            return;
        if (add_to)
            launch_kernel(_cuda_multiply_conv_add_to,max_jobs(dest.size()),
                dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
        else
            launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()),
                dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
    }
    else
    {
        DLIB_CASSERT(have_same_dimensions(src1,src2),"");
        DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k(),"");
        if (dest.size() == 0)
            return;
        // Fixed 2D launch: y strides over image planes, x over plane elements.
        dim3 blocks(10,1);
        dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
        // NOTE(review): these raw launches are not followed by an error check,
        // unlike the launch_kernel() path above — consider checking the
        // runtime's last error after them.
        if (add_to)
            hipLaunchKernelGGL(( _cuda_multiply_conv2_add_to), dim3(blocks),dim3(threads), 0, 0,
                dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
        else
            hipLaunchKernelGGL(( _cuda_multiply_conv2), dim3(blocks),dim3(threads), 0, 0,
                dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
    }
}
// ------------------------------------------------------------------------------------
// Elementwise sum: d[i] = s1[i] + s2[i] for all i in [0, n).
__global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = s1[idx] + s2[idx];
}
// Adds two tensors of possibly different shapes into d.  Each output element
// is addressed by its (n,k,r,c) coordinates; an input contributes its value
// when the coordinate lies inside its bounds and 0 otherwise (i.e. smaller
// inputs are implicitly zero padded).
__global__ void _cuda_add2(float* d, const float* s1, const float* s2,
                        size_t dn, size_t dk, size_t dr, size_t dc,
                        size_t s1n, size_t s1k, size_t s1r, size_t s1c,
                        size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
    for (auto i : grid_stride_range(0, dn*dk*dr*dc))
    {
        size_t n,k,r,c;
        // decode the flat dest index into coordinates
        unpack_idx(i, dk,dr,dc, n,k,r,c);
        float v1 = 0;
        float v2 = 0;
        if (n < s1n &&
            k < s1k &&
            r < s1r &&
            c < s1c )
        {
            v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
        }
        if (n < s2n &&
            k < s2k &&
            r < s2r &&
            c < s2c )
        {
            v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
        }
        d[i] = v1+v2;
    }
}
// dest = src1 + src2.  If all three tensors share the same dimensions a
// simple 1:1 kernel is used; otherwise the bounds-checked kernel treats the
// inputs as zero padded out to dest's shape.
void add (
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    if (dest.size() == 0)
        return;
    // Do the simple and fast version if everything has the same dimensions
    if (have_same_dimensions(dest, src1) &&
        have_same_dimensions(dest, src2))
    {
        launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
    }
    else
    {
        // Otherwise, do the more complex version with bounds checking.
        launch_kernel(_cuda_add2,max_jobs(dest.size()),
                    dest.device(), src1.device(), src2.device(),
                    dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
                    src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
                    src2.num_samples(), src2.k(), src2.nr(), src2.nc()
                    );
    }
}
// ------------------------------------------------------------------------------------
// Affine map with scalar coefficients: d[i] = A*s[i] + B.
__global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s[idx] + B;
}
// Special case of _cuda_affine_transform1 with B == 0: d[i] = A*s[i].
__global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s[idx];
}
// dest = A*src + B, elementwise with scalar A and B.  Dispatches to the
// multiply-only kernel when B == 0 to save the per-element add.
void affine_transform(
    tensor& dest,
    const tensor& src,
    const float A,
    const float B
)
{
    DLIB_CASSERT(dest.size()==src.size(),"");
    if (B != 0)
        launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B);
    else
        launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
// dest = A*src, elementwise with scalar A (the B == 0 convenience overload).
void affine_transform(
    tensor& dest,
    const tensor& src,
    const float A
)
{
    DLIB_CASSERT(dest.size()==src.size(),"");
    launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
// ----------------------------------------------------------------------------------------
// Two-input affine map: d[i] = A*s1[i] + B*s2[i] + C.
__global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx] + C;
}
// Special case of _cuda_affine_transform4 with C == 0: d[i] = A*s1[i] + B*s2[i].
__global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx];
}
// dest = A*src1 + B*src2 + C, elementwise with scalar coefficients.
// Dispatches to the cheaper kernel when C == 0.
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const float A,
    const float B,
    const float C
)
{
    DLIB_CASSERT(dest.size()==src1.size(),"");
    DLIB_CASSERT(dest.size()==src2.size(),"");
    if (C != 0)
        launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C);
    else
        launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
// dest = A*src1 + B*src2 (the C == 0 convenience overload).
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const float A,
    const float B
)
{
    DLIB_CASSERT(dest.size()==src1.size(),"");
    DLIB_CASSERT(dest.size()==src2.size(),"");
    launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
// ----------------------------------------------------------------------------------------
// Scaled accumulation (axpy-style): d[i] += scale*s[i].
__global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] += scale*s[idx];
}
// dest += scale*src, elementwise.
void add_scaled(
    tensor& dest,
    const float scale,
    const tensor& src
)
{
    DLIB_CASSERT(dest.size()==src.size(),"");
    launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale);
}
// ----------------------------------------------------------------------------------------
// Three-input affine map: d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D.
__global__ void _cuda_affine_transform5(
    float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D
)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx] + D;
}
// dest = A*src1 + B*src2 + C*src3 + D, elementwise with scalar coefficients.
void affine_transform(
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const tensor& src3,
    const float A,
    const float B,
    const float C,
    const float D
)
{
    DLIB_CASSERT(dest.size()==src1.size(),"");
    DLIB_CASSERT(dest.size()==src2.size(),"");
    DLIB_CASSERT(dest.size()==src3.size(),"");
    launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(),
        src2.device(), src3.device(), dest.size(), A, B, C, D);
}
// ----------------------------------------------------------------------------------------
// Three-input affine map restricted to indices in [begin, end):
// d[i] = A*s1[i] + B*s2[i] + C*s3[i].
__global__ void _cuda_affine_transform_range(
    float* d, const float* s1, const float* s2, const float* s3, size_t begin, size_t end, float A, float B, float C
)
{
    for (auto idx : grid_stride_range(begin, end))
        d[idx] = A*s1[idx] + B*s2[idx] + C*s3[idx];
}
// Applies dest = A*src1 + B*src2 + C*src3 only to the element range
// [begin, end); elements outside the range are left untouched.
void affine_transform_range(
    size_t begin,
    size_t end,
    tensor& dest,
    const tensor& src1,
    const tensor& src2,
    const tensor& src3,
    const float A,
    const float B,
    const float C
)
{
    DLIB_CASSERT(dest.size()==src1.size(),"");
    DLIB_CASSERT(dest.size()==src2.size(),"");
    DLIB_CASSERT(dest.size()==src3.size(),"");
    DLIB_CASSERT(begin <= end && end <= dest.size(),"");
    launch_kernel(_cuda_affine_transform_range,max_jobs(end-begin),
        dest.device(), src1.device(),
        src2.device(), src3.device(), begin, end, A, B, C);
}
// -----------------------------------------------------------------------------------
// Per-element affine map: d[i] = A[i]*s[i] + B[i] (A and B are full-size arrays).
__global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = A[idx]*s[idx] + B[idx];
}
// Affine map with A and B broadcast over samples: d[i] = A[i%bs]*s[i] + B[i%bs],
// where bs is the size of one sample's worth of coefficients.
__global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto coeff = idx%bs;
        d[idx] = A[coeff]*s[idx] + B[coeff];
    }
}
// dest = A*src + B with tensor-valued coefficients.  A and B must match src in
// k/nr/nc; they either have one sample each (broadcast across src's samples)
// or the same number of samples as src (fully elementwise).
void affine_transform(
    tensor& dest,
    const tensor& src,
    const tensor& A,
    const tensor& B
)
{
    DLIB_CASSERT(have_same_dimensions(dest, src),"");
    DLIB_CASSERT(
            ((A.num_samples()==1 && B.num_samples()==1) ||
            (A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())),"");
    DLIB_CASSERT(
            A.nr()==B.nr() && B.nr()==src.nr() &&
            A.nc()==B.nc() && B.nc()==src.nc() &&
            A.k() ==B.k()  && B.k()==src.k(),
            "\nA.nr(): " << A.nr() << "\nB.nr(): " << B.nr() << "\nsrc.nr(): " << src.nr()
            <<"\nA.nc(): " << A.nc() << "\nB.nc(): " << B.nc() << "\nsrc.nc(): " << src.nc()
            <<"\nA.k(): " << A.k() << "\nB.k(): " << B.k() << "\nsrc.k(): " << src.k()
            );
    if (A.num_samples() == 1)
    {
        // broadcast the single-sample coefficients across all of src's samples
        launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size());
    }
    else
    {
        launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device());
    }
}
// ----------------------------------------------------------------------------------------
// One Adam optimizer step over parameter indices [begin, end): updates the
// first/second moment estimates m and v in place and writes the resulting
// parameter step into s.  alpha is the (already bias-corrected) step size.
__global__ void _cuda_compute_adam_update(
    size_t begin,
    size_t end,
    float* s,
    float* m,
    float* v,
    const float alpha,
    const float weight_decay,
    const float momentum1,
    const float momentum2,
    const float* params,
    const float* params_grad
)
{
    // small constant to avoid division by zero
    const float eps = 1e-8;
    // The loop is equivalent to doing this:
    //   m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad);
    //   v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad);
    //   s = -alpha*m/(sqrt(v) + eps);
    for (auto i : grid_stride_range(begin, end))
    {
        // gradient including the L2 weight decay term
        float g = (weight_decay*params[i] + params_grad[i]);
        m[i] = momentum1*m[i] + (1-momentum1)*g;
        v[i] = momentum2*v[i] + (1-momentum2)*g*g;
        s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps);
    }
}
// Launches one Adam update step for parameter indices [begin, end).  t is the
// 1-based iteration number used for bias correction of the moment estimates.
void compute_adam_update (
    size_t begin,
    size_t end,
    tensor& s,
    tensor& m,
    tensor& v,
    const float t,
    const float learning_rate,
    const float weight_decay,
    const float momentum1,
    const float momentum2,
    const tensor& params,
    const tensor& params_grad
)
{
    DLIB_CASSERT(s.size() == m.size() &&
                 s.size() == v.size() &&
                 s.size() == params.size() &&
                 s.size() == params_grad.size(),"");
    DLIB_CASSERT(begin <= end && end <= params.size(),"");
    // Bias-corrected step size.  Use std::pow so the float overload is
    // selected (the unqualified ::pow resolves to the C double version),
    // matching the std::sqrt usage on the same line.
    const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1, t));
    launch_kernel(_cuda_compute_adam_update,max_jobs(end-begin),
            begin, end, s.device(), m.device(), v.device(), alpha, weight_decay,
            momentum1, momentum2, params.device(), params_grad.device());
}
// -----------------------------------------------------------------------------------
// Per-channel affine map: d[i] = A[k]*s[i] + B[k], where k is the channel
// index recovered from the flat index (bs = plane size, ks = channel count).
__global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto chan = (idx/bs)%ks;
        d[idx] = A[chan]*s[idx] + B[chan];
    }
}
// dest = A*src + B with one coefficient pair per channel (A and B are
// 1 x k x 1 x 1 tensors), as used by batch-norm style layers.
void affine_transform_conv(
    tensor& dest,
    const tensor& src,
    const tensor& A,
    const tensor& B
)
{
    DLIB_CASSERT(have_same_dimensions(dest, src),"");
    DLIB_CASSERT(have_same_dimensions(A, B),"");
    DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k(),"");
    launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()),
            dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k());
}
// -----------------------------------------------------------------------------------
// Sums the per-sample bias gradients: out[i] = sum over j of in[i + j*n],
// where n is the size of one bias gradient and total_n the size of the
// whole input gradient.
__global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n)
{
    for (auto idx : grid_stride_range(0, n))
    {
        out[idx] = in[idx];
        for (size_t pos = idx+n; pos < total_n; pos += n)
            out[idx] += in[pos];
    }
}
// grad = sum of gradient_input over its sample dimension.  grad must hold a
// single sample whose k/nr/nc dimensions match gradient_input.
void assign_bias_gradient (
    tensor& grad,
    const tensor& gradient_input
)
{
    DLIB_CASSERT(
            grad.num_samples() == 1 &&
            gradient_input.k() == grad.k() &&
            gradient_input.nr() == grad.nr() &&
            gradient_input.nc() == grad.nc() &&
            gradient_input.size() > 0,"");
    launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size());
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// In-place binarization: d[i] becomes 1 when d[i] > thresh, else 0.
__global__ void _cuda_threshold(float* d, size_t n, float thresh)
{
    for (auto idx : grid_stride_range(0, n))
        d[idx] = (d[idx] > thresh) ? 1 : 0;
}
// Binarizes data in place: each element becomes 1 if it exceeds thresh, else 0.
void threshold (
    tensor& data,
    float thresh
)
{
    launch_kernel(_cuda_threshold,max_jobs(data.size()),data.device(), data.size(), thresh);
}
// ------------------------------------------------------------------------------------
// Computes the dot product of a and b and atomically accumulates it into
// *result (the existing value of *result is added to, not overwritten).
__global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result)
{
    // Parallel sum everything into local temp variables.
    float temp = 0;
    for(auto i : grid_stride_range(0, n))
        temp += a[i]*b[i];
    // Then do the warp reduce add thing to merge into one output value.
    warp_reduce_atomic_add(*result, temp);
}
// Accumulates dot(a,b) into result's idx-th element (i.e. result[idx] += a·b;
// the kernel adds atomically rather than assigning).
void dot (
    const tensor& a,
    const tensor& b,
    tensor& result,
    size_t idx
)
{
    DLIB_CASSERT(a.size() == b.size(), "");
    DLIB_CASSERT(idx < result.size(), "");
    launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx);
}
// ----------------------------------------------------------------------------------------
// Parametric ReLU forward pass: d[i] = s[i] when s[i] > 0, otherwise p*s[i],
// where the slope p is a single learned parameter read from *pp.
__global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp)
{
    const float slope = *pp;
    for (auto idx : grid_stride_range(0, n))
        d[idx] = (s[idx] > 0) ? s[idx] : slope*s[idx];
}
// PReLU forward: dest = src where src > 0, otherwise param*src (param holds a
// single slope value on the device).
void prelu (
    tensor& dest,
    const tensor& src,
    const tensor& param
)
{
    launch_kernel(_cuda_prelu, max_jobs(dest.size()),
                src.device(), dest.device(), src.size(), param.device());
}
// ----------------------------------------------------------------------------------------
// PReLU backward pass.  Accumulates the input gradient into out (scaled by
// the slope p where the forward input was <= 0) and atomically accumulates
// the slope's gradient into *ppgrad.
__global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad)
{
    const float p = *pp;
    float pgrad = 0;
    for(auto i : grid_stride_range(0, n))
    {
        if (s[i] > 0)
        {
            out[i] += gi[i];
        }
        else
        {
            out[i] += p*gi[i];
            // the slope only affects outputs where the input was <= 0
            pgrad += gi[i]*s[i];
        }
    }
    // Then do the warp reduce add thing to merge into one output value.
    warp_reduce_atomic_add(*ppgrad, pgrad);
}
// PReLU backward: accumulates the data gradient into grad and writes the
// slope parameter's gradient into params_grad (zeroed first, since the
// kernel accumulates atomically).
void prelu_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input,
    const tensor& param,
    tensor& params_grad
)
{
    params_grad = 0;
    launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()),
                grad.device(), src.device(), gradient_input.device(), grad.size(),
                param.device(), params_grad.device());
}
// ----------------------------------------------------------------------------------------
// Copies count_k channels from src (starting at channel src_k_offset) into
// dest (starting at channel dest_k_offset), for every sample.  Both tensors
// must have the same num_samples, nr, and nc, and enough channels past the
// respective offsets.
void copy_tensor(
        tensor& dest,
        size_t dest_k_offset,
        const tensor& src,
        size_t src_k_offset,
        size_t count_k
)
{
    // stride (in floats) from one sample to the next in each tensor
    const size_t dest_sample_size = static_cast<size_t>(dest.nc() * dest.nr() * dest.k());
    const size_t src_sample_size = static_cast<size_t>(src.nc() * src.nr() * src.k());
    // number of floats copied per sample
    const size_t block_size = count_k * dest.nc() * dest.nr();
    DLIB_CASSERT(dest.num_samples() == src.num_samples() &&
                 dest.nc() == src.nc() && dest.nr() == src.nr(), "All sources should fit into dest tensor size");
    DLIB_CASSERT(dest.k() - dest_k_offset >= count_k, "Not enough space in dest tensor");
    DLIB_CASSERT(src.k() - src_k_offset >= count_k, "Not enough space in src tensor");
    float* dest_p = dest.device() + dest_k_offset * dest.nc() * dest.nr();
    const float* src_p = src.device() + src_k_offset * src.nc() * src.nr();
    for (long i = 0; i < src.num_samples(); ++i)
    {
        // device-to-device copy of this sample's channel block
        CHECK_CUDA(hipMemcpy(dest_p, src_p, block_size * sizeof(float), hipMemcpyDeviceToDevice));
        dest_p += dest_sample_size;
        src_p += src_sample_size;
    }
}
// ----------------------------------------------------------------------------------------
}
}
| df50f31bec24068847f10b114b2fb68743fd0bd3.cu | // Copyright (C) 2015 Davis E. King ([email protected])
// License: Boost Software License See LICENSE.txt for the full license.
#include "cuda_utils.h"
#include "cuda_dlib.h"
namespace dlib
{
namespace cuda
{
// -----------------------------------------------------------------------------------
// Makes the given CUDA device current for this host thread.
void set_device (
    int dev
)
{
    CHECK_CUDA(cudaSetDevice(dev));
}
// Returns the CUDA device currently selected for this host thread.
int get_device (
)
{
    int device_id = 0;
    CHECK_CUDA(cudaGetDevice(&device_id));
    return device_id;
}
// Returns the human readable name of the given CUDA device.
std::string get_device_name (
    int device
)
{
    cudaDeviceProp props;
    CHECK_CUDA(cudaGetDeviceProperties(&props, device));
    return props.name;
}
// Configures the current device so host threads block (rather than spin)
// while waiting on device synchronization.
void set_current_device_blocking_sync(
)
{
    CHECK_CUDA(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
}
// Returns the number of CUDA capable devices on this machine.
int get_num_devices (
)
{
    // initialize the out-param before the API call, consistent with
    // get_device() above.
    int num_devices = 0;
    CHECK_CUDA(cudaGetDeviceCount(&num_devices));
    return num_devices;
}
// Returns true if device_id can directly access memory on peer_device_id.
bool can_access_peer (int device_id, int peer_device_id)
{
    int can_access;
    CHECK_CUDA(cudaDeviceCanAccessPeer(&can_access, device_id, peer_device_id));
    return can_access;
}
// Convenience overload: checks peer access between the devices that hold the
// two tensors.
bool can_access_peer (const tensor& device, const tensor& peer_device)
{
    return can_access_peer(device.device_id(), peer_device.device_id());
}
// Blocks until all previously issued work on the given device has completed.
void device_synchronize (int dev)
{
    // temporarily select dev; restored when set_dev goes out of scope
    raii_set_device set_dev(dev);
    CHECK_CUDA(cudaDeviceSynchronize());
}
// Convenience overload: synchronizes the device that holds the given tensor.
void device_synchronize (const tensor& dev) { device_synchronize(dev.device_id()); }
// Enables direct peer access from device_id to peer_device_id for the
// lifetime of this object.  If access was already enabled by someone else,
// this object does not take ownership (call_disable stays false) so the
// destructor won't disable access it didn't enable.
enable_peer_access::
enable_peer_access(
    int device_id,
    int peer_device_id
) : call_disable(false), device_id(device_id), peer_device_id(peer_device_id)
{
    raii_set_device set_dev(device_id);
    auto err = cudaDeviceEnablePeerAccess(peer_device_id, 0);
    if (err == cudaSuccess)
    {
        call_disable = true;
    }
    else if (err == cudaErrorPeerAccessAlreadyEnabled)
    {
        // call cudaGetLastError() to dispose of this error since we don't
        // care.
        auto err2 = cudaGetLastError();
        if (err2 != cudaErrorPeerAccessAlreadyEnabled)
            CHECK_CUDA(err2);
    }
    else
    {
        CHECK_CUDA(err);
    }
}
// Disables peer access if (and only if) this object enabled it.  Declared
// noexcept(false) because CHECK_CUDA may throw on failure.
enable_peer_access::
~enable_peer_access() noexcept(false)
{
    if (call_disable)
    {
        raii_set_device set_dev(device_id);
        CHECK_CUDA(cudaDeviceDisablePeerAccess(peer_device_id));
    }
}
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
__global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i]*s2[i];
}
}
// Reducing elementwise product: each output element d[i] accumulates the
// products of s1 and s2 (indexed modulo their sizes, implementing sample
// broadcasting) over all positions j congruent to i modulo n, up to max_size.
__global__ void _cuda_multiply2(float* d, const float* s1, const float* s2,
                            size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
    for (auto i : grid_stride_range(0, n))
    {
        d[i] = 0;
        for (size_t j = i; j < max_size; j += n)
            d[i] += s1[j%s1_n]*s2[j%s2_n];
    }
}
__global__ void _cuda_multiply3(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i%s1_n]*s2[i%s2_n];
}
}
__global__ void _cuda_multiply1_add_to(float* d, const float* s1, const float* s2, size_t n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] += s1[i]*s2[i];
}
}
// Same reduction as _cuda_multiply2 but accumulates into d instead of
// overwriting it.
__global__ void _cuda_multiply2_add_to(float* d, const float* s1, const float* s2,
                            size_t n, size_t s1_n, size_t s2_n, size_t max_size)
{
    for (auto i : grid_stride_range(0, n))
    {
        for (size_t j = i; j < max_size; j += n)
            d[i] += s1[j%s1_n]*s2[j%s2_n];
    }
}
__global__ void _cuda_multiply3_add_to(float* d, const float* s1, const float* s2,
size_t n, size_t s1_n, size_t s2_n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] += s1[i%s1_n]*s2[i%s2_n];
}
}
// Computes dest = src1*src2 elementwise (or dest += src1*src2 when add_to is
// true).  The k/nr/nc dimensions must match exactly; the num_samples dimension
// may differ, in which case any tensor with a single sample is broadcast (or,
// when dest has a single sample, the products are reduced into it).
void multiply (
    bool add_to,
    tensor& dest,
    const tensor& src1,
    const tensor& src2
)
{
    DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() &&
        dest.nr() == src1.nr() && src1.nr() == src2.nr() &&
        dest.nc() == src1.nc() && src1.nc() == src2.nc() ,"");
    const long MD = std::max(std::max(dest.num_samples(),src1.num_samples()),src2.num_samples());
    DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) &&
        (src1.num_samples()==1 || src1.num_samples()==MD) &&
        (src2.num_samples()==1 || src2.num_samples()==MD) ,"");
    if (dest.size() == 0)
        return;
    const size_t max_size = std::max(std::max(dest.size(),src1.size()),src2.size());
    // NOTE: an earlier revision obtained host() pointers here into unused
    // locals.  That forced a synchronizing device-to-host transfer for no
    // benefit, since all kernels below read/write device() pointers only.
    if (dest.size() == src1.size() && src1.size() == src2.size())
    {
        // All three tensors are the same size, so do the simple 1:1 mapping.
        if (add_to)
            launch_kernel(_cuda_multiply1_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
        else
            launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size());
    }
    else if (dest.num_samples() == 1)
    {
        // dest is smaller than the inputs, so each output element accumulates
        // over the broadcast sample dimension.
        if (add_to)
            launch_kernel(_cuda_multiply2_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                        dest.size(), src1.size(), src2.size(), max_size);
        else
            launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                        dest.size(), src1.size(), src2.size(), max_size);
    }
    else
    {
        // dest is full sized and one (or both) of the inputs is broadcast.
        if (add_to)
            launch_kernel(_cuda_multiply3_add_to,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                        dest.size(), src1.size(), src2.size());
        else
            launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(),
                        dest.size(), src1.size(), src2.size());
    }
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
for (auto i : grid_stride_range(0, n))
{
auto k = (i/bs)%ks;
d[i] = s1[i]*s2[k];
}
}
__global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// zero initialize d before we begin.
for (auto i : grid_stride_range(0, ks))
d[i] = 0;
__syncthreads();
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
__global__ void _cuda_multiply_conv_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
for (auto i : grid_stride_range(0, n))
{
auto k = (i/bs)%ks;
d[i] += s1[i]*s2[k];
}
}
__global__ void _cuda_multiply_conv2_add_to(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks)
{
// loop over all the image planes
for (auto i : grid_stride_range_y(0, n))
{
// sum all the elements in the i-th image plane
float temp = 0;
for (auto j : grid_stride_range(i*bs, (i+1)*bs))
temp += s1[j]*s2[j];
auto k = i%ks;
// and store the sum into d[k]
warp_reduce_atomic_add(d[k], temp);
}
}
void multiply_conv (
bool add_to,
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (have_same_dimensions(dest,src1))
{
DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k(),"");
if (dest.size() == 0)
return;
if (add_to)
launch_kernel(_cuda_multiply_conv_add_to,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
else
launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()),
dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
else
{
DLIB_CASSERT(have_same_dimensions(src1,src2),"");
DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k(),"");
if (dest.size() == 0)
return;
dim3 blocks(10,1);
dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel.
if (add_to)
_cuda_multiply_conv2_add_to<<<blocks,threads>>>(
dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
else
_cuda_multiply_conv2<<<blocks,threads>>>(
dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k());
}
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = s1[i]+s2[i];
}
}
__global__ void _cuda_add2(float* d, const float* s1, const float* s2,
size_t dn, size_t dk, size_t dr, size_t dc,
size_t s1n, size_t s1k, size_t s1r, size_t s1c,
size_t s2n, size_t s2k, size_t s2r, size_t s2c)
{
for (auto i : grid_stride_range(0, dn*dk*dr*dc))
{
size_t n,k,r,c;
unpack_idx(i, dk,dr,dc, n,k,r,c);
float v1 = 0;
float v2 = 0;
if (n < s1n &&
k < s1k &&
r < s1r &&
c < s1c )
{
v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)];
}
if (n < s2n &&
k < s2k &&
r < s2r &&
c < s2c )
{
v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)];
}
d[i] = v1+v2;
}
}
void add (
tensor& dest,
const tensor& src1,
const tensor& src2
)
{
if (dest.size() == 0)
return;
// Do the simple and fast version if everything has the same dimensions
if (have_same_dimensions(dest, src1) &&
have_same_dimensions(dest, src2))
{
launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size());
}
else
{
// Otherwise, do the more complex version with bounds checking.
launch_kernel(_cuda_add2,max_jobs(dest.size()),
dest.device(), src1.device(), src2.device(),
dest.num_samples(), dest.k(), dest.nr(), dest.nc(),
src1.num_samples(), src1.k(), src1.nr(), src1.nc(),
src2.num_samples(), src2.k(), src2.nr(), src2.nc()
);
}
}
// ------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s[i] + B;
}
}
__global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s[i];
}
}
void affine_transform(
tensor& dest,
const tensor& src,
const float A,
const float B
)
{
DLIB_CASSERT(dest.size()==src.size(),"");
if (B != 0)
launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B);
else
launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
void affine_transform(
tensor& dest,
const tensor& src,
const float A
)
{
DLIB_CASSERT(dest.size()==src.size(),"");
launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s1[i] + B*s2[i] + C;
}
}
__global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s1[i] + B*s2[i];
}
}
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const float A,
const float B,
const float C
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
if (C != 0)
launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C);
else
launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const float A,
const float B
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale)
{
for (auto i : grid_stride_range(0, n))
{
d[i] += scale*s[i];
}
}
void add_scaled(
tensor& dest,
const float scale,
const tensor& src
)
{
DLIB_CASSERT(dest.size()==src.size(),"");
launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform5(
float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D
)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D;
}
}
void affine_transform(
tensor& dest,
const tensor& src1,
const tensor& src2,
const tensor& src3,
const float A,
const float B,
const float C,
const float D
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
DLIB_CASSERT(dest.size()==src3.size(),"");
launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(),
src2.device(), src3.device(), dest.size(), A, B, C, D);
}
// ----------------------------------------------------------------------------------------
__global__ void _cuda_affine_transform_range(
float* d, const float* s1, const float* s2, const float* s3, size_t begin, size_t end, float A, float B, float C
)
{
for (auto i : grid_stride_range(begin, end))
{
d[i] = A*s1[i] + B*s2[i] + C*s3[i];
}
}
void affine_transform_range(
size_t begin,
size_t end,
tensor& dest,
const tensor& src1,
const tensor& src2,
const tensor& src3,
const float A,
const float B,
const float C
)
{
DLIB_CASSERT(dest.size()==src1.size(),"");
DLIB_CASSERT(dest.size()==src2.size(),"");
DLIB_CASSERT(dest.size()==src3.size(),"");
DLIB_CASSERT(begin <= end && end <= dest.size(),"");
launch_kernel(_cuda_affine_transform_range,max_jobs(end-begin),
dest.device(), src1.device(),
src2.device(), src3.device(), begin, end, A, B, C);
}
// -----------------------------------------------------------------------------------
__global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A[i]*s[i] + B[i];
}
}
__global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs)
{
for (auto i : grid_stride_range(0, n))
{
d[i] = A[i%bs]*s[i] + B[i%bs];
}
}
void affine_transform(
tensor& dest,
const tensor& src,
const tensor& A,
const tensor& B
)
{
DLIB_CASSERT(have_same_dimensions(dest, src),"");
DLIB_CASSERT(
((A.num_samples()==1 && B.num_samples()==1) ||
(A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())),"");
DLIB_CASSERT(
A.nr()==B.nr() && B.nr()==src.nr() &&
A.nc()==B.nc() && B.nc()==src.nc() &&
A.k() ==B.k() && B.k()==src.k(),
"\nA.nr(): " << A.nr() << "\nB.nr(): " << B.nr() << "\nsrc.nr(): " << src.nr()
<<"\nA.nc(): " << A.nc() << "\nB.nc(): " << B.nc() << "\nsrc.nc(): " << src.nc()
<<"\nA.k(): " << A.k() << "\nB.k(): " << B.k() << "\nsrc.k(): " << src.k()
);
if (A.num_samples() == 1)
{
launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size());
}
else
{
launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device());
}
}
// ----------------------------------------------------------------------------------------
// Computes the Adam optimizer step s for parameters in [begin, end), updating
// the first/second moment estimates m and v in place.  alpha is the
// bias-corrected step size computed by the host-side wrapper.
__global__ void _cuda_compute_adam_update(
    size_t begin,
    size_t end,
    float* s,
    float* m,
    float* v,
    const float alpha,
    const float weight_decay,
    const float momentum1,
    const float momentum2,
    const float* params,
    const float* params_grad
)
{
    // Keeps the denominator away from zero.
    const float eps = 1e-8;
    // The loop is equivalent to doing this:
    //   m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad);
    //   v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad);
    //   s = -alpha*m/(sqrt(v) + eps);
    for (auto i : grid_stride_range(begin, end))
    {
        float g = (weight_decay*params[i] + params_grad[i]);  // L2-regularized gradient
        m[i] = momentum1*m[i] + (1-momentum1)*g;              // first-moment EMA
        v[i] = momentum2*v[i] + (1-momentum2)*g*g;            // second-moment EMA
        s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps);           // negative so the caller can add s to params
    }
}
// Host-side wrapper for the Adam update over elements [begin, end).
// t is the iteration number used for bias correction; note that t == 0 makes
// the denominator 1-pow(momentum1,0) zero, so callers are expected to pass
// t >= 1.  The corrected step size alpha is folded into the kernel launch.
void compute_adam_update (
    size_t begin,
    size_t end,
    tensor& s,
    tensor& m,
    tensor& v,
    const float t,
    const float learning_rate,
    const float weight_decay,
    const float momentum1,
    const float momentum2,
    const tensor& params,
    const tensor& params_grad
)
{
    DLIB_CASSERT(s.size() == m.size() &&
        s.size() == v.size() &&
        s.size() == params.size() &&
        s.size() == params_grad.size(),"");
    DLIB_CASSERT(begin <= end && end <= params.size(),"");
    // Standard Adam bias correction for iteration t.
    const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1, t));

    launch_kernel(_cuda_compute_adam_update,max_jobs(end-begin),
        begin, end, s.device(), m.device(), v.device(), alpha, weight_decay,
        momentum1, momentum2, params.device(), params_grad.device());
}
// -----------------------------------------------------------------------------------
// Channel-wise affine map: every element is scaled/shifted by its channel's
// coefficient.  bs is the number of elements per channel (nr*nc at the call
// site) and ks the number of channels, so (i/bs)%ks is element i's channel.
__global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs, size_t ks)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const auto ch = (idx/bs)%ks;
        d[idx] = A[ch]*s[idx] + B[ch];
    }
}
// Computes dest = A*src + B where A and B hold one scalar per channel
// (they must be 1 x k x 1 x 1 tensors with k == src.k()).
void affine_transform_conv(
    tensor& dest,
    const tensor& src,
    const tensor& A,
    const tensor& B
)
{
    DLIB_CASSERT(have_same_dimensions(dest, src),"");
    DLIB_CASSERT(have_same_dimensions(A, B),"");
    DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k(),"");

    // nr*nc elements share each channel coefficient; src.k() channels per sample.
    launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()),
        dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k());
}
// -----------------------------------------------------------------------------------
// Sums in over blocks of length n: out[i] = in[i] + in[i+n] + in[i+2n] + ...
// for i in [0, n), i.e. a per-position sum across total_n/n samples.
__global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n)
{
    for (auto idx : grid_stride_range(0, n))
    {
        float sum = in[idx];
        for (size_t pos = idx + n; pos < total_n; pos += n)
            sum += in[pos];
        out[idx] = sum;
    }
}
// Sets grad to the bias gradient: the sum of gradient_input over its sample
// dimension.  grad must hold exactly one sample and match gradient_input's
// k/nr/nc; gradient_input must be non-empty.
void assign_bias_gradient (
    tensor& grad,
    const tensor& gradient_input
)
{
    DLIB_CASSERT(
        grad.num_samples() == 1 &&
        gradient_input.k() == grad.k() &&
        gradient_input.nr() == grad.nr() &&
        gradient_input.nc() == grad.nc() &&
        gradient_input.size() > 0,"");

    // Each of the grad.size() outputs sums every grad.size()-strided element
    // of gradient_input (one term per sample).
    launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size())
;
// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
// In-place binarization: d[i] becomes 1 when strictly greater than thresh,
// otherwise 0.
__global__ void _cuda_threshold(float* d, size_t n, float thresh)
{
    for (auto idx : grid_stride_range(0, n))
    {
        const bool above = d[idx] > thresh;
        d[idx] = above ? 1 : 0;
    }
}
// Replaces every element of data with 1 if it exceeds thresh, else 0.
void threshold (
    tensor& data,
    float thresh
)
{
    const auto n = data.size();
    launch_kernel(_cuda_threshold, max_jobs(n), data.device(), n, thresh);
}
// ------------------------------------------------------------------------------------
// Accumulates dot(a, b) into *result: each thread builds a partial sum over
// its grid-stride slice, then the partials are merged with
// warp_reduce_atomic_add.
__global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result)
{
    float partial = 0;
    for (auto idx : grid_stride_range(0, n))
        partial += a[idx]*b[idx];

    warp_reduce_atomic_add(*result, partial);
}
// Computes the dot product of a and b and accumulates it into element idx of
// result (the kernel merges partial sums with an atomic add).
void dot (
    const tensor& a,
    const tensor& b,
    tensor& result,
    size_t idx
)
{
    DLIB_CASSERT(a.size() == b.size(), "");
    DLIB_CASSERT(idx < result.size(), "");

    const auto num = a.size();
    launch_kernel(_cuda_dot, max_jobs(num), a.device(), b.device(), num, result.device()+idx);
}
// ----------------------------------------------------------------------------------------
// PReLU activation: d[i] = s[i] when positive, otherwise p*s[i] with the
// single learned slope p read from *pp.
__global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp)
{
    const float slope = *pp;
    for (auto idx : grid_stride_range(0, n))
    {
        const float val = s[idx];
        d[idx] = (val > 0) ? val : slope*val;
    }
}
// Applies the PReLU activation to src and writes the result into dest:
// dest = src where src > 0, otherwise param*src.
void prelu (
    tensor& dest,
    const tensor& src,
    const tensor& param
)
{
    const auto n = src.size();
    launch_kernel(_cuda_prelu, max_jobs(dest.size()),
        src.device(), dest.device(), n, param.device());
}
// ----------------------------------------------------------------------------------------
// Accumulates the PReLU gradients: the input gradient is added (+=) into out,
// and the slope-parameter gradient is merged into *ppgrad.  For s[i] > 0 the
// activation is the identity; otherwise it is p*s[i], so the parameter picks
// up gi[i]*s[i].
__global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad)
{
    const float p = *pp;
    float pgrad = 0;
    for(auto i : grid_stride_range(0, n))
    {
        if (s[i] > 0)
        {
            out[i] += gi[i];      // identity branch: pass the gradient through
        }
        else
        {
            out[i] += p*gi[i];    // scaled branch: chain rule w.r.t. the input
            pgrad += gi[i]*s[i];  // ... and w.r.t. the slope parameter p
        }
    }

    // Then do the warp reduce add thing to merge into one output value.
    warp_reduce_atomic_add(*ppgrad, pgrad);
}
// Computes PReLU gradients.  The input gradient is ACCUMULATED (+=) into
// grad, while params_grad is zeroed here and then receives the scalar slope
// gradient (the kernel adds into it atomically).
void prelu_gradient (
    tensor& grad,
    const tensor& src,
    const tensor& gradient_input,
    const tensor& param,
    tensor& params_grad
)
{
    params_grad = 0;  // kernel accumulates atomically, so start from zero
    launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()),
        grad.device(), src.device(), gradient_input.device(), grad.size(),
        param.device(), params_grad.device());
}
// ----------------------------------------------------------------------------------------
// Copies count_k channels from src (starting at channel src_k_offset) into
// dest (starting at channel dest_k_offset), for every sample, using
// device-to-device memcpy per sample.
//
// Fixes relative to the previous revision: removed a stray ';;' and rewrote
// the capacity asserts in addition form -- the old 'dest.k() - dest_k_offset'
// form wrapped around when the (unsigned) offset exceeded k(), letting
// invalid offsets slip past the check.
void copy_tensor(
    tensor& dest,
    size_t dest_k_offset,
    const tensor& src,
    size_t src_k_offset,
    size_t count_k
)
{
    // Distance, in floats, between the same position in consecutive samples.
    const size_t dest_sample_size = static_cast<size_t>(dest.nc() * dest.nr() * dest.k());
    const size_t src_sample_size = static_cast<size_t>(src.nc() * src.nr() * src.k());
    // Number of floats copied per sample (count_k whole channels).
    const size_t block_size = count_k * dest.nc() * dest.nr();

    DLIB_CASSERT(dest.num_samples() == src.num_samples() &&
        dest.nc() == src.nc() && dest.nr() == src.nr(), "All sources should fit into dest tensor size");
    DLIB_CASSERT(dest_k_offset + count_k <= static_cast<size_t>(dest.k()), "Not enough space in dest tensor");
    DLIB_CASSERT(src_k_offset + count_k <= static_cast<size_t>(src.k()), "Not enough space in src tensor");

    float* dest_p = dest.device() + dest_k_offset * dest.nc() * dest.nr();
    const float* src_p = src.device() + src_k_offset * src.nc() * src.nr();

    for (long i = 0; i < src.num_samples(); ++i)
    {
        CHECK_CUDA(cudaMemcpy(dest_p, src_p, block_size * sizeof(float), cudaMemcpyDeviceToDevice));
        dest_p += dest_sample_size;
        src_p += src_sample_size;
    }
}
// ----------------------------------------------------------------------------------------
}
}
|
155355b4d80ef20b55145cfb3beddf8b73728d80.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file test_precision.cu
* @brief test the precision of stored values (float/double?)
*
* @author Lukas Pospisil
*/
#include "pascinference.h"
#include <math.h>
using namespace pascinference;
#ifdef USE_ROCM
// Prints 1 + 10^{-i} for i = 0..24 at 30 decimal digits, showing where double
// precision stops resolving the added term.
__global__ void print_precision(){
    double value;
    for(int exp10 = 0; exp10 < 25; exp10++){
        value = 1.0 + pow(10.0,-exp10);
        printf(" 1+10^{- %d} : %.30f\n", exp10, value );
    }
}
#endif
// Prints 1 + 10^{-i} for i = 0..24 at 30 decimal digits on the CPU, and (when
// built for ROCm) runs the same loop on the GPU, to compare floating-point
// resolution on both sides.
int main( int argc, char *argv[] )
{
    /* call initialize */
    if(!Initialize<SeqArrayVector>(argc, argv)){
        return 0;
    }

    coutMaster << std::setprecision(30);

    // CPU side of the comparison.
    coutMaster << "CPU:" << std::endl;
    double a;
    for(int i=0; i < 25;i++){
        a = 1.0 + pow(10.0,-i);
        coutMaster << " 1+10^{-" << std::setw(3) << i << "}: " << a << std::endl;
    }

#ifdef USE_ROCM
    // GPU side: a single-thread kernel printing the same table.
    coutMaster << std::endl;
    coutMaster << "GPU:" << std::endl;

    hipLaunchKernelGGL(( print_precision), dim3(1), dim3(1), 0, 0, );
    gpuErrchk( hipDeviceSynchronize() );
#endif

    Finalize<SeqArrayVector>();
    return 0;
}
| 155355b4d80ef20b55145cfb3beddf8b73728d80.cu | /** @file test_precision.cu
* @brief test the precision of stored values (float/double?)
*
* @author Lukas Pospisil
*/
#include "pascinference.h"
#include <math.h>
using namespace pascinference;
#ifdef USE_CUDA
// Prints 1 + 10^{-i} for i = 0..24 at 30 decimal digits, showing where double
// precision stops resolving the added term.
__global__ void print_precision(){
    double a;
    for(int i=0; i < 25;i++){
        a = 1.0 + pow(10.0,-i);
        printf(" 1+10^{- %d} : %.30f\n", i, a );
    }
}
#endif
// Prints 1 + 10^{-i} for i = 0..24 at 30 decimal digits on the CPU, and (when
// built with CUDA) runs the same loop on the GPU, to compare floating-point
// resolution on both sides.
int main( int argc, char *argv[] )
{
    /* call initialize */
    if(!Initialize<SeqArrayVector>(argc, argv)){
        return 0;
    }

    coutMaster << std::setprecision(30);

    // CPU side of the comparison.
    coutMaster << "CPU:" << std::endl;
    double a;
    for(int i=0; i < 25;i++){
        a = 1.0 + pow(10.0,-i);
        coutMaster << " 1+10^{-" << std::setw(3) << i << "}: " << a << std::endl;
    }

#ifdef USE_CUDA
    // GPU side: a single-thread kernel printing the same table.
    coutMaster << std::endl;
    coutMaster << "GPU:" << std::endl;

    print_precision<<<1, 1>>>();
    gpuErrchk( cudaDeviceSynchronize() );
#endif

    Finalize<SeqArrayVector>();
    return 0;
}
|
b0e8331d9cb1a637dbd5edea6535dce0b0bdda5c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//-----include header files, JY-----
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i].  Indexes by threadIdx.x
// only, so a single launch covers at most one block's worth of elements.
__global__ void addKernel(int *c, const int *a, const int *b) // addKernel
{ // addKernel function
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
} | b0e8331d9cb1a637dbd5edea6535dce0b0bdda5c.cu | #include "includes.h"
//-----include header files, ¤Þ¤J¼ÐÀYÀÉ-----
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector addition: c[i] = a[i] + b[i].  Indexes by threadIdx.x
// only, so a single launch covers at most one block's worth of elements.
// (Original comments were Big5-garbled; translated to English.)
__global__ void addKernel(int *c, const int *a, const int *b) // addKernel function
{ // addKernel function
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
} |
3cca1eca5452a76a266c15f772267810e271d25f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaDLL.h"
#include <stdio.h>
#include <hipfft.h>
#include <hip/hip_complex.h>
#include <hip/device_functions.h>
#include <math.h>
#include <float.h>
///////////////////////////////
///////////// Device specific operations
//////////////////////////
//#define IDX2R(i,j,N) (((i)*(N))+(j)) //easy way to address 2D array
// Multiplies each element of a row x (arraysize/row) complex array by
// (-1)^(x+y), which is equivalent to applying an fftshift in the conjugate
// domain of a subsequent FFT.
__global__ void fftshift_2D(hipfftComplex *data, int arraysize, int row)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < arraysize; i += stride) {
        const int x = i % row;
        const int y = i / row;
        // +1 when x+y is even, -1 when odd.
        const float sign = 1 - 2 * ((x + y) & 1);
        data[i].x *= sign;
        data[i].y *= sign;
    }
}
// Returns sqrt(x*x + y*y) without intermediate overflow/underflow by
// factoring it as v*sqrt(1 + (w/v)^2), where v = max(|x|,|y|) and
// w = min(|x|,|y|).
__device__ static __inline__ float cmagf2(float x, float y)
{
    float a, b, v, w, t;
    a = fabsf(x);
    b = fabsf(y);
    // Order the magnitudes so v >= w.
    if (a > b) {
        v = a;
        w = b;
    }
    else {
        v = b;
        w = a;
    }
    t = w / v;
    t = 1.0f + t * t;
    t = v * sqrtf(t);
    // Fall back to v + w when v is zero (avoids 0/0 -> NaN) or when either
    // magnitude already exceeds 3.402823466e38 (FLT_MAX).
    if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) {
        t = v + w;
    }
    return t;
}
////////////////////////////////
////////GPU Kernels
//////////////////////////////
//this kernel requires fftshift
// Builds the angular-spectrum propagation kernel phase sqrt(km^2-k^2)-km on a
// frequency grid centered at (row/2, row/2).  The output is in centered
// ("fftshifted") order, so it still needs quadrant swapping before use with
// an unshifted (i)FFT -- see the author's note below, and
// makeKernel_nonefftshift / makeKernelPhase for shift-free variants.
// ImgProperties = { pixSize, MagX, nm, lambda }.
__global__ void makeKernel(float* KernelPhase, int row, int column, float* ImgProperties, float MagXscaling) {
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;

    float MagX = ImgProperties[1];
    float pixSize= ImgProperties[0];
    float nm = ImgProperties[2];
    float lambda = ImgProperties[3];
    float pixdxInv = MagX/pixSize*MagXscaling; // Magnification/pixSize, rescaled by the caller-supplied factor
    float km = nm/lambda; // medium wavenumber nm/lambda

    for (int i = threadID; i < row*column; i += numThreads) {
        int dx = i%row;
        int dy = i/row;
        float kdx = float( dx - row/2)*pixdxInv;
        float kdy = float( dy - row/2)*pixdxInv;
        float temp = km*km - kdx*kdx - kdy*kdy;
        // Evanescent components (temp < 0) are zeroed.
        KernelPhase[i]= (temp >= 0) ? (sqrtf(temp)-km) : 0;
        //This still needs quadrant swapping so this will not work in the ifft routine as is!
    }
}
///Generates a kernel that is compatible with the non-shifted fft routine
// Builds the angular-spectrum propagation kernel phase sqrt(km^2-k^2)-km laid
// out so the DC term is at index 0 (compatible with an unshifted (i)FFT).
// ImgProperties = { pixSize, MagX, nmed, lambda } (4 floats).
//
// MagXscaling is the extra frequency rescale (the callers pass
// MagXReScale = 1/resizeRow).  It is now an explicit parameter: the previous
// revision computed it as 1/ImgProperties[4], reading past the 4-float
// device buffer every caller allocates, and all three launch sites in this
// file were already passing it as a fifth argument that the old 4-parameter
// signature did not accept.
__global__ void makeKernel_nonefftshift(float* KernelPhase, int row, int column, float* ImgProperties, float MagXscaling) {
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;

    const float pixSize = ImgProperties[0];
    const float MagX = ImgProperties[1];
    const float nmed = ImgProperties[2];
    const float lambda = ImgProperties[3];

    const float pixdxInv = MagX / pixSize * MagXscaling; // Magnification/pixSize, rescaled
    const float km = nmed / lambda; // medium wavenumber nmed/lambda

    for (int i = threadID; i < row*column; i += numThreads) {
        int dx = i % row;
        int dy = i / row;
        // Map indices to signed frequencies without an explicit fftshift.
        dx = ((dx - row / 2)>0) ? (dx - row) : dx;
        dy = ((dy - row / 2)>0) ? (dy - row) : dy;
        const float kdx = float(dx)*pixdxInv;
        const float kdy = float(dy)*pixdxInv;
        const float temp = km*km - kdx*kdx - kdy*kdy;
        // Evanescent components (temp < 0) are zeroed.
        KernelPhase[i] = (temp >= 0) ? (sqrtf(temp)-km) : 0;
    }
}
// Like makeKernel_nonefftshift (DC term at index 0), but self-contained: it
// reads only the 4 floats of ImgProperties = { pixSize, MagX, nm, lambda }
// and normalizes the frequency step by the row count (pixdxInv/row) instead
// of taking an extra rescale argument.
__global__ void makeKernelPhase(float* KernelPhase, int row, int column, float* ImgProperties) {
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    const float pixdxInv = ImgProperties[1] / ImgProperties[0]; // Magnification/pixSize
    const float km = ImgProperties[2] / ImgProperties[3]; // nm / lambda

    for (int i = threadID; i < row*column; i += numThreads) {
        int dx = i % row;
        int dy = i / row;
        // Map indices to signed frequencies without an explicit fftshift.
        dx = ((dx - row / 2)>0) ? (dx - row) : dx;
        dy = ((dy - row / 2)>0) ? (dy - row) : dy;
        float kdx = float(dx)*pixdxInv/row;
        float kdy = float(dy)*pixdxInv/row;
        float temp = km*km - kdx*kdx - kdy*kdy;
        // Evanescent components (temp < 0) are zeroed.
        KernelPhase[i] = (temp >= 0) ? (sqrtf(temp)-km) : 0;
    }
}
// Fills the 3D stack with the back-focal-plane field propagated to each z
// plane: slice j, pixel k receives
//   bfpMag[k] * exp(i*(bfpPhase[k] + kPhase[k]*zDist[j])).
// totalsize = imgsize * number of slices.
__global__ void TransferFunction(hipfftComplex* img3Darray, float* bfpMag, float* bfpPhase, float* kPhase, float* zDist, int totalsize, int imgsize)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < totalsize; i += stride)
    {
        const int slice = i / imgsize;  // z index
        const int pix = i % imgsize;    // pixel within the 2D plane
        const float mag = bfpMag[pix];
        // zDist is expected to arrive pre-multiplied by 2*pi (see callers).
        const float phase = bfpPhase[pix] + (kPhase[pix]*zDist[slice]);
        img3Darray[i].x = mag*cosf(phase);
        img3Darray[i].y = mag*sinf(phase);
    }
}
// Splits an interleaved complex array into separate real/imaginary arrays.
// (Removed the dead local 'k = i/imgsize' the author had already flagged;
// imgsize is kept only for signature compatibility with existing launches.)
__global__ void Cmplx2ReIm(hipfftComplex* cmplxArray, float* reArray, float* imgArray, int size, int imgsize) {
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = threadID; i < size; i += numThreads){
        reArray[i] = cmplxArray[i].x;
        imgArray[i] = cmplxArray[i].y;
    }
}
// Writes the magnitude of each complex element using the overflow-safe
// cmagf2 helper.  (Removed the dead local 'k = i/imgsize'; imgsize is kept
// only for signature compatibility with existing launches.)
__global__ void Cmplx2Mag(hipfftComplex* cmplxArray, float* MagArray, int size, int imgsize) {
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = threadID; i < size; i += numThreads) {
        MagArray[i] = cmagf2(cmplxArray[i].x, cmplxArray[i].y);
    }
}
////////////////////////////////////////////////
//////////////// FUnction to compile into DLL
////////////////////////////////////////////////
// Propagates a back-focal-plane (BFP) field, given as separate magnitude and
// phase arrays, to a stack of z planes and returns the complex field of every
// plane as separate real/imaginary host arrays.
//
//   arraySize     = { row, column, zrange, resizeRow }
//   zscale        = per-plane distances, pre-multiplied by 2*pi by the caller
//   imgProperties = { pixSize, MagX, nm, lambda } (4 floats, consumed by
//                   makeKernelPhase)
//
// Fixes relative to the previous revision:
//  - "float* a, float* b;" multi-declarations are not valid C++ (only the
//    first declarator may carry the type); each pointer now has its own
//    declaration.
//  - device buffers are released on the FFT error paths instead of leaking.
//  - removed the unused resizeRow/MagXReScale locals (makeKernelPhase takes
//    no rescale argument).
void GPU_Holo_v1(float* h_bfpMag, float* h_bfpPhase,
    float* h_ImgOutRe, float* h_ImgOutIm,
    float* zscale, int* arraySize, float* imgProperties) {

    const int row = arraySize[0];
    const int column = arraySize[1];
    const int zrange = arraySize[2];
    const size_t memZsize = zrange * sizeof(float);
    const int size2Darray = row * column;
    const size_t mem2Darray = size2Darray * sizeof(float);
    const int size3Darray = row * column * zrange;
    const size_t mem3Darray = size3Darray * sizeof(float);
    const size_t mem3dsize = size3Darray * sizeof(hipfftComplex);

    // Launch configuration (could be exposed as arguments later).
    const int BlockSizeAll = 512;
    const int GridSizeKernel = (size2Darray + BlockSizeAll - 1) / BlockSizeAll;
    const int GridSizeTransfer = (size3Darray / 16 + BlockSizeAll - 1) / BlockSizeAll;

    // --- 2D propagation-kernel phase ---
    float* d_kernelPhase;
    float* d_imgProperties;
    const size_t sizePrp = 4 * sizeof(float);
    hipMalloc((void**)&d_kernelPhase, mem2Darray);
    hipMalloc((void**)&d_imgProperties, sizePrp);
    hipMemcpy(d_imgProperties, imgProperties, sizePrp, hipMemcpyHostToDevice);
    hipLaunchKernelGGL((makeKernelPhase), dim3(GridSizeKernel), dim3(BlockSizeAll), 0, 0,
        d_kernelPhase, row, column, d_imgProperties);

    // --- upload the BFP field and z distances, build the 3D transfer stack ---
    float* d_bfpMag;
    float* d_bfpPhase;
    float* d_zscale;
    hipfftComplex* d_3DiFFT;
    hipMalloc((void**)&d_bfpMag, mem2Darray);
    hipMalloc((void**)&d_bfpPhase, mem2Darray);
    hipMalloc((void**)&d_zscale, memZsize);
    hipMemcpy(d_bfpMag, h_bfpMag, mem2Darray, hipMemcpyHostToDevice);
    hipMemcpy(d_bfpPhase, h_bfpPhase, mem2Darray, hipMemcpyHostToDevice);
    hipMemcpy(d_zscale, zscale, memZsize, hipMemcpyHostToDevice);
    hipMalloc((void**)&d_3DiFFT, mem3dsize);
    hipLaunchKernelGGL((TransferFunction), dim3(GridSizeTransfer), dim3(BlockSizeAll), 0, 0,
        d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, size2Darray);

    // Inputs are consumed once the stack is built.
    // NOTE(review): this relies on hipFree synchronizing with the launch
    // above before releasing the buffers -- confirm that assumption.
    hipFree(d_bfpMag);
    hipFree(d_bfpPhase);
    hipFree(d_zscale);
    hipFree(d_imgProperties);
    hipFree(d_kernelPhase);

    // LabVIEW cannot consume complex arrays directly, so the result is split.
    float* d_ImgOutRe;
    float* d_ImgOutIm;
    hipMalloc((void**)&d_ImgOutRe, mem3Darray);
    hipMalloc((void**)&d_ImgOutIm, mem3Darray);

    // --- batched in-place 2D inverse FFT over all z slices ---
    int istride = 1;        // every input element participates
    int ostride = 1;        // every output element is stored
    int idist = row*column; // distance between consecutive input slices
    int odist = row*column; // distance between consecutive output slices
    int inembed[] = { row,column };
    int onembed[] = { row,column };
    const int NRANK = 2;
    int n[NRANK] = { row,column };
    int BATCH = zrange;

    hipfftHandle BatchFFTPlan;
    if (hipfftPlanMany(&BatchFFTPlan, NRANK, n,
        inembed, istride, idist,
        onembed, ostride, odist,
        HIPFFT_C2C, BATCH) != HIPFFT_SUCCESS)
    {
        fprintf(stderr, "CUFFT Error: Unable to create plan\n");
        hipFree(d_3DiFFT);
        hipFree(d_ImgOutRe);
        hipFree(d_ImgOutIm);
        return;
    }
    if (hipfftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, HIPFFT_BACKWARD) != HIPFFT_SUCCESS) {
        fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
        hipfftDestroy(BatchFFTPlan);
        hipFree(d_3DiFFT);
        hipFree(d_ImgOutRe);
        hipFree(d_ImgOutIm);
        return;
    }
    hipfftDestroy(BatchFFTPlan);

    // Split the complex result and copy it back to the host.
    hipLaunchKernelGGL((Cmplx2ReIm), dim3(GridSizeTransfer), dim3(BlockSizeAll), 0, 0,
        d_3DiFFT, d_ImgOutRe, d_ImgOutIm, size3Darray, size2Darray);
    hipFree(d_3DiFFT);

    hipMemcpy(h_ImgOutRe, d_ImgOutRe, mem3Darray, hipMemcpyDeviceToHost);
    hipMemcpy(h_ImgOutIm, d_ImgOutIm, mem3Darray, hipMemcpyDeviceToHost);
    hipFree(d_ImgOutRe);
    hipFree(d_ImgOutIm);
}
// Same pipeline as GPU_Holo_v1, but returns only the magnitude of each
// propagated plane (via Cmplx2Mag) instead of separate real/imaginary parts.
//
//   arraySize     = { row, column, zrange, resizeRow }
//   zscale        = per-plane distances, pre-multiplied by 2*pi by the caller
//   imgProperties = { pixSize, MagX, nm, lambda } (4 floats)
//
// Fixes relative to the previous revision: invalid "float* a, float* b;"
// multi-declarations split into separate declarations; device buffers freed
// on the FFT error paths; unused resizeRow/MagXReScale locals removed.
void GPU_Holo_v2(float* h_bfpMag, float* h_bfpPhase,
    float* h_ImgOutAmp, float* zscale, int* arraySize, float* imgProperties) {

    const int row = arraySize[0];
    const int column = arraySize[1];
    const int zrange = arraySize[2];
    const size_t memZsize = zrange * sizeof(float);
    const int size2Darray = row * column;
    const size_t mem2Darray = size2Darray * sizeof(float);
    const int size3Darray = row * column * zrange;
    const size_t mem3Darray = size3Darray * sizeof(float);
    const size_t mem3dsize = size3Darray * sizeof(hipfftComplex);

    // Launch configuration (could be exposed as arguments later).
    const int BlockSizeAll = 512;
    const int GridSizeKernel = (size2Darray + BlockSizeAll - 1) / BlockSizeAll;
    const int GridSizeTransfer = (size3Darray / 16 + BlockSizeAll - 1) / BlockSizeAll;

    // --- 2D propagation-kernel phase ---
    float* d_kernelPhase;
    float* d_imgProperties;
    const size_t sizePrp = 4 * sizeof(float);
    hipMalloc((void**)&d_kernelPhase, mem2Darray);
    hipMalloc((void**)&d_imgProperties, sizePrp);
    hipMemcpy(d_imgProperties, imgProperties, sizePrp, hipMemcpyHostToDevice);
    hipLaunchKernelGGL((makeKernelPhase), dim3(GridSizeKernel), dim3(BlockSizeAll), 0, 0,
        d_kernelPhase, row, column, d_imgProperties);

    // --- upload the BFP field and z distances, build the 3D transfer stack ---
    float* d_bfpMag;
    float* d_bfpPhase;
    float* d_zscale;
    hipfftComplex* d_3DiFFT;
    hipMalloc((void**)&d_bfpMag, mem2Darray);
    hipMalloc((void**)&d_bfpPhase, mem2Darray);
    hipMalloc((void**)&d_zscale, memZsize);
    hipMemcpy(d_bfpMag, h_bfpMag, mem2Darray, hipMemcpyHostToDevice);
    hipMemcpy(d_bfpPhase, h_bfpPhase, mem2Darray, hipMemcpyHostToDevice);
    hipMemcpy(d_zscale, zscale, memZsize, hipMemcpyHostToDevice);
    hipMalloc((void**)&d_3DiFFT, mem3dsize);
    hipLaunchKernelGGL((TransferFunction), dim3(GridSizeTransfer), dim3(BlockSizeAll), 0, 0,
        d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, size2Darray);

    // Inputs are consumed once the stack is built.
    // NOTE(review): this relies on hipFree synchronizing with the launch
    // above before releasing the buffers -- confirm that assumption.
    hipFree(d_bfpMag);
    hipFree(d_bfpPhase);
    hipFree(d_zscale);
    hipFree(d_imgProperties);
    hipFree(d_kernelPhase);

    // Magnitude output buffer.
    float* d_ImgOutAmp;
    hipMalloc((void**)&d_ImgOutAmp, mem3Darray);

    // --- batched in-place 2D inverse FFT over all z slices ---
    int istride = 1;        // every input element participates
    int ostride = 1;        // every output element is stored
    int idist = row*column; // distance between consecutive input slices
    int odist = row*column; // distance between consecutive output slices
    int inembed[] = { row,column };
    int onembed[] = { row,column };
    const int NRANK = 2;
    int n[NRANK] = { row,column };
    int BATCH = zrange;

    hipfftHandle BatchFFTPlan;
    if (hipfftPlanMany(&BatchFFTPlan, NRANK, n,
        inembed, istride, idist,
        onembed, ostride, odist,
        HIPFFT_C2C, BATCH) != HIPFFT_SUCCESS)
    {
        fprintf(stderr, "CUFFT Error: Unable to create plan\n");
        hipFree(d_3DiFFT);
        hipFree(d_ImgOutAmp);
        return;
    }
    if (hipfftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, HIPFFT_BACKWARD) != HIPFFT_SUCCESS) {
        fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
        hipfftDestroy(BatchFFTPlan);
        hipFree(d_3DiFFT);
        hipFree(d_ImgOutAmp);
        return;
    }
    hipfftDestroy(BatchFFTPlan);

    // Reduce the complex result to magnitudes and copy back to the host.
    hipLaunchKernelGGL((Cmplx2Mag), dim3(GridSizeTransfer), dim3(BlockSizeAll), 0, 0,
        d_3DiFFT, d_ImgOutAmp, size3Darray, size2Darray);
    hipFree(d_3DiFFT);

    hipMemcpy(h_ImgOutAmp, d_ImgOutAmp, mem3Darray, hipMemcpyDeviceToHost);
    hipFree(d_ImgOutAmp);
}
// Propagates a back-focal-plane field (magnitude/phase arrays) to every z
// plane in zscale and returns the complex result as separate real/imaginary
// host arrays.  arraySize = { row, column, zrange, resizeRow }; zscale is
// expected to be pre-multiplied by 2*pi by the caller; imgProperties holds
// 4 floats { pixSize, MagX, nmed, lambda }.
//
// NOTE(review): makeKernel_nonefftshift is declared above with 4 parameters,
// but it is launched here with 5 arguments (MagXReScale appended).  One of
// the two must change for this translation unit to compile -- the kernel
// most likely needs an explicit MagXscaling parameter.  TODO confirm.
void PropagateZslices(float* h_bfpMag, float* h_bfpPhase,
    float* h_ImgOutRe, float* h_ImgOutIm,
    float* zscale, int* arraySize, float* imgProperties){

    //Extract the size of the 2D and 3D arrays, and their respect allocation sizes
    int row = arraySize[0];
    int column = arraySize[1];
    int zrange = arraySize[2];
    int resizeRow = arraySize[3];
    float MagXReScale = 1.0f/float(resizeRow);  // extra frequency rescale for the kernel

    // Upload imgProperties and compute the unshifted propagation kernel phase.
    int numElements = row*column;
    size_t mem2darray = numElements*sizeof(float);
    const int BlockSizeAll = 512;
    int GridSizeKernel = (numElements + BlockSizeAll-1)/BlockSizeAll;

    float* d_kernelPhase;
    hipMalloc((void**)&d_kernelPhase, mem2darray);

    float *d_imgProperties;
    size_t sizePrp = 4 * sizeof(float);   // only 4 floats are uploaded
    hipMalloc((void**)&d_imgProperties, sizePrp);
    hipMemcpy(d_imgProperties, imgProperties, sizePrp, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( makeKernel_nonefftshift) , dim3(GridSizeKernel), dim3(BlockSizeAll),0,0 , d_kernelPhase, row, column, d_imgProperties, MagXReScale);

    // Upload the BFP field.
    float* d_bfpMag;
    float* d_bfpPhase;
    hipMalloc((void**)&d_bfpMag, mem2darray);
    hipMalloc((void**)&d_bfpPhase, mem2darray);
    hipMemcpy(d_bfpMag, h_bfpMag, mem2darray, hipMemcpyHostToDevice);
    hipMemcpy(d_bfpPhase, h_bfpPhase, mem2darray, hipMemcpyHostToDevice);

    // Upload the per-plane z distances.
    float *d_zscale;
    size_t memzsize = zrange * sizeof(float);
    hipMalloc((void**)&d_zscale, memzsize);
    hipMemcpy(d_zscale, zscale, memzsize, hipMemcpyHostToDevice);

    // Build the 3D transfer stack (one complex 2D field per z plane).
    hipfftComplex *d_3DiFFT;
    int size3Darray = row*column*zrange;
    size_t mem3dsize = size3Darray * sizeof(hipfftComplex);
    hipMalloc((void**)&d_3DiFFT, mem3dsize);

    int GridSizeTransfer = (numElements*zrange/16+BlockSizeAll-1)/BlockSizeAll;
    hipLaunchKernelGGL(( TransferFunction) , dim3(GridSizeTransfer), dim3(BlockSizeAll),0,0 , d_3DiFFT, d_bfpMag , d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, numElements);

    // Output buffers (LabVIEW cannot consume complex arrays directly).
    float* d_ImgOutRe;
    float* d_ImgOutIm;
    size_t mem3dfloat = size3Darray*sizeof(float);
    hipMalloc((void**)&d_ImgOutRe, mem3dfloat);
    hipMalloc((void**)&d_ImgOutIm, mem3dfloat);

    /////////////////////////////////////////////////////////////////////////////////////////
    ///// Batched in-place 2D inverse FFT over all z slices.
    /////////////////////////////////////////////////////////////////////////////////////////
    int istride = 1; //means every element is used in the computation
    int ostride = 1; //means every element used in the computatio is output
    int idist = row*column;
    int odist = row*column;
    int inembed[] = { row,column };
    int onembed[] = { row,column };
    const int NRANK = 2;
    int n[NRANK] = { row,column };
    int BATCH = zrange;

    hipfftHandle BatchFFTPlan;
    if (hipfftPlanMany(&BatchFFTPlan, NRANK, n,
        inembed, istride, idist,// *inembed, istride, idist
        onembed, ostride, odist,// *onembed, ostride, odist
        HIPFFT_C2C, BATCH) != HIPFFT_SUCCESS)
    {
        // NOTE(review): device buffers allocated above leak on this path.
        fprintf(stderr, "CUFFT Error: Unable to create plan\n");
        return;
    }
    if (hipfftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, HIPFFT_BACKWARD) != HIPFFT_SUCCESS) {
        // NOTE(review): device buffers allocated above leak on this path.
        fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
        return;
    }
    hipfftDestroy(BatchFFTPlan);

    // Split the complex result and copy it back to the host.
    hipLaunchKernelGGL(( Cmplx2ReIm) , dim3(GridSizeTransfer), dim3(BlockSizeAll),0,0 , d_3DiFFT, d_ImgOutRe, d_ImgOutIm, size3Darray,numElements);

    hipMemcpy(h_ImgOutRe,d_ImgOutRe, mem3dfloat, hipMemcpyDeviceToHost);
    hipMemcpy(h_ImgOutIm,d_ImgOutIm, mem3dfloat, hipMemcpyDeviceToHost);

    //deallocate CUDA memory
    hipFree(d_bfpMag);
    hipFree(d_bfpPhase);
    hipFree(d_kernelPhase);
    hipFree(d_3DiFFT);
    hipFree(d_zscale);
    hipFree(d_imgProperties);
    hipFree(d_ImgOutRe);
    hipFree(d_ImgOutIm);
}
// Same pipeline as PropagateZslices, but returns only the magnitude of each
// propagated plane (via Cmplx2Mag) instead of real/imaginary parts.
//
// NOTE(review): as in PropagateZslices, makeKernel_nonefftshift is launched
// with 5 arguments while its declaration above takes 4 -- TODO confirm the
// intended kernel signature.
void PropagateZ_ReturnMagnitude(float* h_bfpMag, float* h_bfpPhase,
    float* h_ImgOutMag, float* zscale, int* arraySize, float* imgProperties) {

    //Extract the size of the 2D and 3D arrays, and their respect allocation sizes
    int row = arraySize[0];
    int column = arraySize[1];
    int zrange = arraySize[2];
    int resizeRow = arraySize[3];
    float MagXReScale = 1.0f / float(resizeRow);  // extra frequency rescale for the kernel

    // Upload imgProperties and compute the unshifted propagation kernel phase.
    int numElements = row*column;
    size_t mem2darray = numElements * sizeof(float);
    const int BlockSizeAll = 512;
    int GridSizeKernel = (numElements + BlockSizeAll - 1) / BlockSizeAll;

    float* d_kernelPhase;
    hipMalloc((void**)&d_kernelPhase, mem2darray);

    float *d_imgProperties;
    size_t sizePrp = 4 * sizeof(float);   // only 4 floats are uploaded
    hipMalloc((void**)&d_imgProperties, sizePrp);
    hipMemcpy(d_imgProperties, imgProperties, sizePrp, hipMemcpyHostToDevice);

    makeKernel_nonefftshift << <GridSizeKernel, BlockSizeAll, 0, 0 >> >(d_kernelPhase, row, column, d_imgProperties, MagXReScale);

    // Upload the BFP field.
    float* d_bfpMag;
    float* d_bfpPhase;
    hipMalloc((void**)&d_bfpMag, mem2darray);
    hipMalloc((void**)&d_bfpPhase, mem2darray);
    hipMemcpy(d_bfpMag, h_bfpMag, mem2darray, hipMemcpyHostToDevice);
    hipMemcpy(d_bfpPhase, h_bfpPhase, mem2darray, hipMemcpyHostToDevice);

    // Upload the per-plane z distances.
    float *d_zscale;
    size_t memzsize = zrange * sizeof(float);
    hipMalloc((void**)&d_zscale, memzsize);
    hipMemcpy(d_zscale, zscale, memzsize, hipMemcpyHostToDevice);

    // Build the 3D transfer stack (one complex 2D field per z plane).
    hipfftComplex *d_3DiFFT;
    int size3Darray = row*column*zrange;
    size_t mem3dsize = size3Darray * sizeof(hipfftComplex);
    hipMalloc((void**)&d_3DiFFT, mem3dsize);

    int GridSizeTransfer = (numElements*zrange / 16 + BlockSizeAll - 1) / BlockSizeAll;
    TransferFunction << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, numElements);

    // Magnitude output buffer.
    float* d_ImgOutMag;
    //float* d_ImgOutIm;
    size_t mem3dfloat = size3Darray * sizeof(float);
    hipMalloc((void**)&d_ImgOutMag, mem3dfloat);
    //hipMalloc((void**)&d_ImgOutIm, mem3dfloat);

    /////////////////////////////////////////////////////////////////////////////////////////
    ///// Batched in-place 2D inverse FFT over all z slices.
    /////////////////////////////////////////////////////////////////////////////////////////
    int istride = 1; //means every element is used in the computation
    int ostride = 1; //means every element used in the computatio is output
    int idist = row*column;
    int odist = row*column;
    int inembed[] = { row,column };
    int onembed[] = { row,column };
    const int NRANK = 2;
    int n[NRANK] = { row,column };
    int BATCH = zrange;

    hipfftHandle BatchFFTPlan;
    if (hipfftPlanMany(&BatchFFTPlan, NRANK, n,
        inembed, istride, idist,// *inembed, istride, idist
        onembed, ostride, odist,// *onembed, ostride, odist
        HIPFFT_C2C, BATCH) != HIPFFT_SUCCESS)
    {
        // NOTE(review): device buffers allocated above leak on this path.
        fprintf(stderr, "CUFFT Error: Unable to create plan\n");
        return;
    }
    if (hipfftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, HIPFFT_BACKWARD) != HIPFFT_SUCCESS) {
        // NOTE(review): device buffers allocated above leak on this path.
        fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
        return;
    }
    hipfftDestroy(BatchFFTPlan);

    // Reduce the complex result to magnitudes and copy back to the host.
    Cmplx2Mag << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_ImgOutMag, size3Darray, numElements);

    hipMemcpy(h_ImgOutMag, d_ImgOutMag, mem3dfloat, hipMemcpyDeviceToHost);
    //hipMemcpy(h_ImgOutIm, d_ImgOutIm, mem3dfloat, hipMemcpyDeviceToHost);

    //deallocate CUDA memory
    hipFree(d_bfpMag);
    hipFree(d_bfpPhase);
    hipFree(d_kernelPhase);
    hipFree(d_3DiFFT);
    hipFree(d_zscale);
    hipFree(d_imgProperties);
    hipFree(d_ImgOutMag);
    //hipFree(d_ImgOutIm);
}
void ReturnMagnitudeZStack(float* h_bfpMag, float* h_bfpPhase,
float* h_ImgOutMag, float* zscale, int* arraySize, float* imgProperties, int* GPUspecs) {
//Extract the size of the 2D and 3D arrays, and their respect allocation sizes
int row = arraySize[0];
int column = arraySize[1];
int zrange = arraySize[2];
//////////////////////////////////////////////////
//transfer data from host memory to GPU
//// idea is to avoid an expensive c++ allocation and copying values into a complex array format
////// Almost thinking of calculating the whole Kernel in the device to avoid 2 device transfers!
int numElements = row*column;
size_t mem2darray = numElements * sizeof(float);
const int BlockSizeAll = GPUspecs[0];
//originally 512
int GridSizeKernel = (numElements + BlockSizeAll - 1) / BlockSizeAll;
float* d_kernelPhase;
hipMalloc((void**)&d_kernelPhase, mem2darray);
float *d_imgProperties;
size_t sizePrp = 4 * sizeof(float);
hipMalloc((void**)&d_imgProperties, sizePrp);
hipMemcpy(d_imgProperties, imgProperties, sizePrp, hipMemcpyHostToDevice);
makeKernel_nonefftshift << <GridSizeKernel, BlockSizeAll, 0, 0 >> >(d_kernelPhase, row, column, d_imgProperties, MagXReScale);
float* d_bfpMag;
float* d_bfpPhase;
hipMalloc((void**)&d_bfpMag, mem2darray);
hipMalloc((void**)&d_bfpPhase, mem2darray);
hipMemcpy(d_bfpMag, h_bfpMag, mem2darray, hipMemcpyHostToDevice);
hipMemcpy(d_bfpPhase, h_bfpPhase, mem2darray, hipMemcpyHostToDevice);
float *d_zscale;
size_t memzsize = zrange * sizeof(float);
hipMalloc((void**)&d_zscale, memzsize);
hipMemcpy(d_zscale, zscale, memzsize, hipMemcpyHostToDevice);
//preallocate space for 3D array, this will be a bit costly but lets go ahead with it
hipfftComplex *d_3DiFFT;
int size3Darray = row*column*zrange;
size_t mem3dsize = size3Darray * sizeof(hipfftComplex);
hipMalloc((void**)&d_3DiFFT, mem3dsize);
//Execute Kernels
int GridSizeTransfer = (numElements*zrange / 16 + BlockSizeAll - 1) / BlockSizeAll;
TransferFunction << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, numElements);
//Allocate cuda memory for 3D FFT
float* d_ImgOutMag;
size_t mem3dfloat = size3Darray * sizeof(float);
hipMalloc((void**)&d_ImgOutMag, mem3dfloat);
/////////////////////////////////////////////////////////////////////////////////////////
///// Prepare batch 2D FFT plan, const declaration
/////////////////////////////////////////////////////////////////////////////////////////
/* Create a batched 2D plan, or batch FFT , need to declare when each image begins! */
int istride = 1; //means every element is used in the computation
int ostride = 1; //means every element used in the computatio is output
int idist = row*column;
int odist = row*column;
int inembed[] = { row,column };
int onembed[] = { row,column };
const int NRANK = 2;
int n[NRANK] = { row,column };
int BATCH = zrange;
hipfftHandle BatchFFTPlan;
if (hipfftPlanMany(&BatchFFTPlan, NRANK, n,
inembed, istride, idist,// *inembed, istride, idist
onembed, ostride, odist,// *onembed, ostride, odist
HIPFFT_C2C, BATCH) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT Error: Unable to create plan\n");
return;
}
//////// Execute the transform in-place
if (hipfftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, HIPFFT_BACKWARD) != HIPFFT_SUCCESS) {
fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
return;
}
//free handle , Although might be able to reuse upon the last execution
hipfftDestroy(BatchFFTPlan);
///////////
// FFT ends
///////////
//Kernel to transform into a LV happy readable array
Cmplx2Mag << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_ImgOutMag, size3Darray, numElements);
//Copy device memory to hosts
hipMemcpy(h_ImgOutMag, d_ImgOutMag, mem3dfloat, hipMemcpyDeviceToHost);
//deallocate CUDA memory
hipFree(d_bfpMag);
hipFree(d_bfpPhase);
hipFree(d_kernelPhase);
hipFree(d_3DiFFT);
hipFree(d_zscale);
hipFree(d_imgProperties);
hipFree(d_ImgOutMag);
}
void TestMakeKernel3D(float* h_bfpMag, float* h_bfpPhase,
float* h_ImgOutRe, float* h_ImgOutIm,
float* zscale, int* arraySize, float* imgProperties) {
//Extract the size of the 2D and 3D arrays, and their respect allocation sizes
int row = arraySize[0];
int column = arraySize[1];
int zrange = arraySize[2];
int resizeRow = arraySize[3];
float MagXReScale = 1.0f / float(resizeRow);
const int BlockSize = 512;
int GridSize = 32 * 16 * 4;
//////////////////////////////////////////////////
//transfer data from host memory to GPU
//// idea is to avoid an expensive c++ allocation and copying values into a complex array format
////// Almost thinking of calculating the whole Kernel in the device to avoid 2 device transfers!
int numElements = row*column;
size_t mem2darray = numElements * sizeof(float);
float* d_kernelPhase;
hipMalloc((void**)&d_kernelPhase, mem2darray);
float *d_imgProperties;
size_t sizePrp = 4 * sizeof(float);
hipMalloc((void**)&d_imgProperties, sizePrp);
hipMemcpy(d_imgProperties, imgProperties, sizePrp, hipMemcpyHostToDevice);
makeKernel << <GridSize, BlockSize, 0, 0 >> >(d_kernelPhase, row, column, d_imgProperties, MagXReScale);
float* d_bfpMag;
float* d_bfpPhase;
hipMalloc((void**)&d_bfpMag, mem2darray);
hipMalloc((void**)&d_bfpPhase, mem2darray);
hipMemcpy(d_bfpMag, h_bfpMag, mem2darray, hipMemcpyHostToDevice);
hipMemcpy(d_bfpPhase, h_bfpPhase, mem2darray, hipMemcpyHostToDevice);
float *d_zscale;
size_t memzsize = zrange * sizeof(float);
hipMalloc((void**)&d_zscale, memzsize);
hipMemcpy(d_zscale, zscale, memzsize, hipMemcpyHostToDevice);
//preallocate space for 3D array, this will be a bit costly but lets go ahead with it
hipfftComplex *d_3DiFFT;
int size3Darray = row*column*zrange;
size_t mem3dsize = size3Darray * sizeof(hipfftComplex);
hipMalloc((void**)&d_3DiFFT, mem3dsize);
//given that LV does not accept the cmplx number array format as any I/O I need to transform the cmplx 3D array into re and im.
float* d_ImgOutRe;
float* d_ImgOutIm;
size_t mem3dfloat = size3Darray * sizeof(float);
hipMalloc((void**)&d_ImgOutRe, mem3dfloat);
hipMalloc((void**)&d_ImgOutIm, mem3dfloat);
//Execute Kernels
//TransferFunction << <GridSize, BlockSize, 0, 0 >> > (d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, numElements);
//Kernel to transform into a LV happy readable array
//Cmplx2ReIm << <GridSize, BlockSize, 0, 0 >> > (d_3DiFFT, d_ImgOutRe, d_ImgOutIm, size3Darray);
//Copy device memory to host
hipMemcpy(h_ImgOutRe, d_ImgOutRe, mem3dfloat, hipMemcpyDeviceToHost);
hipMemcpy(h_ImgOutIm, d_ImgOutIm, mem3dfloat, hipMemcpyDeviceToHost);
hipMemcpy(h_bfpPhase, d_kernelPhase, mem2darray, hipMemcpyDeviceToHost);
//deallocate CUDA memory
hipFree(d_bfpMag);
hipFree(d_bfpPhase);
hipFree(d_kernelPhase);
hipFree(d_3DiFFT);
hipFree(d_zscale);
hipFree(d_imgProperties);
hipFree(d_ImgOutRe);
hipFree(d_ImgOutIm);
}
| 3cca1eca5452a76a266c15f772267810e271d25f.cu |
#include "CudaDLL.h"
#include <stdio.h>
#include <cufft.h>
#include <cuComplex.h>
#include <device_functions.h>
#include <math.h>
#include <float.h>
///////////////////////////////
///////////// Device specific operations
//////////////////////////
//#define IDX2R(i,j,N) (((i)*(N))+(j)) //easy way to address 2D array
__global__ void fftshift_2D(cufftComplex *data, int arraysize, int row)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < arraysize; i += numThreads) {
int k=i%row;
int j=i/row;
float a = 1 - 2 * ((k + j) & 1);
data[i].x *= a;
data[i].y *= a;
}
}
__device__ static __inline__ float cmagf2(float x, float y)
{
float a, b, v, w, t;
a = fabsf(x);
b = fabsf(y);
if (a > b) {
v = a;
w = b;
}
else {
v = b;
w = a;
}
t = w / v;
t = 1.0f + t * t;
t = v * sqrtf(t);
if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) {
t = v + w;
}
return t;
}
////////////////////////////////
////////GPU Kernels
//////////////////////////////
//this kernel requires fftshift
__global__ void makeKernel(float* KernelPhase, int row, int column, float* ImgProperties, float MagXscaling) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float MagX = ImgProperties[1];
float pixSize= ImgProperties[0];
float nm = ImgProperties[2];
float lambda = ImgProperties[3];
float pixdxInv = MagX/pixSize*MagXscaling; // Magnification/pixSize
float km = nm/lambda; // nm / lambda
for (int i = threadID; i < row*column; i += numThreads) {
int dx = i%row;
int dy = i/row;
float kdx = float( dx - row/2)*pixdxInv;
float kdy = float( dy - row/2)*pixdxInv;
float temp = km*km - kdx*kdx - kdy*kdy;
KernelPhase[i]= (temp >= 0) ? (sqrtf(temp)-km) : 0;
//This still needs quadrant swapping so this will not work in the ifft routine as is!
}
}
///Generates a kernel that is compatible with the non-shifted fft routine
__global__ void makeKernel_nonefftshift(float* KernelPhase, int row, int column, float* ImgProperties) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float pixSize = ImgProperties[0];
float MagX = ImgProperties[1];
float nmed = ImgProperties[2];
float lambda = ImgProperties[3];
float MagXscaling = 1/ImgProperties[4];
float pixdxInv = MagX / pixSize*MagXscaling; // Magnification/pixSize
float km = nmed / lambda; // nmed / lambda
for (int i = threadID; i < row*column; i += numThreads) {
int dx = i % row;
int dy = i / row;
dx= ((dx - row / 2)>0) ? (dx - row) : dx;
dy= ((dy - row / 2)>0) ? (dy - row) : dy;
float kdx = float(dx)*pixdxInv;
float kdy = float(dy)*pixdxInv;
float temp = km*km - kdx*kdx - kdy*kdy;
KernelPhase[i] = (temp >= 0) ? (sqrtf(temp)-km) : 0;
}
}
__global__ void makeKernelPhase(float* KernelPhase, int row, int column, float* ImgProperties) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const float pixdxInv = ImgProperties[1] / ImgProperties[0]; // Magnification/pixSize
const float km = ImgProperties[2] / ImgProperties[3]; // nm / lambda
for (int i = threadID; i < row*column; i += numThreads) {
int dx = i % row;
int dy = i / row;
dx = ((dx - row / 2)>0) ? (dx - row) : dx;
dy = ((dy - row / 2)>0) ? (dy - row) : dy;
float kdx = float(dx)*pixdxInv/row;
float kdy = float(dy)*pixdxInv/row;
float temp = km*km - kdx*kdx - kdy*kdy;
KernelPhase[i] = (temp >= 0) ? (sqrtf(temp)-km) : 0;
}
}
__global__ void TransferFunction(cufftComplex* img3Darray, float* bfpMag, float* bfpPhase, float* kPhase, float* zDist, int totalsize, int imgsize)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
//additional counters
for (int i = threadID; i < totalsize; i += numThreads)
{
int j = i / imgsize;
int k = i % imgsize;
float mag = bfpMag[k];
float phase = bfpPhase[k]+(kPhase[k]*zDist[j]); //multiply here already , absorb the 2*pi in there
img3Darray[i].x = mag*cosf(phase);
img3Darray[i].y = mag*sinf(phase);
}
}
__global__ void Cmplx2ReIm(cufftComplex* cmplxArray, float* reArray, float* imgArray, int size, int imgsize) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads){
int k = i/imgsize; //does this do anything????
reArray[i] = cmplxArray[i].x;
imgArray[i] = cmplxArray[i].y;
}
}
__global__ void Cmplx2Mag(cufftComplex* cmplxArray, float* MagArray, int size, int imgsize) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads) {
int k = i / imgsize;
MagArray[i] = cmagf2(cmplxArray[i].x, cmplxArray[i].y);
//imgArray[i] = cmplxArray[i].y;
}
}
////////////////////////////////////////////////
//////////////// FUnction to compile into DLL
////////////////////////////////////////////////
void GPU_Holo_v1(float* h_bfpMag, float* h_bfpPhase,
float* h_ImgOutRe, float* h_ImgOutIm,
float* zscale, int* arraySize, float* imgProperties) {
// Declare all constants here from the array size
// arraySize={row,column,zrange, resizeRow}
// note that zscale has already been multiplied by 2pi, just so that C does not have to do so
const int row = arraySize[0];
const int column = arraySize[1];
const int zrange = arraySize[2];
const size_t memZsize = zrange * sizeof(float);
const int size2Darray = row * column;
const size_t mem2Darray = size2Darray * sizeof(float);
const int size3Darray = row * column * zrange;
const size_t mem3Darray = size3Darray * sizeof(float);
const size_t mem3dsize = size3Darray * sizeof(cufftComplex);
const int resizeRow = arraySize[3];
const float MagXReScale = 1.0f / float(resizeRow);
// Declare all constant regarding the Kernel execution sizes, will need to add a possibility to modify these from the LV as arguments
const int BlockSizeAll = 512;
const int GridSizeKernel = (size2Darray + BlockSizeAll - 1) / BlockSizeAll;
const int GridSizeTransfer = (size3Darray/16 + BlockSizeAll - 1) / BlockSizeAll;
/////////////////////////////////////
/// Calculate the Propagation Kernel
/////////////////////////////////////
float* d_kernelPhase, float* d_imgProperties;
const size_t sizePrp = 4 * sizeof(float);
cudaMalloc((void**)&d_kernelPhase, mem2Darray);
cudaMalloc((void**)&d_imgProperties, sizePrp);
cudaMemcpy(d_imgProperties, imgProperties, sizePrp, cudaMemcpyHostToDevice);
makeKernelPhase <<< GridSizeKernel, BlockSizeAll, 0, 0 >>>(d_kernelPhase, row, column, d_imgProperties);
//preallocate space for 3D array, this will be a bit costly but lets go ahead with it
float* d_bfpMag, float* d_bfpPhase, float *d_zscale;
cufftComplex *d_3DiFFT;
cudaMalloc((void**)&d_bfpMag, mem2Darray);
cudaMalloc((void**)&d_bfpPhase, mem2Darray);
cudaMalloc((void**)&d_zscale, memZsize);
cudaMemcpy(d_bfpMag, h_bfpMag, mem2Darray, cudaMemcpyHostToDevice);
cudaMemcpy(d_bfpPhase, h_bfpPhase, mem2Darray, cudaMemcpyHostToDevice);
cudaMemcpy(d_zscale, zscale, memZsize, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_3DiFFT, mem3dsize);
//Execute Kernels
TransferFunction << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, size2Darray);
//deallocate CUDA memory
cudaFree(d_bfpMag);
cudaFree(d_bfpPhase);
cudaFree(d_zscale);
cudaFree(d_imgProperties);
cudaFree(d_kernelPhase);
//given that LV does not accept the cmplx number array format as any I/O I need to transform the cmplx 3D array into re and im.
// temporarily removed ... as the copy could be done in a single pass!
float* d_ImgOutRe, float* d_ImgOutIm;
cudaMalloc((void**)&d_ImgOutRe, mem3Darray);
cudaMalloc((void**)&d_ImgOutIm, mem3Darray);
/////////////////////////////////////////////////////////////////////////////////////////
///// Prepare batch 2D FFT plan, const declaration , should be just called a function
/////////////////////////////////////////////////////////////////////////////////////////
/* Create a batched 2D plan, or batch FFT , need to declare when each image begins! */
int istride = 1; //means every element is used in the computation
int ostride = 1; //means every element used in the computatio is output
int idist = row*column;
int odist = row*column;
int inembed[] = { row,column };
int onembed[] = { row,column };
const int NRANK = 2;
int n[NRANK] = { row,column };
int BATCH = zrange;
cufftHandle BatchFFTPlan;
if (cufftPlanMany(&BatchFFTPlan, NRANK, n,
inembed, istride, idist,// *inembed, istride, idist
onembed, ostride, odist,// *onembed, ostride, odist
CUFFT_C2C, BATCH) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT Error: Unable to create plan\n");
return;
}
//////// Execute the transform in-place
if (cufftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, CUFFT_INVERSE) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
return;
}
//free handle , Although might be able to reuse upon the last execution
cufftDestroy(BatchFFTPlan);
///////////
// FFT ends
///////////
//Kernel to transform into a LV happy readable array
Cmplx2ReIm <<<GridSizeTransfer, BlockSizeAll, 0, 0 >>> (d_3DiFFT, d_ImgOutRe, d_ImgOutIm, size3Darray, size2Darray);
cudaFree(d_3DiFFT);
cudaMemcpy(h_ImgOutRe, d_ImgOutRe, mem3Darray, cudaMemcpyDeviceToHost);
cudaMemcpy(h_ImgOutIm, d_ImgOutIm, mem3Darray, cudaMemcpyDeviceToHost);
cudaFree(d_ImgOutRe);
cudaFree(d_ImgOutIm);
}
void GPU_Holo_v2(float* h_bfpMag, float* h_bfpPhase,
float* h_ImgOutAmp, float* zscale, int* arraySize, float* imgProperties) {
// Declare all constants here from the array size
// arraySize={row,column,zrange, resizeRow}
// note that zscale has already been multiplied by 2pi, just so that C does not have to do so
const int row = arraySize[0];
const int column = arraySize[1];
const int zrange = arraySize[2];
const size_t memZsize = zrange * sizeof(float);
const int size2Darray = row * column;
const size_t mem2Darray = size2Darray * sizeof(float);
const int size3Darray = row * column * zrange;
const size_t mem3Darray = size3Darray * sizeof(float);
const size_t mem3dsize = size3Darray * sizeof(cufftComplex);
const int resizeRow = arraySize[3];
const float MagXReScale = 1.0f / float(resizeRow);
// Declare all constant regarding the Kernel execution sizes, will need to add a possibility to modify these from the LV as arguments
const int BlockSizeAll = 512;
const int GridSizeKernel = (size2Darray + BlockSizeAll - 1) / BlockSizeAll;
const int GridSizeTransfer = (size3Darray / 16 + BlockSizeAll - 1) / BlockSizeAll;
/////////////////////////////////////
/// Calculate the Propagation Kernel
/////////////////////////////////////
float* d_kernelPhase, float* d_imgProperties;
const size_t sizePrp = 4 * sizeof(float);
cudaMalloc((void**)&d_kernelPhase, mem2Darray);
cudaMalloc((void**)&d_imgProperties, sizePrp);
cudaMemcpy(d_imgProperties, imgProperties, sizePrp, cudaMemcpyHostToDevice);
makeKernelPhase << < GridSizeKernel, BlockSizeAll, 0, 0 >> >(d_kernelPhase, row, column, d_imgProperties);
//preallocate space for 3D array, this will be a bit costly but lets go ahead with it
float* d_bfpMag, float* d_bfpPhase, float *d_zscale;
cufftComplex *d_3DiFFT;
cudaMalloc((void**)&d_bfpMag, mem2Darray);
cudaMalloc((void**)&d_bfpPhase, mem2Darray);
cudaMalloc((void**)&d_zscale, memZsize);
cudaMemcpy(d_bfpMag, h_bfpMag, mem2Darray, cudaMemcpyHostToDevice);
cudaMemcpy(d_bfpPhase, h_bfpPhase, mem2Darray, cudaMemcpyHostToDevice);
cudaMemcpy(d_zscale, zscale, memZsize, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_3DiFFT, mem3dsize);
//Execute Kernels
TransferFunction << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, size2Darray);
//deallocate CUDA memory
cudaFree(d_bfpMag);
cudaFree(d_bfpPhase);
cudaFree(d_zscale);
cudaFree(d_imgProperties);
cudaFree(d_kernelPhase);
//given that LV does not accept the cmplx number array format as any I/O I need to transform the cmplx 3D array into re and im.
// temporarily removed ... as the copy could be done in a single pass!
float* d_ImgOutAmp;
cudaMalloc((void**)&d_ImgOutAmp, mem3Darray);
/////////////////////////////////////////////////////////////////////////////////////////
///// Prepare batch 2D FFT plan, const declaration , should be just called a function
/////////////////////////////////////////////////////////////////////////////////////////
/* Create a batched 2D plan, or batch FFT , need to declare when each image begins! */
int istride = 1; //means every element is used in the computation
int ostride = 1; //means every element used in the computatio is output
int idist = row*column;
int odist = row*column;
int inembed[] = { row,column };
int onembed[] = { row,column };
const int NRANK = 2;
int n[NRANK] = { row,column };
int BATCH = zrange;
cufftHandle BatchFFTPlan;
if (cufftPlanMany(&BatchFFTPlan, NRANK, n,
inembed, istride, idist,// *inembed, istride, idist
onembed, ostride, odist,// *onembed, ostride, odist
CUFFT_C2C, BATCH) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT Error: Unable to create plan\n");
return;
}
//////// Execute the transform in-place
if (cufftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, CUFFT_INVERSE) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
return;
}
//free handle , Although might be able to reuse upon the last execution
cufftDestroy(BatchFFTPlan);
///////////
// FFT ends
///////////
//Kernel to transform into a LV happy readable array
Cmplx2Mag << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_ImgOutAmp, size3Darray, size2Darray);
cudaFree(d_3DiFFT);
cudaMemcpy(h_ImgOutAmp, d_ImgOutAmp, mem3Darray, cudaMemcpyDeviceToHost);
cudaFree(d_ImgOutAmp);
}
void PropagateZslices(float* h_bfpMag, float* h_bfpPhase,
float* h_ImgOutRe, float* h_ImgOutIm,
float* zscale, int* arraySize, float* imgProperties){
//Extract the size of the 2D and 3D arrays, and their respect allocation sizes
int row = arraySize[0];
int column = arraySize[1];
int zrange = arraySize[2];
int resizeRow = arraySize[3];
float MagXReScale = 1.0f/float(resizeRow);
//////////////////////////////////////////////////
//transfer data from host memory to GPU
//// idea is to avoid an expensive c++ allocation and copying values into a complex array format
////// Almost thinking of calculating the whole Kernel in the device to avoid 2 device transfers!
int numElements = row*column;
size_t mem2darray = numElements*sizeof(float);
const int BlockSizeAll = 512;
int GridSizeKernel = (numElements + BlockSizeAll-1)/BlockSizeAll;
float* d_kernelPhase;
cudaMalloc((void**)&d_kernelPhase, mem2darray);
float *d_imgProperties;
size_t sizePrp = 4 * sizeof(float);
cudaMalloc((void**)&d_imgProperties, sizePrp);
cudaMemcpy(d_imgProperties, imgProperties, sizePrp, cudaMemcpyHostToDevice);
makeKernel_nonefftshift <<<GridSizeKernel, BlockSizeAll,0,0 >>>(d_kernelPhase, row, column, d_imgProperties, MagXReScale);
float* d_bfpMag;
float* d_bfpPhase;
cudaMalloc((void**)&d_bfpMag, mem2darray);
cudaMalloc((void**)&d_bfpPhase, mem2darray);
cudaMemcpy(d_bfpMag, h_bfpMag, mem2darray, cudaMemcpyHostToDevice);
cudaMemcpy(d_bfpPhase, h_bfpPhase, mem2darray, cudaMemcpyHostToDevice);
float *d_zscale;
size_t memzsize = zrange * sizeof(float);
cudaMalloc((void**)&d_zscale, memzsize);
cudaMemcpy(d_zscale, zscale, memzsize, cudaMemcpyHostToDevice);
//preallocate space for 3D array, this will be a bit costly but lets go ahead with it
cufftComplex *d_3DiFFT;
int size3Darray = row*column*zrange;
size_t mem3dsize = size3Darray * sizeof(cufftComplex);
cudaMalloc((void**)&d_3DiFFT, mem3dsize);
//Execute Kernels
int GridSizeTransfer = (numElements*zrange/16+BlockSizeAll-1)/BlockSizeAll;
TransferFunction <<<GridSizeTransfer, BlockSizeAll,0,0 >>> (d_3DiFFT, d_bfpMag , d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, numElements);
//given that LV does not accept the cmplx number array format as any I/O I need to transform the cmplx 3D array into re and im.
// temporarily removed ... as the copy could be done in a single pass!
float* d_ImgOutRe;
float* d_ImgOutIm;
size_t mem3dfloat = size3Darray*sizeof(float);
cudaMalloc((void**)&d_ImgOutRe, mem3dfloat);
cudaMalloc((void**)&d_ImgOutIm, mem3dfloat);
/////////////////////////////////////////////////////////////////////////////////////////
///// Prepare batch 2D FFT plan, const declaration
/////////////////////////////////////////////////////////////////////////////////////////
/* Create a batched 2D plan, or batch FFT , need to declare when each image begins! */
int istride = 1; //means every element is used in the computation
int ostride = 1; //means every element used in the computatio is output
int idist = row*column;
int odist = row*column;
int inembed[] = { row,column };
int onembed[] = { row,column };
const int NRANK = 2;
int n[NRANK] = { row,column };
int BATCH = zrange;
cufftHandle BatchFFTPlan;
if (cufftPlanMany(&BatchFFTPlan, NRANK, n,
inembed, istride, idist,// *inembed, istride, idist
onembed, ostride, odist,// *onembed, ostride, odist
CUFFT_C2C, BATCH) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT Error: Unable to create plan\n");
return;
}
//////// Execute the transform in-place
if (cufftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, CUFFT_INVERSE) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
return;
}
//free handle , Although might be able to reuse upon the last execution
cufftDestroy(BatchFFTPlan);
///////////
// FFT ends
///////////
//Kernel to transform into a LV happy readable array
Cmplx2ReIm <<<GridSizeTransfer, BlockSizeAll,0,0 >>> (d_3DiFFT, d_ImgOutRe, d_ImgOutIm, size3Darray,numElements);
//Copy device memory to hosts
cudaMemcpy(h_ImgOutRe,d_ImgOutRe, mem3dfloat, cudaMemcpyDeviceToHost);
cudaMemcpy(h_ImgOutIm,d_ImgOutIm, mem3dfloat, cudaMemcpyDeviceToHost);
//deallocate CUDA memory
cudaFree(d_bfpMag);
cudaFree(d_bfpPhase);
cudaFree(d_kernelPhase);
cudaFree(d_3DiFFT);
cudaFree(d_zscale);
cudaFree(d_imgProperties);
cudaFree(d_ImgOutRe);
cudaFree(d_ImgOutIm);
}
void PropagateZ_ReturnMagnitude(float* h_bfpMag, float* h_bfpPhase,
float* h_ImgOutMag, float* zscale, int* arraySize, float* imgProperties) {
//Extract the size of the 2D and 3D arrays, and their respect allocation sizes
int row = arraySize[0];
int column = arraySize[1];
int zrange = arraySize[2];
int resizeRow = arraySize[3];
float MagXReScale = 1.0f / float(resizeRow);
//////////////////////////////////////////////////
//transfer data from host memory to GPU
//// idea is to avoid an expensive c++ allocation and copying values into a complex array format
////// Almost thinking of calculating the whole Kernel in the device to avoid 2 device transfers!
int numElements = row*column;
size_t mem2darray = numElements * sizeof(float);
const int BlockSizeAll = 512;
int GridSizeKernel = (numElements + BlockSizeAll - 1) / BlockSizeAll;
float* d_kernelPhase;
cudaMalloc((void**)&d_kernelPhase, mem2darray);
float *d_imgProperties;
size_t sizePrp = 4 * sizeof(float);
cudaMalloc((void**)&d_imgProperties, sizePrp);
cudaMemcpy(d_imgProperties, imgProperties, sizePrp, cudaMemcpyHostToDevice);
makeKernel_nonefftshift << <GridSizeKernel, BlockSizeAll, 0, 0 >> >(d_kernelPhase, row, column, d_imgProperties, MagXReScale);
float* d_bfpMag;
float* d_bfpPhase;
cudaMalloc((void**)&d_bfpMag, mem2darray);
cudaMalloc((void**)&d_bfpPhase, mem2darray);
cudaMemcpy(d_bfpMag, h_bfpMag, mem2darray, cudaMemcpyHostToDevice);
cudaMemcpy(d_bfpPhase, h_bfpPhase, mem2darray, cudaMemcpyHostToDevice);
float *d_zscale;
size_t memzsize = zrange * sizeof(float);
cudaMalloc((void**)&d_zscale, memzsize);
cudaMemcpy(d_zscale, zscale, memzsize, cudaMemcpyHostToDevice);
//preallocate space for 3D array, this will be a bit costly but lets go ahead with it
cufftComplex *d_3DiFFT;
int size3Darray = row*column*zrange;
size_t mem3dsize = size3Darray * sizeof(cufftComplex);
cudaMalloc((void**)&d_3DiFFT, mem3dsize);
//Execute Kernels
int GridSizeTransfer = (numElements*zrange / 16 + BlockSizeAll - 1) / BlockSizeAll;
TransferFunction << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, numElements);
//given that LV does not accept the cmplx number array format as any I/O I need to transform the cmplx 3D array into re and im.
// temporarily removed ... as the copy could be done in a single pass!
float* d_ImgOutMag;
//float* d_ImgOutIm;
size_t mem3dfloat = size3Darray * sizeof(float);
cudaMalloc((void**)&d_ImgOutMag, mem3dfloat);
//cudaMalloc((void**)&d_ImgOutIm, mem3dfloat);
/////////////////////////////////////////////////////////////////////////////////////////
///// Prepare batch 2D FFT plan, const declaration
/////////////////////////////////////////////////////////////////////////////////////////
/* Create a batched 2D plan, or batch FFT , need to declare when each image begins! */
int istride = 1; //means every element is used in the computation
int ostride = 1; //means every element used in the computatio is output
int idist = row*column;
int odist = row*column;
int inembed[] = { row,column };
int onembed[] = { row,column };
const int NRANK = 2;
int n[NRANK] = { row,column };
int BATCH = zrange;
cufftHandle BatchFFTPlan;
if (cufftPlanMany(&BatchFFTPlan, NRANK, n,
inembed, istride, idist,// *inembed, istride, idist
onembed, ostride, odist,// *onembed, ostride, odist
CUFFT_C2C, BATCH) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT Error: Unable to create plan\n");
return;
}
//////// Execute the transform in-place
if (cufftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, CUFFT_INVERSE) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
return;
}
//free handle , Although might be able to reuse upon the last execution
cufftDestroy(BatchFFTPlan);
///////////
// FFT ends
///////////
//Kernel to transform into a LV happy readable array
Cmplx2Mag << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_ImgOutMag, size3Darray, numElements);
//Copy device memory to hosts
cudaMemcpy(h_ImgOutMag, d_ImgOutMag, mem3dfloat, cudaMemcpyDeviceToHost);
//cudaMemcpy(h_ImgOutIm, d_ImgOutIm, mem3dfloat, cudaMemcpyDeviceToHost);
//deallocate CUDA memory
cudaFree(d_bfpMag);
cudaFree(d_bfpPhase);
cudaFree(d_kernelPhase);
cudaFree(d_3DiFFT);
cudaFree(d_zscale);
cudaFree(d_imgProperties);
cudaFree(d_ImgOutMag);
//cudaFree(d_ImgOutIm);
}
void ReturnMagnitudeZStack(float* h_bfpMag, float* h_bfpPhase,
float* h_ImgOutMag, float* zscale, int* arraySize, float* imgProperties, int* GPUspecs) {
//Extract the size of the 2D and 3D arrays, and their respect allocation sizes
int row = arraySize[0];
int column = arraySize[1];
int zrange = arraySize[2];
//////////////////////////////////////////////////
//transfer data from host memory to GPU
//// idea is to avoid an expensive c++ allocation and copying values into a complex array format
////// Almost thinking of calculating the whole Kernel in the device to avoid 2 device transfers!
int numElements = row*column;
size_t mem2darray = numElements * sizeof(float);
const int BlockSizeAll = GPUspecs[0];
//originally 512
int GridSizeKernel = (numElements + BlockSizeAll - 1) / BlockSizeAll;
float* d_kernelPhase;
cudaMalloc((void**)&d_kernelPhase, mem2darray);
float *d_imgProperties;
size_t sizePrp = 4 * sizeof(float);
cudaMalloc((void**)&d_imgProperties, sizePrp);
cudaMemcpy(d_imgProperties, imgProperties, sizePrp, cudaMemcpyHostToDevice);
makeKernel_nonefftshift << <GridSizeKernel, BlockSizeAll, 0, 0 >> >(d_kernelPhase, row, column, d_imgProperties, MagXReScale);
float* d_bfpMag;
float* d_bfpPhase;
cudaMalloc((void**)&d_bfpMag, mem2darray);
cudaMalloc((void**)&d_bfpPhase, mem2darray);
cudaMemcpy(d_bfpMag, h_bfpMag, mem2darray, cudaMemcpyHostToDevice);
cudaMemcpy(d_bfpPhase, h_bfpPhase, mem2darray, cudaMemcpyHostToDevice);
float *d_zscale;
size_t memzsize = zrange * sizeof(float);
cudaMalloc((void**)&d_zscale, memzsize);
cudaMemcpy(d_zscale, zscale, memzsize, cudaMemcpyHostToDevice);
//preallocate space for 3D array, this will be a bit costly but lets go ahead with it
cufftComplex *d_3DiFFT;
int size3Darray = row*column*zrange;
size_t mem3dsize = size3Darray * sizeof(cufftComplex);
cudaMalloc((void**)&d_3DiFFT, mem3dsize);
//Execute Kernels
int GridSizeTransfer = (numElements*zrange / 16 + BlockSizeAll - 1) / BlockSizeAll;
TransferFunction << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, numElements);
//Allocate cuda memory for 3D FFT
float* d_ImgOutMag;
size_t mem3dfloat = size3Darray * sizeof(float);
cudaMalloc((void**)&d_ImgOutMag, mem3dfloat);
/////////////////////////////////////////////////////////////////////////////////////////
///// Prepare batch 2D FFT plan, const declaration
/////////////////////////////////////////////////////////////////////////////////////////
/* Create a batched 2D plan, or batch FFT , need to declare when each image begins! */
int istride = 1; //means every element is used in the computation
int ostride = 1; //means every element used in the computatio is output
int idist = row*column;
int odist = row*column;
int inembed[] = { row,column };
int onembed[] = { row,column };
const int NRANK = 2;
int n[NRANK] = { row,column };
int BATCH = zrange;
cufftHandle BatchFFTPlan;
if (cufftPlanMany(&BatchFFTPlan, NRANK, n,
inembed, istride, idist,// *inembed, istride, idist
onembed, ostride, odist,// *onembed, ostride, odist
CUFFT_C2C, BATCH) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT Error: Unable to create plan\n");
return;
}
//////// Execute the transform in-place
if (cufftExecC2C(BatchFFTPlan, d_3DiFFT, d_3DiFFT, CUFFT_INVERSE) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT Error: Failed to execute plan\n");
return;
}
//free handle , Although might be able to reuse upon the last execution
cufftDestroy(BatchFFTPlan);
///////////
// FFT ends
///////////
//Kernel to transform into a LV happy readable array
Cmplx2Mag << <GridSizeTransfer, BlockSizeAll, 0, 0 >> > (d_3DiFFT, d_ImgOutMag, size3Darray, numElements);
//Copy device memory to hosts
cudaMemcpy(h_ImgOutMag, d_ImgOutMag, mem3dfloat, cudaMemcpyDeviceToHost);
//deallocate CUDA memory
cudaFree(d_bfpMag);
cudaFree(d_bfpPhase);
cudaFree(d_kernelPhase);
cudaFree(d_3DiFFT);
cudaFree(d_zscale);
cudaFree(d_imgProperties);
cudaFree(d_ImgOutMag);
}
// Test/debug variant of the 3-D propagation pipeline: performs the same
// device allocations and host->device copies as the full path, but only the
// makeKernel launch is active -- the TransferFunction and Cmplx2ReIm stages
// are commented out.
// Inputs (host): h_bfpMag / h_bfpPhase are row*column back-focal-plane
// arrays; zscale holds zrange per-plane factors;
// arraySize = {row, column, zrange, resizeRow}; imgProperties is 4 floats
// whose meaning is defined by makeKernel (declared elsewhere).
// Outputs (host): h_ImgOutRe / h_ImgOutIm (row*column*zrange) and
// h_bfpPhase, which is overwritten with the computed kernel phase.
void TestMakeKernel3D(float* h_bfpMag, float* h_bfpPhase,
float* h_ImgOutRe, float* h_ImgOutIm,
float* zscale, int* arraySize, float* imgProperties) {
//Extract the size of the 2D and 3D arrays, and their respect allocation sizes
int row = arraySize[0];
int column = arraySize[1];
int zrange = arraySize[2];
int resizeRow = arraySize[3];
// Reciprocal of the resized row count, forwarded to makeKernel as a scale.
float MagXReScale = 1.0f / float(resizeRow);
const int BlockSize = 512;
// Fixed grid of 32*16*4 = 2048 blocks; makeKernel is assumed to cover all
// row*column elements with this configuration -- TODO confirm the kernel
// bounds-checks or strides internally.
int GridSize = 32 * 16 * 4;
//////////////////////////////////////////////////
//transfer data from host memory to GPU
//// idea is to avoid an expensive c++ allocation and copying values into a complex array format
////// Almost thinking of calculating the whole Kernel in the device to avoid 2 device transfers!
int numElements = row*column;
size_t mem2darray = numElements * sizeof(float);
float* d_kernelPhase;
cudaMalloc((void**)&d_kernelPhase, mem2darray);
float *d_imgProperties;
size_t sizePrp = 4 * sizeof(float);
cudaMalloc((void**)&d_imgProperties, sizePrp);
cudaMemcpy(d_imgProperties, imgProperties, sizePrp, cudaMemcpyHostToDevice);
// Compute the kernel phase on-device (avoids one H2D transfer).
makeKernel << <GridSize, BlockSize, 0, 0 >> >(d_kernelPhase, row, column, d_imgProperties, MagXReScale);
float* d_bfpMag;
float* d_bfpPhase;
cudaMalloc((void**)&d_bfpMag, mem2darray);
cudaMalloc((void**)&d_bfpPhase, mem2darray);
cudaMemcpy(d_bfpMag, h_bfpMag, mem2darray, cudaMemcpyHostToDevice);
cudaMemcpy(d_bfpPhase, h_bfpPhase, mem2darray, cudaMemcpyHostToDevice);
float *d_zscale;
size_t memzsize = zrange * sizeof(float);
cudaMalloc((void**)&d_zscale, memzsize);
cudaMemcpy(d_zscale, zscale, memzsize, cudaMemcpyHostToDevice);
//preallocate space for 3D array, this will be a bit costly but lets go ahead with it
cufftComplex *d_3DiFFT;
int size3Darray = row*column*zrange;
size_t mem3dsize = size3Darray * sizeof(cufftComplex);
cudaMalloc((void**)&d_3DiFFT, mem3dsize);
//given that LV does not accept the cmplx number array format as any I/O I need to transform the cmplx 3D array into re and im.
float* d_ImgOutRe;
float* d_ImgOutIm;
size_t mem3dfloat = size3Darray * sizeof(float);
cudaMalloc((void**)&d_ImgOutRe, mem3dfloat);
cudaMalloc((void**)&d_ImgOutIm, mem3dfloat);
//Execute Kernels
//TransferFunction << <GridSize, BlockSize, 0, 0 >> > (d_3DiFFT, d_bfpMag, d_bfpPhase, d_kernelPhase, d_zscale, size3Darray, numElements);
//Kernel to transform into a LV happy readable array
//Cmplx2ReIm << <GridSize, BlockSize, 0, 0 >> > (d_3DiFFT, d_ImgOutRe, d_ImgOutIm, size3Darray);
//Copy device memory to host
// NOTE(review): with the two kernels above commented out, d_ImgOutRe and
// d_ImgOutIm are never written, so these copies return uninitialized
// device memory -- presumably acceptable for this test harness; confirm.
cudaMemcpy(h_ImgOutRe, d_ImgOutRe, mem3dfloat, cudaMemcpyDeviceToHost);
cudaMemcpy(h_ImgOutIm, d_ImgOutIm, mem3dfloat, cudaMemcpyDeviceToHost);
cudaMemcpy(h_bfpPhase, d_kernelPhase, mem2darray, cudaMemcpyDeviceToHost);
//deallocate CUDA memory
cudaFree(d_bfpMag);
cudaFree(d_bfpPhase);
cudaFree(d_kernelPhase);
cudaFree(d_3DiFFT);
cudaFree(d_zscale);
cudaFree(d_imgProperties);
cudaFree(d_ImgOutRe);
cudaFree(d_ImgOutIm);
}
|
84dae9606aa687dbd5b806b446d9f325ac89ff9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "DomaineMath.h"
#include "HeatTransfert.h"
#include "Device.h"
#include <math.h>
#include <algorithm>
#include "MathTools.h"
using std::cout;
using std::endl;
extern __global__ void heatDiffusion(float* ptrDevIn, float* ptrDevOut, float k, int w, int h);
extern __global__ void heatEcrasement(float* ptrDevCurrent, float* ptrDevH, int w, int h);
extern __global__ void heatImageHSB(float* ptrDevIn, uchar4* ptrDevOut, CalibreurF calibreur, int w, int h);
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
/*
 * Builds the simulation state.
 * ptrH is the fixed source map (re-imposed every step); ptrA/ptrB are the
 * two ping-pong temperature buffers: A starts as a copy of H, B starts at
 * zero. Device mirrors of all three are allocated and seeded from host.
 * w, h: image dimensions; blindIterations: initial value for both
 * nbIteration and nbBlindIteration; k: diffusion coefficient.
 * NOTE(review): the hard-coded source coordinates (up to index 621) assume
 * w and h are at least 622 -- confirm the caller's image size.
 */
HeatTransfert::HeatTransfert(int w, int h, int blindIterations, float k) :
calibreur(IntervalF(0, 1), IntervalF(0.7, 0)), isAInput(true), nbIteration(blindIterations), nbBlindIteration(blindIterations), k(k), ptrH(
new float[w * h]), ptrA(new float[w * h]), ptrB(new float[w * h])
{
// H and B start fully zeroed; A is filled from H below.
std::fill(ptrH, ptrH + w * h, 0);
std::fill(ptrB, ptrB + w * h, 0);
// Inputs
this->w = w;
this->h = h;
this->t =0;
// 1-D launch configuration: 256 blocks of 256 threads.
this->dg = dim3(256, 1, 1);
this->db = dim3(256, 1, 1);
// Outputs
this->title = "HeatTransfert Cuda";
// fill/initialize the image
// Central 201x201 hot square held at 1.
for (int i = 300; i <= 500; ++i)
for (int j = 300; j <= 500; ++j)
ptrH[i * w + j] = 1;
// Four 17x17 corner patches held at 0.2.
for (int j = 179; j <= 195; ++j)
{
for (int i = 179; i <= 195; ++i)
ptrH[i * w + j] = 0.2;
for (int i = 605; i <= 621; ++i)
ptrH[i * w + j] = 0.2;
}
for (int j = 605; j <= 621; ++j)
{
for (int i = 179; i <= 195; ++i)
ptrH[i * w + j] = 0.2;
for (int i = 605; i <= 621; ++i)
ptrH[i * w + j] = 0.2;
}
// Four single pixels just outside the hot square, also at 0.2.
ptrH[295 * w + 400] = ptrH[505 * w + 400] = ptrH[400 * w + 505] = ptrH[400 * w + 295] = 0.2f;
// Buffer A begins as an exact copy of the source map.
for (int i = 0; i < w * h; ++i)
ptrA[i] = ptrH[i];
// Allocate device mirrors and upload the host images.
instanciateHeatDevImage(&ptrH, &ptrDevH, w * h * sizeof(float));
instanciateHeatDevImage(&ptrA, &ptrDevA, w * h * sizeof(float));
instanciateHeatDevImage(&ptrB, &ptrDevB, w * h * sizeof(float));
}
// Releases every buffer owned by the simulation, device side first.
HeatTransfert::~HeatTransfert()
{
// Device-side images (order among these is irrelevant).
destructHeatDevImage(&ptrDevH);
destructHeatDevImage(&ptrDevB);
destructHeatDevImage(&ptrDevA);
// Host-side mirrors.
delete[] ptrH;
delete[] ptrB;
delete[] ptrA;
}
/*-------------------------*\
|* Methode override *|
\*-------------------------*/
// Advances the animation parameter t by one step.
void HeatTransfert::animationStep()
{
t = t + 1;
}
/*
 * Runs one animation frame: two diffusion + "ecrasement" (source re-impose)
 * passes per call, ping-ponging between buffers A and B, then renders into
 * the pixel buffer. Because an even number of passes runs per call, the
 * A/B roles are restored on return and isAInput never needs toggling.
 */
void HeatTransfert::process(uchar4* ptrDevPixels, int w, int h)
{
float *input, *output;
input = isAInput ? ptrDevA : ptrDevB;
output = isAInput ? ptrDevB : ptrDevA;
// Pass 1: diffuse input -> output, then clamp the fixed sources back on.
launchHeatDiffusion(input, output, k, w, h);
launchHeatEcrasement(output, ptrDevH, w, h);
// Pass 2: diffuse output -> input, then clamp again.
launchHeatDiffusion(output, input, k, w, h);
launchHeatEcrasement(input, ptrDevH, w, h);
// NOTE(review): the frame is rendered from 'output' (the pass-1 result)
// while 'input' holds the newer pass-2 state -- confirm this is intended.
launchHeatImageHSB(output, ptrDevPixels, calibreur, w, h);
}
// Allocates a device buffer of 'size' bytes into *ptrDev and seeds it with
// the host image *ptr. HANDLE_ERROR is assumed to abort/report on failure
// (macro defined elsewhere -- TODO confirm).
__host__ void HeatTransfert::instanciateHeatDevImage(float** ptr, float** ptrDev, size_t size)
{
HANDLE_ERROR(hipMalloc((void** )ptrDev, size));
HANDLE_ERROR(hipMemcpy(*ptrDev, *ptr, size, hipMemcpyHostToDevice));
}
// Frees the device buffer referenced by *ptrDev; the pointer itself is not
// nulled afterwards.
__host__ void HeatTransfert::destructHeatDevImage(float** ptrDev)
{
HANDLE_ERROR(hipFree(*ptrDev));
}
// Host wrapper: launches the heatDiffusion kernel (declared extern above)
// on the default stream with the grid/block configuration chosen in the
// constructor (dg = db = 256, 1-D).
__host__ void HeatTransfert::launchHeatDiffusion(float* ptrDevIn, float* ptrDevOut, float k, int w, int h)
{
hipLaunchKernelGGL(( heatDiffusion), dim3(this->dg), dim3(this->db), 0, 0, ptrDevIn, ptrDevOut, k, w, h);
}
// Host wrapper for the heatEcrasement kernel; presumably re-imposes the
// fixed source values from ptrDevH onto the current temperature buffer
// (kernel body defined elsewhere -- TODO confirm).
__host__ void HeatTransfert::launchHeatEcrasement(float* ptrDevCurrent, float* ptrDevH, int w, int h)
{
hipLaunchKernelGGL(( heatEcrasement), dim3(this->dg),dim3(this->db), 0, 0, ptrDevCurrent, ptrDevH, w, h);
}
// Host wrapper: converts the temperature field into uchar4 pixels via the
// heatImageHSB kernel; the calibreur maps values into the display range.
// Note the calibreur is passed by value to the kernel.
__host__ void HeatTransfert::launchHeatImageHSB(float* ptrDevIn, uchar4* ptrDevOut, CalibreurF& calibreur, int w, int h)
{
hipLaunchKernelGGL(( heatImageHSB), dim3(this->dg),dim3(this->db), 0, 0, ptrDevIn, ptrDevOut, calibreur, w, h);
}
// Returns the configured iteration count.
// NOTE(review): despite the name, this returns nbIteration rather than the
// animation time t (t is exposed via getAnimationPara) -- confirm intended.
float HeatTransfert::getT(void)
{
return nbIteration;
}
// Accessor for the demo/window title set in the constructor.
string HeatTransfert::getTitle(void)
{
return this->title;
}
// Accessor for the image width.
int HeatTransfert::getW(void)
{
return this->w;
}
// Accessor for the image height.
int HeatTransfert::getH(void)
{
return this->h;
}
/**
 * Override: exposes the current animation time parameter.
 */
float HeatTransfert::getAnimationPara(void)
{
return this->t;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 84dae9606aa687dbd5b806b446d9f325ac89ff9d.cu | #include <iostream>
#include <assert.h>
#include "DomaineMath.h"
#include "HeatTransfert.h"
#include "Device.h"
#include <math.h>
#include <algorithm>
#include "MathTools.h"
using std::cout;
using std::endl;
extern __global__ void heatDiffusion(float* ptrDevIn, float* ptrDevOut, float k, int w, int h);
extern __global__ void heatEcrasement(float* ptrDevCurrent, float* ptrDevH, int w, int h);
extern __global__ void heatImageHSB(float* ptrDevIn, uchar4* ptrDevOut, CalibreurF calibreur, int w, int h);
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
/*
 * Builds the simulation state.
 * ptrH is the fixed source map (re-imposed every step); ptrA/ptrB are the
 * two ping-pong temperature buffers: A starts as a copy of H, B starts at
 * zero. Device mirrors of all three are allocated and seeded from host.
 * w, h: image dimensions; blindIterations: initial value for both
 * nbIteration and nbBlindIteration; k: diffusion coefficient.
 * NOTE(review): the hard-coded source coordinates (up to index 621) assume
 * w and h are at least 622 -- confirm the caller's image size.
 */
HeatTransfert::HeatTransfert(int w, int h, int blindIterations, float k) :
calibreur(IntervalF(0, 1), IntervalF(0.7, 0)), isAInput(true), nbIteration(blindIterations), nbBlindIteration(blindIterations), k(k), ptrH(
new float[w * h]), ptrA(new float[w * h]), ptrB(new float[w * h])
{
// H and B start fully zeroed; A is filled from H below.
std::fill(ptrH, ptrH + w * h, 0);
std::fill(ptrB, ptrB + w * h, 0);
// Inputs
this->w = w;
this->h = h;
this->t =0;
// 1-D launch configuration: 256 blocks of 256 threads.
this->dg = dim3(256, 1, 1);
this->db = dim3(256, 1, 1);
// Outputs
this->title = "HeatTransfert Cuda";
// fill/initialize the image
// Central 201x201 hot square held at 1.
for (int i = 300; i <= 500; ++i)
for (int j = 300; j <= 500; ++j)
ptrH[i * w + j] = 1;
// Four 17x17 corner patches held at 0.2.
for (int j = 179; j <= 195; ++j)
{
for (int i = 179; i <= 195; ++i)
ptrH[i * w + j] = 0.2;
for (int i = 605; i <= 621; ++i)
ptrH[i * w + j] = 0.2;
}
for (int j = 605; j <= 621; ++j)
{
for (int i = 179; i <= 195; ++i)
ptrH[i * w + j] = 0.2;
for (int i = 605; i <= 621; ++i)
ptrH[i * w + j] = 0.2;
}
// Four single pixels just outside the hot square, also at 0.2.
ptrH[295 * w + 400] = ptrH[505 * w + 400] = ptrH[400 * w + 505] = ptrH[400 * w + 295] = 0.2f;
// Buffer A begins as an exact copy of the source map.
for (int i = 0; i < w * h; ++i)
ptrA[i] = ptrH[i];
// Allocate device mirrors and upload the host images.
instanciateHeatDevImage(&ptrH, &ptrDevH, w * h * sizeof(float));
instanciateHeatDevImage(&ptrA, &ptrDevA, w * h * sizeof(float));
instanciateHeatDevImage(&ptrB, &ptrDevB, w * h * sizeof(float));
}
// Releases every buffer owned by the simulation, device side first.
HeatTransfert::~HeatTransfert()
{
// Device-side images (order among these is irrelevant).
destructHeatDevImage(&ptrDevH);
destructHeatDevImage(&ptrDevB);
destructHeatDevImage(&ptrDevA);
// Host-side mirrors.
delete[] ptrH;
delete[] ptrB;
delete[] ptrA;
}
/*-------------------------*\
|* Methode override *|
\*-------------------------*/
// Advances the animation parameter t by one step.
void HeatTransfert::animationStep()
{
t = t + 1;
}
/*
 * Runs one animation frame: two diffusion + "ecrasement" (source re-impose)
 * passes per call, ping-ponging between buffers A and B, then renders into
 * the pixel buffer. Because an even number of passes runs per call, the
 * A/B roles are restored on return and isAInput never needs toggling.
 */
void HeatTransfert::process(uchar4* ptrDevPixels, int w, int h)
{
float *input, *output;
input = isAInput ? ptrDevA : ptrDevB;
output = isAInput ? ptrDevB : ptrDevA;
// Pass 1: diffuse input -> output, then clamp the fixed sources back on.
launchHeatDiffusion(input, output, k, w, h);
launchHeatEcrasement(output, ptrDevH, w, h);
// Pass 2: diffuse output -> input, then clamp again.
launchHeatDiffusion(output, input, k, w, h);
launchHeatEcrasement(input, ptrDevH, w, h);
// NOTE(review): the frame is rendered from 'output' (the pass-1 result)
// while 'input' holds the newer pass-2 state -- confirm this is intended.
launchHeatImageHSB(output, ptrDevPixels, calibreur, w, h);
}
// Allocates a device buffer of 'size' bytes into *ptrDev and seeds it with
// the host image *ptr. HANDLE_ERROR is assumed to abort/report on failure
// (macro defined elsewhere -- TODO confirm).
__host__ void HeatTransfert::instanciateHeatDevImage(float** ptr, float** ptrDev, size_t size)
{
HANDLE_ERROR(cudaMalloc((void** )ptrDev, size));
HANDLE_ERROR(cudaMemcpy(*ptrDev, *ptr, size, cudaMemcpyHostToDevice));
}
// Frees the device buffer referenced by *ptrDev; the pointer itself is not
// nulled afterwards.
__host__ void HeatTransfert::destructHeatDevImage(float** ptrDev)
{
HANDLE_ERROR(cudaFree(*ptrDev));
}
// Host wrapper: launches the heatDiffusion kernel (declared extern above)
// on the default stream with the grid/block configuration chosen in the
// constructor (dg = db = 256, 1-D).
__host__ void HeatTransfert::launchHeatDiffusion(float* ptrDevIn, float* ptrDevOut, float k, int w, int h)
{
heatDiffusion<<<this->dg, this->db>>>(ptrDevIn, ptrDevOut, k, w, h);
}
// Host wrapper for the heatEcrasement kernel; presumably re-imposes the
// fixed source values from ptrDevH onto the current temperature buffer
// (kernel body defined elsewhere -- TODO confirm).
__host__ void HeatTransfert::launchHeatEcrasement(float* ptrDevCurrent, float* ptrDevH, int w, int h)
{
heatEcrasement<<<this->dg,this->db>>>(ptrDevCurrent, ptrDevH, w, h);
}
// Host wrapper: converts the temperature field into uchar4 pixels via the
// heatImageHSB kernel; the calibreur maps values into the display range.
// Note the calibreur is passed by value to the kernel.
__host__ void HeatTransfert::launchHeatImageHSB(float* ptrDevIn, uchar4* ptrDevOut, CalibreurF& calibreur, int w, int h)
{
heatImageHSB<<<this->dg,this->db>>>(ptrDevIn, ptrDevOut, calibreur, w, h);
}
// Returns the configured iteration count.
// NOTE(review): despite the name, this returns nbIteration rather than the
// animation time t (t is exposed via getAnimationPara) -- confirm intended.
float HeatTransfert::getT(void)
{
return nbIteration;
}
// Accessor for the demo/window title set in the constructor.
string HeatTransfert::getTitle(void)
{
return this->title;
}
// Accessor for the image width.
int HeatTransfert::getW(void)
{
return this->w;
}
// Accessor for the image height.
int HeatTransfert::getH(void)
{
return this->h;
}
/**
 * Override: exposes the current animation time parameter.
 */
float HeatTransfert::getAnimationPara(void)
{
return this->t;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
dbba8a1175a32e3b95ca070a52d5c89a2dd50d5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "rocblas.h"
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* matrix size and thread dimensions */
#define SIZE 1024
/* define blocksize X and blocksize Y and blocksize K */
#define THREADS_PER_BLOCK_X 16 // Thread block size, x dimension
#define THREADS_PER_BLOCK_Y 16 // Thread block size, y dimension
#define BLOCK_K 16 // square block of K size
/* Tiled shared-memory matrix multiply: C = A * B for square m x m matrices
 * stored column-major (see INDX). Each thread block computes a
 * THREADS_PER_BLOCK_X x THREADS_PER_BLOCK_Y tile of C, marching over the K
 * dimension in BLOCK_K-wide slabs staged through shared memory. The A-tile
 * load as[tx][ty] relies on THREADS_PER_BLOCK_Y == BLOCK_K (both 16 here).
 * Preconditions: blockDim == (THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y)
 * and m is a multiple of the tile sizes -- there are no bounds checks; the
 * launch in main() guarantees this for SIZE = 1024. */
__global__ void GPU_shmem2(const int m, double const * const a, double const * const b, double *c )
{
/* setup some constanst for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * THREADS_PER_BLOCK_Y;
const int ibx = blockIdx.x * THREADS_PER_BLOCK_X;
/* shared memory arrays for A and B; the inner dimension is padded by one
 * element (the usual shared-memory bank-conflict-avoidance idiom) */
__shared__ double as[ THREADS_PER_BLOCK_X ][ BLOCK_K + 1 ];
__shared__ double bs[ BLOCK_K ][ THREADS_PER_BLOCK_Y + 1 ];
/* space for C to be held in registers */
double c_tmp = 0.0 ;
/* calculate my initial offset into A and B */
int aoff = INDX( ibx + tx, ty, m );
int boff = INDX( tx, iby + ty, m );
/* main loop over blocks of K */
for( int Kblock = 0; Kblock < m; Kblock+=BLOCK_K )
{
/* read block of A into shared memory */
as[ tx ][ ty ] = a[ aoff ];
/* read block of B into shared memory */
bs[ tx ][ ty ] = b[ boff ];
/* barrier: tiles must be fully populated before any thread consumes them */
__syncthreads();
/* increment A and B offsets for next round of data reads */
boff += BLOCK_K;
aoff += m * BLOCK_K;
/* inner product over the staged BLOCK_K slab */
#pragma unroll
for( int k = 0 ; k < BLOCK_K ; k++ )
{
c_tmp += as[ tx ][ k ] * bs[ k ][ ty ];
}
/* barrier: finish reading before the next iteration overwrites the tiles */
__syncthreads();
} /* end for Kblock */
/* set C to its proper index int the C matrix */
int coff = INDX( ibx + tx, iby + ty, m );
/* write results to the C matrix */
c[ coff ] = c_tmp;
} /* end GPU_shmem2 */
/* Driver: multiplies two random SIZE x SIZE double matrices with hipBLAS
 * (reference result in h_c) and with the shared-memory kernel GPU_shmem2
 * (result in h_c1), times both with events, and reports GFlop/s plus the
 * sum of squared differences between the two results.
 * Returns 0 on success, 911 if a host allocation fails.
 * Note: CUDA_CALL / CUDA_CHECK only check errors when DEBUG is defined
 * (see the macros at the top of this file). */
int main( int argc, char *argv[] )
{
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
/* h_* are host buffers, d_* their device counterparts */
double *h_a, *h_b, *h_c, *h_c1;
double *d_a, *d_b, *d_c;
/* size_t casts keep the byte count out of 32-bit int arithmetic */
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double );
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c1 = (double *) malloc( numbytes );
if( h_c1 == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices with uniform values in [0, 1) */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
CUDA_CALL( hipMalloc( (void **)&d_a, numbytes ) );
CUDA_CALL( hipMalloc( (void **)&d_b, numbytes ) );
CUDA_CALL( hipMalloc( (void **)&d_c, numbytes ));
/* copy a and b to device */
CUDA_CALL( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) );
CUDA_CALL( hipMemcpy( d_b, h_b, numbytes, hipMemcpyHostToDevice ) );
hipblasHandle_t handle;
hipblasStatus_t stat = hipblasCreate( &handle );
/* NOTE(review): 'stat' is never checked -- a failed hipblasCreate (and the
 * discarded status of hipblasDgemm below) would go unnoticed. */
double alpha = 1.0;
double beta = 0.0;
/* start timers */
hipEvent_t start, stop;
CUDA_CALL( hipEventCreate( &start ) );
CUDA_CALL( hipEventCreate( &stop ) );
CUDA_CALL( hipEventRecord( start, 0 ) );
/* call CUBLAS dgemm: C = alpha*A*B + beta*C, no transposes */
hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
size, size, size,
&alpha,
d_a, size,
d_b, size,
&beta,
d_c, size );
/* stop timers */
CUDA_CALL( hipEventRecord( stop, 0 ) );
CUDA_CALL( hipEventSynchronize( stop ) );
float elapsedTime;
CUDA_CALL( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU CUBLAS timing information (2*n^3 flops for a matmul) */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
CUDA_CALL( hipMemcpy( h_c, d_c, numbytes, hipMemcpyDeviceToHost ) );
/* reset C on device to zero */
CUDA_CALL( hipMemset( d_c, 0, numbytes ) );
/* setup grid and block sizes; SIZE is a multiple of both tile dims */
dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 blocks( size / THREADS_PER_BLOCK_X, size / THREADS_PER_BLOCK_Y, 1 );
/* start timers */
CUDA_CALL( hipEventRecord( start, 0 ) );
/* call GPU_naive */
hipLaunchKernelGGL(( GPU_shmem2), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_b, d_c );
CUDA_CHECK()
CUDA_CALL( hipDeviceSynchronize() );
/* stop timers */
CUDA_CALL( hipEventRecord( stop, 0 ) );
CUDA_CALL( hipEventSynchronize( stop ) );
CUDA_CALL( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print data for GPU naive */
fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
CUDA_CALL( hipMemcpy( h_c1, d_c, numbytes, hipMemcpyDeviceToHost ) );
hipblasDestroy( handle );
CUDA_CALL( hipEventDestroy( start ) );
CUDA_CALL( hipEventDestroy( stop ) );
/* check CUBLAS versus GPU NAIVE numerical results: 'temp' accumulates the
 * sum of squared element-wise differences; 10 is a loose sanity bound */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] );
} /* end for */
printf("error is %f\n",temp);
if( temp > 10 ) printf("Error value is suspiciously high!\n");
/* cleanup */
CUDA_CALL( hipFree( d_a ) );
CUDA_CALL( hipFree( d_b ) );
CUDA_CALL( hipFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
CUDA_CALL( hipDeviceReset() );
return 0;
}
| dbba8a1175a32e3b95ca070a52d5c89a2dd50d5f.cu | /*
* Copyright 2014 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "cublas_v2.h"
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* matrix size and thread dimensions */
#define SIZE 1024
/* define blocksize X and blocksize Y and blocksize K */
#define THREADS_PER_BLOCK_X 16 // Thread block size, x dimension
#define THREADS_PER_BLOCK_Y 16 // Thread block size, y dimension
#define BLOCK_K 16 // square block of K size
/* Tiled shared-memory matrix multiply: C = A * B for square m x m matrices
 * stored column-major (see INDX). Each thread block computes a
 * THREADS_PER_BLOCK_X x THREADS_PER_BLOCK_Y tile of C, marching over the K
 * dimension in BLOCK_K-wide slabs staged through shared memory. The A-tile
 * load as[tx][ty] relies on THREADS_PER_BLOCK_Y == BLOCK_K (both 16 here).
 * Preconditions: blockDim == (THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y)
 * and m is a multiple of the tile sizes -- there are no bounds checks; the
 * launch in main() guarantees this for SIZE = 1024. */
__global__ void GPU_shmem2(const int m, double const * const a, double const * const b, double *c )
{
/* setup some constanst for later use */
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * THREADS_PER_BLOCK_Y;
const int ibx = blockIdx.x * THREADS_PER_BLOCK_X;
/* shared memory arrays for A and B; the inner dimension is padded by one
 * element (the usual shared-memory bank-conflict-avoidance idiom) */
__shared__ double as[ THREADS_PER_BLOCK_X ][ BLOCK_K + 1 ];
__shared__ double bs[ BLOCK_K ][ THREADS_PER_BLOCK_Y + 1 ];
/* space for C to be held in registers */
double c_tmp = 0.0 ;
/* calculate my initial offset into A and B */
int aoff = INDX( ibx + tx, ty, m );
int boff = INDX( tx, iby + ty, m );
/* main loop over blocks of K */
for( int Kblock = 0; Kblock < m; Kblock+=BLOCK_K )
{
/* read block of A into shared memory */
as[ tx ][ ty ] = a[ aoff ];
/* read block of B into shared memory */
bs[ tx ][ ty ] = b[ boff ];
/* barrier: tiles must be fully populated before any thread consumes them */
__syncthreads();
/* increment A and B offsets for next round of data reads */
boff += BLOCK_K;
aoff += m * BLOCK_K;
/* inner product over the staged BLOCK_K slab */
#pragma unroll
for( int k = 0 ; k < BLOCK_K ; k++ )
{
c_tmp += as[ tx ][ k ] * bs[ k ][ ty ];
}
/* barrier: finish reading before the next iteration overwrites the tiles */
__syncthreads();
} /* end for Kblock */
/* set C to its proper index int the C matrix */
int coff = INDX( ibx + tx, iby + ty, m );
/* write results to the C matrix */
c[ coff ] = c_tmp;
} /* end GPU_shmem2 */
/* Driver: multiplies two random SIZE x SIZE double matrices with cuBLAS
 * (reference result in h_c) and with the shared-memory kernel GPU_shmem2
 * (result in h_c1), times both with CUDA events, and reports GFlop/s plus
 * the sum of squared differences between the two results.
 * Returns 0 on success, 911 on a failed host allocation or cuBLAS error.
 * Fix vs. previous revision: the cublasCreate/cublasDgemm status values
 * were silently discarded; they are now checked and reported.
 * Note: CUDA_CALL / CUDA_CHECK only check errors when DEBUG is defined
 * (see the macros at the top of this file). */
int main( int argc, char *argv[] )
{
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
/* h_* are host buffers, d_* their device counterparts */
double *h_a, *h_b, *h_c, *h_c1;
double *d_a, *d_b, *d_c;
/* size_t casts keep the byte count out of 32-bit int arithmetic */
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double );
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c = (double *) malloc( numbytes );
if( h_c == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_c1 = (double *) malloc( numbytes );
if( h_c1 == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices with uniform values in [0, 1) */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
CUDA_CALL( cudaMalloc( (void **)&d_a, numbytes ) );
CUDA_CALL( cudaMalloc( (void **)&d_b, numbytes ) );
CUDA_CALL( cudaMalloc( (void **)&d_c, numbytes ));
/* copy a and b to device */
CUDA_CALL( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) );
CUDA_CALL( cudaMemcpy( d_b, h_b, numbytes, cudaMemcpyHostToDevice ) );
cublasHandle_t handle;
cublasStatus_t stat = cublasCreate( &handle );
/* bail out if the cuBLAS context could not be created */
if( stat != CUBLAS_STATUS_SUCCESS )
{
fprintf(stderr, "CUBLAS Error: cublasCreate failed with status %d\n", (int) stat);
CUDA_CALL( cudaFree( d_a ) );
CUDA_CALL( cudaFree( d_b ) );
CUDA_CALL( cudaFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
return 911;
}
double alpha = 1.0;
double beta = 0.0;
/* start timers */
cudaEvent_t start, stop;
CUDA_CALL( cudaEventCreate( &start ) );
CUDA_CALL( cudaEventCreate( &stop ) );
CUDA_CALL( cudaEventRecord( start, 0 ) );
/* call CUBLAS dgemm: C = alpha*A*B + beta*C, no transposes */
stat = cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N,
size, size, size,
&alpha,
d_a, size,
d_b, size,
&beta,
d_c, size );
/* a bad argument/launch is reported synchronously by cublasDgemm */
if( stat != CUBLAS_STATUS_SUCCESS )
{
fprintf(stderr, "CUBLAS Error: cublasDgemm failed with status %d\n", (int) stat);
cublasDestroy( handle );
CUDA_CALL( cudaEventDestroy( start ) );
CUDA_CALL( cudaEventDestroy( stop ) );
CUDA_CALL( cudaFree( d_a ) );
CUDA_CALL( cudaFree( d_b ) );
CUDA_CALL( cudaFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
return 911;
}
/* stop timers */
CUDA_CALL( cudaEventRecord( stop, 0 ) );
CUDA_CALL( cudaEventSynchronize( stop ) );
float elapsedTime;
CUDA_CALL( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU CUBLAS timing information (2*n^3 flops for a matmul) */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
CUDA_CALL( cudaMemcpy( h_c, d_c, numbytes, cudaMemcpyDeviceToHost ) );
/* reset C on device to zero */
CUDA_CALL( cudaMemset( d_c, 0, numbytes ) );
/* setup grid and block sizes; SIZE is a multiple of both tile dims */
dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 blocks( size / THREADS_PER_BLOCK_X, size / THREADS_PER_BLOCK_Y, 1 );
/* start timers */
CUDA_CALL( cudaEventRecord( start, 0 ) );
/* call GPU_naive */
GPU_shmem2<<< blocks, threads >>> ( size, d_a, d_b, d_c );
CUDA_CHECK()
CUDA_CALL( cudaDeviceSynchronize() );
/* stop timers */
CUDA_CALL( cudaEventRecord( stop, 0 ) );
CUDA_CALL( cudaEventSynchronize( stop ) );
CUDA_CALL( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print data for GPU naive */
fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
CUDA_CALL( cudaMemcpy( h_c1, d_c, numbytes, cudaMemcpyDeviceToHost ) );
cublasDestroy( handle );
CUDA_CALL( cudaEventDestroy( start ) );
CUDA_CALL( cudaEventDestroy( stop ) );
/* check CUBLAS versus GPU NAIVE numerical results: 'temp' accumulates the
 * sum of squared element-wise differences; 10 is a loose sanity bound */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] );
} /* end for */
printf("error is %f\n",temp);
if( temp > 10 ) printf("Error value is suspiciously high!\n");
/* cleanup */
CUDA_CALL( cudaFree( d_a ) );
CUDA_CALL( cudaFree( d_b ) );
CUDA_CALL( cudaFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
CUDA_CALL( cudaDeviceReset() );
return 0;
}
|
4a8a82e482f7d0fbca833251537197b5d04b4cbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <join/join_common_utils.cuh>
#include <join/join_common_utils.hpp>
#include <join/mixed_join_common_utils.cuh>
#include <cudf/ast/detail/expression_evaluator.cuh>
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/span.hpp>
#include <hipcub/hipcub.hpp>
namespace cudf {
namespace detail {
namespace cg = cooperative_groups;
/**
 * Kernel for mixed (hash equality + AST condition) semi/anti joins.
 *
 * One thread handles exactly one row of the outer table (there is no
 * grid-stride loop), so the launch must cover outer_num_rows threads in
 * blocks of `block_size`. Dynamic shared memory must provide
 * blockDim.x * device_expression_data.num_intermediates slots of
 * IntermediateDataType<has_nulls> for expression evaluation.
 *
 * For a semi join the outer row index is emitted when the hash table
 * contains a match under `equality`; for LEFT_ANTI_JOIN the test is
 * inverted. The index is written to join_output_l at the precomputed
 * position join_result_offsets[row]. `swap_tables` selects whether the
 * right table acts as the outer side.
 */
template <cudf::size_type block_size, bool has_nulls>
__launch_bounds__(block_size) __global__
void mixed_join_semi(table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
size_type* join_output_l,
cudf::ast::detail::expression_device_view device_expression_data,
cudf::size_type const* join_result_offsets,
bool const swap_tables)
{
// Normally the casting of a shared memory array is used to create multiple
// arrays of different types from the shared memory buffer, but here it is
// used to circumvent conflicts between arrays of different types between
// different template instantiations due to the extern specifier.
extern __shared__ char raw_intermediate_storage[];
cudf::ast::detail::IntermediateDataType<has_nulls>* intermediate_storage =
reinterpret_cast<cudf::ast::detail::IntermediateDataType<has_nulls>*>(raw_intermediate_storage);
// Each thread gets its own private slice of the intermediate buffer.
auto thread_intermediate_storage =
&intermediate_storage[threadIdx.x * device_expression_data.num_intermediates];
cudf::size_type const left_num_rows = left_table.num_rows();
cudf::size_type const right_num_rows = right_table.num_rows();
auto const outer_num_rows = (swap_tables ? right_num_rows : left_num_rows);
cudf::size_type outer_row_index = threadIdx.x + blockIdx.x * block_size;
auto evaluator = cudf::ast::detail::expression_evaluator<has_nulls>(
left_table, right_table, device_expression_data);
row_hash hash_probe{nullate::DYNAMIC{has_nulls}, probe};
if (outer_row_index < outer_num_rows) {
// Probe the hash table with the combined AST + row-equality predicate;
// XOR with the anti-join flag decides whether this row is emitted.
auto equality = single_expression_equality<has_nulls>{
evaluator, thread_intermediate_storage, swap_tables, equality_probe};
if ((join_type == join_kind::LEFT_ANTI_JOIN) !=
(hash_table_view.contains(outer_row_index, hash_probe, equality))) {
*(join_output_l + join_result_offsets[outer_row_index]) = outer_row_index;
}
}
}
// Explicit instantiation: nullable path (has_nulls = true) at the default
// join block size.
template __global__ void mixed_join_semi<DEFAULT_JOIN_BLOCK_SIZE, true>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
size_type* join_output_l,
cudf::ast::detail::expression_device_view device_expression_data,
cudf::size_type const* join_result_offsets,
bool const swap_tables);
// Explicit instantiation: non-nullable path (has_nulls = false).
template __global__ void mixed_join_semi<DEFAULT_JOIN_BLOCK_SIZE, false>(
table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
size_type* join_output_l,
cudf::ast::detail::expression_device_view device_expression_data,
cudf::size_type const* join_result_offsets,
bool const swap_tables);
} // namespace detail
} // namespace cudf
| 4a8a82e482f7d0fbca833251537197b5d04b4cbc.cu | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <join/join_common_utils.cuh>
#include <join/join_common_utils.hpp>
#include <join/mixed_join_common_utils.cuh>
#include <cudf/ast/detail/expression_evaluator.cuh>
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/span.hpp>
#include <cub/cub.cuh>
namespace cudf {
namespace detail {
namespace cg = cooperative_groups;
/**
 * Kernel for mixed (hash equality + AST condition) semi/anti joins.
 *
 * One thread handles exactly one row of the outer table (there is no
 * grid-stride loop), so the launch must cover outer_num_rows threads in
 * blocks of `block_size`. Dynamic shared memory must provide
 * blockDim.x * device_expression_data.num_intermediates slots of
 * IntermediateDataType<has_nulls> for expression evaluation.
 *
 * For a semi join the outer row index is emitted when the hash table
 * contains a match under `equality`; for LEFT_ANTI_JOIN the test is
 * inverted. The index is written to join_output_l at the precomputed
 * position join_result_offsets[row]. `swap_tables` selects whether the
 * right table acts as the outer side.
 */
template <cudf::size_type block_size, bool has_nulls>
__launch_bounds__(block_size) __global__
void mixed_join_semi(table_device_view left_table,
table_device_view right_table,
table_device_view probe,
table_device_view build,
row_equality const equality_probe,
join_kind const join_type,
cudf::detail::semi_map_type::device_view hash_table_view,
size_type* join_output_l,
cudf::ast::detail::expression_device_view device_expression_data,
cudf::size_type const* join_result_offsets,
bool const swap_tables)
{
// Normally the casting of a shared memory array is used to create multiple
// arrays of different types from the shared memory buffer, but here it is
// used to circumvent conflicts between arrays of different types between
// different template instantiations due to the extern specifier.
extern __shared__ char raw_intermediate_storage[];
cudf::ast::detail::IntermediateDataType<has_nulls>* intermediate_storage =
reinterpret_cast<cudf::ast::detail::IntermediateDataType<has_nulls>*>(raw_intermediate_storage);
// Each thread gets its own private slice of the intermediate buffer.
auto thread_intermediate_storage =
&intermediate_storage[threadIdx.x * device_expression_data.num_intermediates];
cudf::size_type const left_num_rows = left_table.num_rows();
cudf::size_type const right_num_rows = right_table.num_rows();
auto const outer_num_rows = (swap_tables ? right_num_rows : left_num_rows);
cudf::size_type outer_row_index = threadIdx.x + blockIdx.x * block_size;
auto evaluator = cudf::ast::detail::expression_evaluator<has_nulls>(
left_table, right_table, device_expression_data);
row_hash hash_probe{nullate::DYNAMIC{has_nulls}, probe};
if (outer_row_index < outer_num_rows) {
// Probe the hash table with the combined AST + row-equality predicate;
// XOR with the anti-join flag decides whether this row is emitted.
auto equality = single_expression_equality<has_nulls>{
evaluator, thread_intermediate_storage, swap_tables, equality_probe};
if ((join_type == join_kind::LEFT_ANTI_JOIN) !=
(hash_table_view.contains(outer_row_index, hash_probe, equality))) {
*(join_output_l + join_result_offsets[outer_row_index]) = outer_row_index;
}
}
}
// Explicit instantiations: one kernel per null-handling mode so the
// has_nulls branch is resolved at compile time rather than per-row.
template __global__ void mixed_join_semi<DEFAULT_JOIN_BLOCK_SIZE, true>(
  table_device_view left_table,
  table_device_view right_table,
  table_device_view probe,
  table_device_view build,
  row_equality const equality_probe,
  join_kind const join_type,
  cudf::detail::semi_map_type::device_view hash_table_view,
  size_type* join_output_l,
  cudf::ast::detail::expression_device_view device_expression_data,
  cudf::size_type const* join_result_offsets,
  bool const swap_tables);
template __global__ void mixed_join_semi<DEFAULT_JOIN_BLOCK_SIZE, false>(
  table_device_view left_table,
  table_device_view right_table,
  table_device_view probe,
  table_device_view build,
  row_equality const equality_probe,
  join_kind const join_type,
  cudf::detail::semi_map_type::device_view hash_table_view,
  size_type* join_output_l,
  cudf::ast::detail::expression_device_view device_expression_data,
  cudf::size_type const* join_result_offsets,
  bool const swap_tables);
} // namespace detail
} // namespace cudf
|
941d3087231dba76193a000cbafa49f02623d05f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "idx_print.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate thread-block shapes {BLOCKX, BLOCKY} swept by the benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate problem sizes {XSIZE, YSIZE}; note there are only 7 entries.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
// Benchmark driver: for each (matrix size, block shape) pair, times 1000
// launches of the idx_print kernel and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
hipSetDevice(0);
// NOTE(review): argv[1] is read without checking argc, and the parsed value
// is used unchecked as a bound over matrices_[7] - a value > 7 reads past
// the end of the array. Confirm callers always pass 1..7.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Round the problem size up to the next multiple of the block shape so the
// grid covers it exactly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces context creation before any timing; first launch warms up.
hipFree(0);hipLaunchKernelGGL((
idx_print), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
// Untimed warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
idx_print), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
idx_print), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
// NOTE(review): the timed loop never synchronizes, so this measures launch
// overhead rather than kernel execution time - confirm that is the intent.
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 941d3087231dba76193a000cbafa49f02623d05f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "idx_print.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate thread-block shapes {BLOCKX, BLOCKY} swept by the benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate problem sizes {XSIZE, YSIZE}; note there are only 7 entries.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    // Benchmark driver: for each (matrix size, block shape) pair, times 1000
    // launches of the idx_print kernel and prints
    // [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
    cudaSetDevice(0);
    // matrices_ holds only 7 entries: validate argv before using it as a
    // loop bound, otherwise a missing or out-of-range argument reads past
    // the end of the array (undefined behaviour).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <number of matrix sizes (1-7)>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    if (matrix_len < 0) matrix_len = 0;
    if (matrix_len > 7) matrix_len = 7;
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Round the problem size up to the next multiple of the block
            // shape so the grid covers it exactly.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // forces context creation before any timing
            idx_print<<<gridBlock,threadBlock>>>();
            cudaDeviceSynchronize();
            // Untimed warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                idx_print<<<gridBlock,threadBlock>>>();
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                idx_print<<<gridBlock,threadBlock>>>();
            }
            auto end = steady_clock::now();
            // NOTE(review): the timed loop never synchronizes, so this
            // measures launch overhead rather than kernel execution time -
            // confirm that is the intent.
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
} |
581f7db65eff6b3f115ee6319b976f21d73d8cbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void evolve_fv(float * beta_d, float * gamma_up_d,
                     float * Un_d, flux_func_ptr flux_func,
                     float * qx_plus_half, float * qx_minus_half,
                     float * qy_plus_half, float * qy_minus_half,
                     float * fx_plus_half, float * fx_minus_half,
                     float * fy_plus_half, float * fy_minus_half,
                     int nx, int ny, int nz, int vec_dim, float alpha, float gamma,
                     float dx, float dy, float dt,
                     int kx_offset, int ky_offset) {
    /**
    First part of evolution through one timestep using finite volume methods.
    Reconstructs state vector to cell boundaries using slope limiter
    and calculates fluxes there.
    NOTE: we assume that beta is smooth so can get value at cell boundaries with simple averaging
    Parameters
    ----------
    beta_d : float *
        shift vector at each grid point.
    gamma_up_d : float *
        gamma matrix at each grid point
    Un_d : float *
        state vector at each grid point in each layer
    flux_func : flux_func_ptr
        pointer to function to be used to calulate fluxes
    qx_plus_half, qx_minus_half : float *
        state vector reconstructed at right and left boundaries
    qy_plus_half, qy_minus_half : float *
        state vector reconstructed at top and bottom boundaries
    fx_plus_half, fx_minus_half : float *
        flux vector at right and left boundaries
    fy_plus_half, fy_minus_half : float *
        flux vector at top and bottom boundaries
    nx, ny, nz : int
        dimensions of grid
    vec_dim : int
        dimension of state vector
    alpha, gamma : float
        lapse function and adiabatic index
    dx, dy, dt : float
        grid dimensions and timestep
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */

    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;
    // Flat index of the first component of this cell's state vector.
    int offset = ((z * ny + y) * nx + x) * vec_dim;

    // Scratch space for the reconstructed left/right states and their fluxes.
    // NOTE(review): device-heap allocations are made by every thread, even
    // out-of-range ones, and malloc's return value is never checked - confirm
    // the device malloc heap is sized for the full grid.
    float * q_p, *q_m, * f;
    q_p = (float *)malloc(vec_dim * sizeof(float));
    q_m = (float *)malloc(vec_dim * sizeof(float));
    f = (float *)malloc(vec_dim * sizeof(float));

    // Interior cells only: reconstruction reads the x+-1 and y+-1 neighbours.
    if ((x > 0) && (x < (nx-1)) && (y > 0) && (y < (ny-1)) && (z < nz)) {
        // x-direction
        for (int i = 0; i < vec_dim; i++) {
            // One-sided slopes on either side of this cell.
            float S_upwind = (Un_d[((z * ny + y) * nx + x+1) * vec_dim + i] -
                Un_d[((z * ny + y) * nx + x) * vec_dim + i]);
            float S_downwind = (Un_d[((z * ny + y) * nx + x) * vec_dim + i] -
                Un_d[((z * ny + y) * nx + x-1) * vec_dim + i]);
            float S = 0.5 * (S_upwind + S_downwind); // S_av

            float r = 1.0e6;

            // make sure don't divide by zero
            if (abs(S_downwind) > 1.0e-7) {
                r = S_upwind / S_downwind;
            }

            // phi(r) is the slope limiter (defined elsewhere in this file).
            S *= phi(r);

            // Reconstructed states at the right (+) and left (-) cell faces.
            q_p[i] = Un_d[offset + i] + S * 0.5;
            q_m[i] = Un_d[offset + i] - S * 0.5;
        }

        // fluxes (direction argument 0 = x)
        flux_func(q_p, f, 0, gamma_up_d, alpha, beta_d, gamma);

        for (int i = 0; i < vec_dim; i++) {
            qx_plus_half[offset + i] = q_p[i];
            fx_plus_half[offset + i] = f[i];
        }

        flux_func(q_m, f, 0, gamma_up_d, alpha, beta_d, gamma);

        for (int i = 0; i < vec_dim; i++) {
            qx_minus_half[offset + i] = q_m[i];
            fx_minus_half[offset + i] = f[i];
            //if (nan_check(q_p[i]) || nan_check(q_m[i]) || nan_check(fx_plus_half[offset + i]) || nan_check(fx_minus_half[offset + i])) printf("(%d, %d, %d) i: %d, qx_p: %f, qx_m: %f, fx_p: %f, fx_m: %f\n", x, y, z, i, q_p[i], q_m[i], fx_plus_half[offset + i], fx_minus_half[offset + i]);
        }

        // y-direction: same reconstruction using the y+-1 neighbours.
        for (int i = 0; i < vec_dim; i++) {
            float S_upwind = (Un_d[((z * ny + y+1) * nx + x) * vec_dim + i] -
                Un_d[((z * ny + y) * nx + x) * vec_dim + i]);
            float S_downwind = (Un_d[((z * ny + y) * nx + x) * vec_dim + i] -
                Un_d[((z * ny + y-1) * nx + x) * vec_dim + i]);
            float S = 0.5 * (S_upwind + S_downwind); // S_av

            float r = 1.0e6;

            // make sure don't divide by zero
            if (abs(S_downwind) > 1.0e-7) {
                r = S_upwind / S_downwind;
            }

            S *= phi(r);

            q_p[i] = Un_d[offset + i] + S * 0.5;
            q_m[i] = Un_d[offset + i] - S * 0.5;
        }

        // fluxes (direction argument 1 = y)
        flux_func(q_p, f, 1, gamma_up_d, alpha, beta_d, gamma);

        for (int i = 0; i < vec_dim; i++) {
            qy_plus_half[offset + i] = q_p[i];
            fy_plus_half[offset + i] = f[i];
        }

        flux_func(q_m, f, 1, gamma_up_d, alpha, beta_d, gamma);

        for (int i = 0; i < vec_dim; i++) {
            qy_minus_half[offset + i] = q_m[i];
            fy_minus_half[offset + i] = f[i];
            //if (nan_check(q_p[i]) || nan_check(q_m[i])) printf("(%d, %d, %d) i: %d, qy_p: %f, qy_m: %f\n", x, y, z, i, q_p[i], q_m[i]);
        }
    }

    free(q_p);
    free(q_m);
    free(f);
}
__global__ void evolve_z(float * beta_d, float * gamma_up_d,
                     float * Un_d, flux_func_ptr flux_func,
                     float * qz_plus_half, float * qz_minus_half,
                     float * fz_plus_half, float * fz_minus_half,
                     int nx, int ny, int nz, int vec_dim, float alpha, float gamma,
                     float dz, float dt,
                     int kx_offset, int ky_offset) {
    /**
    First part of evolution through one timestep using finite volume methods.
    Reconstructs state vector to cell boundaries in the z direction using a
    slope limiter and calculates fluxes there.
    NOTE: we assume that beta is smooth so can get value at cell boundaries with simple averaging
    Parameters
    ----------
    beta_d : float *
        shift vector at each grid point.
    gamma_up_d : float *
        gamma matrix at each grid point
    Un_d : float *
        state vector at each grid point in each layer
    flux_func : flux_func_ptr
        pointer to function to be used to calculate fluxes
    qz_plus_half, qz_minus_half : float *
        state vector reconstructed at top and bottom boundaries
    fz_plus_half, fz_minus_half : float *
        flux vector at top and bottom boundaries
    nx, ny, nz : int
        dimensions of grid
    vec_dim : int
        dimension of state vector
    alpha, gamma : float
        lapse function and adiabatic index
    dz, dt : float
        vertical grid spacing and timestep
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */

    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;
    // Flat index of the first component of this cell's state vector.
    int offset = ((z * ny + y) * nx + x) * vec_dim;

    // Interior cells only: reconstruction reads the z+-1 neighbours.
    if ((x > 0) && (x < (nx-1)) && (y > 0) && (y < (ny-1)) && (z > 0) && (z < (nz-1))) {
        // Scratch space for the reconstructed states and fluxes. Allocated
        // only by threads that actually do work: device-side malloc draws
        // from a small per-device heap, so allocating (and freeing)
        // unconditionally in every thread - as this kernel used to -
        // needlessly pressures that heap. NOTE: malloc can still return
        // NULL if the heap is exhausted; that remains unchecked here.
        float * q_p = (float *)malloc(vec_dim * sizeof(float));
        float * q_m = (float *)malloc(vec_dim * sizeof(float));
        float * f = (float *)malloc(vec_dim * sizeof(float));

        // z-direction
        for (int i = 0; i < vec_dim; i++) {
            // One-sided slopes above and below this cell.
            float S_upwind = (Un_d[(((z+1) * ny + y) * nx + x) * vec_dim + i] -
                Un_d[((z * ny + y) * nx + x) * vec_dim + i]);
            float S_downwind = (Un_d[((z * ny + y) * nx + x) * vec_dim + i] -
                Un_d[(((z-1) * ny + y) * nx + x) * vec_dim + i]);
            float S = 0.5 * (S_upwind + S_downwind); // S_av

            float r = 1.0e6;

            // make sure don't divide by zero
            if (abs(S_downwind) > 1.0e-7) {
                r = S_upwind / S_downwind;
            }

            // phi(r) is the slope limiter (defined elsewhere in this file).
            S *= phi(r);

            // Reconstructed states at the upper (+) and lower (-) cell faces.
            q_p[i] = Un_d[offset + i] + S * 0.5;
            q_m[i] = Un_d[offset + i] - S * 0.5;
        }

        // fluxes (direction argument 2 = z)
        flux_func(q_p, f, 2, gamma_up_d, alpha, beta_d, gamma);

        for (int i = 0; i < vec_dim; i++) {
            qz_plus_half[offset + i] = q_p[i];
            fz_plus_half[offset + i] = f[i];
        }

        flux_func(q_m, f, 2, gamma_up_d, alpha, beta_d, gamma);

        for (int i = 0; i < vec_dim; i++) {
            qz_minus_half[offset + i] = q_m[i];
            fz_minus_half[offset + i] = f[i];
        }

        free(q_p);
        free(q_m);
        free(f);
    }
}
__global__ void evolve_fv_fluxes(float * F,
                     float * qx_plus_half, float * qx_minus_half,
                     float * qy_plus_half, float * qy_minus_half,
                     float * fx_plus_half, float * fx_minus_half,
                     float * fy_plus_half, float * fy_minus_half,
                     int nx, int ny, int nz, int vec_dim, float alpha,
                     float dx, float dy, float dt,
                     int kx_offset, int ky_offset) {
    /**
    Calculates fluxes in finite volume evolution by solving the Riemann
    problem at the cell boundaries.
    Overwrites F with the x/y flux divergence; the z contribution is added
    afterwards by evolve_z_fluxes, which accumulates into F.
    Parameters
    ----------
    F : float *
        flux vector at each point in grid and each layer
    qx_plus_half, qx_minus_half : float *
        state vector reconstructed at right and left boundaries
    qy_plus_half, qy_minus_half : float *
        state vector reconstructed at top and bottom boundaries
    fx_plus_half, fx_minus_half : float *
        flux vector at right and left boundaries
    fy_plus_half, fy_minus_half : float *
        flux vector at top and bottom boundaries
    nx, ny, nz : int
        dimensions of grid
    vec_dim : int
        dimension of state vector
    alpha : float
        lapse function
    dx, dy, dt : float
        gridpoint spacing and timestep spacing
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */
    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;

    float fx_m, fx_p, fy_m, fy_p;

    // do fluxes
    if ((x > 1) && (x < (nx-2)) && (y > 1) && (y < (ny-2)) && (z < nz)) {
        for (int i = 0; i < vec_dim; i++) {
            // Face fluxes of the form 0.5*(f_L + f_R + q_L - q_R) - a
            // Lax-Friedrichs-type average with unit signal speed, presumably
            // (units where the maximum wave speed is 1) - TODO confirm.
            // x-boundary
            // from i-1
            fx_m = 0.5 * (
                fx_plus_half[((z * ny + y) * nx + x-1) * vec_dim + i] +
                fx_minus_half[((z * ny + y) * nx + x) * vec_dim + i] +
                qx_plus_half[((z * ny + y) * nx + x-1) * vec_dim + i] -
                qx_minus_half[((z * ny + y) * nx + x) * vec_dim + i]);
            // from i+1
            fx_p = 0.5 * (
                fx_plus_half[((z * ny + y) * nx + x) * vec_dim + i] +
                fx_minus_half[((z * ny + y) * nx + x+1) * vec_dim + i] +
                qx_plus_half[((z * ny + y) * nx + x) * vec_dim + i] -
                qx_minus_half[((z * ny + y) * nx + x+1) * vec_dim + i]);

            // y-boundary
            // from j-1
            fy_m = 0.5 * (
                fy_plus_half[((z * ny + y-1) * nx + x) * vec_dim + i] +
                fy_minus_half[((z * ny + y) * nx + x) * vec_dim + i] +
                qy_plus_half[((z * ny + y-1) * nx + x) * vec_dim + i] -
                qy_minus_half[((z * ny + y) * nx + x) * vec_dim + i]);
            // from j+1
            fy_p = 0.5 * (
                fy_plus_half[((z * ny + y) * nx + x) * vec_dim + i] +
                fy_minus_half[((z * ny + y+1) * nx + x) * vec_dim + i] +
                qy_plus_half[((z * ny + y) * nx + x) * vec_dim + i] -
                qy_minus_half[((z * ny + y+1) * nx + x) * vec_dim + i]);

            // Remember the previous value so a non-finite update can be
            // discarded instead of poisoning the grid.
            float old_F = F[((z * ny + y) * nx + x)*vec_dim + i];
            F[((z * ny + y) * nx + x)*vec_dim + i] =
                -alpha * ((fx_p - fx_m)/dx + (fy_p - fy_m)/dy);

            // hack?
            if (nan_check(F[((z * ny + y) * nx + x)*vec_dim + i])) {
                //printf("nan :( (%d, %d, %d) i: %d, fx_p: %f, fx_m: %f, fy_p: %f, fy_m: %f\n", x, y, z, i, fx_p, fx_m, fy_p, fy_m);
                F[((z * ny + y) * nx + x)*vec_dim + i] = old_F;
            }
        }
        //printf("fxm, fxp: %f, %f fym, fyp: %f, %f F(tau): %f\n", fx_m, fx_p, fy_m, fy_p, F[((z * ny + y) * nx + x)*vec_dim +4]);
    }
}
__global__ void evolve_z_fluxes(float * F,
                     float * qz_plus_half, float * qz_minus_half,
                     float * fz_plus_half, float * fz_minus_half,
                     int nx, int ny, int nz, int vec_dim, float alpha,
                     float dz, float dt,
                     int kx_offset, int ky_offset) {
    /**
    Calculates fluxes in finite volume evolution by solving the Riemann
    problem at the cell boundaries in the z direction, accumulating the
    result into F (which already holds the x/y contribution).
    Parameters
    ----------
    F : float *
        flux vector at each point in grid and each layer (updated in place)
    qz_plus_half, qz_minus_half : float *
        state vector reconstructed at upper and lower cell faces
    fz_plus_half, fz_minus_half : float *
        flux vector at upper and lower cell faces
    nx, ny, nz : int
        dimensions of grid
    vec_dim : int
        dimension of state vector
    alpha : float
        lapse function
    dz, dt : float
        vertical gridpoint spacing and timestep spacing
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */
    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;

    // Only interior cells have both a z-1 and a z+1 neighbour.
    bool in_interior = (x > 0) && (x < (nx-1)) &&
                       (y > 0) && (y < (ny-1)) &&
                       (z > 0) && (z < (nz-1));
    if (!in_interior) return;

    // Flat offsets (in floats) of this cell and its vertical neighbours.
    int cell  = ((z * ny + y) * nx + x) * vec_dim;
    int below = (((z-1) * ny + y) * nx + x) * vec_dim;
    int above = (((z+1) * ny + y) * nx + x) * vec_dim;

    for (int i = 0; i < vec_dim; i++) {
        // Face flux below this cell (shared with cell z-1).
        float f_lower = 0.5 * (
            fz_plus_half[below + i] +
            fz_minus_half[cell + i] +
            qz_plus_half[below + i] -
            qz_minus_half[cell + i]);
        // Face flux above this cell (shared with cell z+1).
        float f_upper = 0.5 * (
            fz_plus_half[cell + i] +
            fz_minus_half[above + i] +
            qz_plus_half[cell + i] -
            qz_minus_half[above + i]);

        float previous = F[cell + i];
        F[cell + i] = previous - alpha * (f_upper - f_lower) / dz;

        // hack? - discard non-finite updates rather than poisoning the grid
        if (nan_check(F[cell + i])) F[cell + i] = previous;
    }
}
__global__ void evolve_fv_heating(float * gamma_up,
                     float * Up, float * U_half,
                     float * qx_plus_half, float * qx_minus_half,
                     float * qy_plus_half, float * qy_minus_half,
                     float * fx_plus_half, float * fx_minus_half,
                     float * fy_plus_half, float * fy_minus_half,
                     float * sum_phs, float * rho, float * Q_d,
                     int nx, int ny, int nlayers, float alpha, float gamma,
                     float dx, float dy, float dt,
                     bool burning, float Cv, float E_He,
                     int kx_offset, int ky_offset) {
    /**
    Does the heating part of the evolution.
    NOTE: modifies U_half in place (divides the Phi component by W) in
    addition to updating Up and sum_phs.
    Parameters
    ----------
    gamma_up : float *
        gamma matrix at each grid point
    Up : float *
        state vector at next timestep
    U_half : float *
        state vector at half timestep
    qx_plus_half, qx_minus_half : float *
        state vector reconstructed at right and left boundaries
    qy_plus_half, qy_minus_half : float *
        state vector reconstructed at top and bottom boundaries
    fx_plus_half, fx_minus_half : float *
        flux vector at right and left boundaries
    fy_plus_half, fy_minus_half : float *
        flux vector at top and bottom boundaries
    sum_phs : float *
        sum of Phi in different layers
    rho : float *
        list of densities in different layers
    Q_d : float *
        heating rate in each layer
    nx, ny, nlayers : int
        dimensions of grid
    alpha, gamma : float
        lapse function and adiabatic index
    dx, dy, dt : float
        gridpoint spacing and timestep spacing
    burning : bool
        is burning present in this system?
    Cv, E_He : float
        specific heat in constant volume and energy release per unit mass of He
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */
    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;
    // Flat cell index (4-component shallow-water state lives at offset*4).
    int offset = (z * ny + y) * nx + x;

    // calculate Q
    //calc_Q(Up, rho_d, Q_d, nx, ny, nlayers, kx_offset, ky_offset, burning);

    float W = 1.0;

    float X_dot = 0.0;

    // do source terms
    if ((x < nx) && (y < ny) && (z < nlayers)) {
        // Copy the 4-component state of this cell so helpers can consume it.
        float * q_swe;
        q_swe = (float *)malloc(4 * sizeof(float));

        for (int i = 0; i < 4; i++) {
            q_swe[i] = U_half[offset * 4 + i];
        }
        // W_swe, calc_As, p_from_swe and calc_Q_swe are defined elsewhere in
        // this file; W is presumably the Lorentz factor - TODO confirm.
        W = W_swe(q_swe, gamma_up);

        float * A, * phis;
        A = (float *)malloc(nlayers * sizeof(float));
        phis = (float *)malloc(nlayers * sizeof(float));
        for (int i = 0; i < nlayers; i++) {
            phis[i] = U_half[((i * ny + y) * nx + x)* 4];
        }

        calc_As(rho, phis, A, nlayers, gamma, phis[0], rho[0]);

        float p = p_from_swe(q_swe, gamma_up, rho[z], gamma, W, A[z]);
        float Y = q_swe[3] / q_swe[0];

        // Burning rate: energy generation divided by the energy release per
        // unit mass of He.
        X_dot = calc_Q_swe(rho[z], p, gamma, Y, Cv) / E_He;

        //printf("p: %f, A: %f, X_dot : %f\n", p, A[z], X_dot);

        free(phis);
        free(A);
        free(q_swe);

        // Side effect: Phi component rescaled in place for use below.
        U_half[offset*4] /= W;
    }

    __syncthreads();

    if ((x < nx) && (y < ny) && (z < nlayers)) {
        sum_phs[offset] = 0.0;

        float sum_qs = 0.0;
        float deltaQx = 0.0;
        float deltaQy = 0.0;

        if (z < (nlayers - 1)) {
            sum_qs += (Q_d[z + 1] - Q_d[z]);
            deltaQx = Q_d[z] *
                (U_half[offset*4+1] - U_half[(((z + 1) * ny + y) * nx + x)*4+1]) /
                 (W * U_half[offset*4]);
            deltaQy = (Q_d[z]) *
                (U_half[offset*4+2] - U_half[(((z + 1) * ny + y) * nx + x)*4+2]) /
                 (W * U_half[offset*4]);
        }
        if (z > 0) {
            sum_qs += -rho[z-1] / rho[z] * (Q_d[z] - Q_d[z - 1]);
            // NOTE(review): for middle layers this branch *overwrites* the
            // deltaQx/deltaQy computed above rather than accumulating
            // (plain '=' not '+='). Confirm that is intentional.
            deltaQx = rho[z-1] / rho[z] * Q_d[z] *
                (U_half[offset*4+1] - U_half[(((z - 1) * ny + y) * nx + x)*4+1]) /
                (W * U_half[offset*4]);
            deltaQy = rho[z-1] / rho[z] * Q_d[z] *
                (U_half[offset*4+2] - U_half[(((z - 1) * ny + y) * nx + x)*4+2]) /
                (W * U_half[offset*4]);
        }

        // Weighted sum of Phi over the other layers (buoyancy source input).
        for (int j = 0; j < z; j++) {
            sum_phs[offset] += rho[j] / rho[z] *
                U_half[((j * ny + y) * nx + x)*4];
        }
        for (int j = z+1; j < nlayers; j++) {
            sum_phs[offset] += U_half[((j * ny + y) * nx + x)*4];
        }

        // NOTE: for now going to make Xdot a constant
        //const float X_dot = 0.01;

        // D
        Up[offset*4] += dt * alpha * sum_qs;

        //if (x < 10 && y < 10) printf("(%d, %d, %d) Q: %f, sum_qs: %f, deltaQx: %f, deltaQy: %f\n", x, y, z, Q_d[z], sum_qs, deltaQx, deltaQy);

        // Sx
        Up[offset*4+1] += dt * alpha * (-deltaQx);

        // Sy
        Up[offset*4+2] += dt * alpha * (-deltaQy);

        // DX
        Up[offset*4+3] += dt * alpha * X_dot;
    }
}
__global__ void evolve2(float * Un_d, float * Up, float * U_half,
                     float * sum_phs, int nx, int ny, int nlayers, int ng,
                     float alpha, float dx, float dy, float dt,
                     int kx_offset, int ky_offset) {
    /**
    Adds the buoyancy terms built from the layer-summed Phi to the momentum
    components of Up, then commits Up back into Un_d.
    Parameters
    ----------
    Un_d : float *
        state vector at each grid point in each layer at current timestep
    Up : float *
        state vector at next timestep
    U_half : float *
        state vector at half timestep
    sum_phs : float *
        sum of Phi in different layers
    nx, ny, nlayers : int
        dimensions of grid
    ng : int
        number of ghost cells
    alpha : float
        lapse function
    dx, dy, dt : float
        gridpoint spacing and timestep spacing
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */
    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;
    int offset = (z * ny + y) * nx + x;

    // Guard clause: only cells two points away from the domain edge take
    // part, since sum_phs is read at x+-1 / y+-1.
    if (!((x > 1) && (x < (nx-2)) && (y > 1) && (y < (ny-2)) && (z < nlayers)))
        return;

    // x-direction: limited slope of the summed Phi field.
    float grad_up = sum_phs[(z * ny + y) * nx + x+1] - sum_phs[offset];
    float grad_down = sum_phs[offset] - sum_phs[(z * ny + y) * nx + x-1];
    float a = 0.5 * (grad_up + grad_down);
    // Large sentinel ratio when the downwind slope is (near) zero.
    float r = (abs(grad_down) > 1.0e-10) ? (grad_up / grad_down) : 1.0e6;
    a *= dt * alpha * U_half[offset*4] * 0.5 * phi(r);
    // Only apply the correction while it respects a CFL-like bound.
    if (abs(a) < 0.9 * dx / dt) {
        Up[offset*4+1] -= a;
    }

    // y-direction: same construction with the y-neighbours.
    grad_up = sum_phs[(z * ny + y+1) * nx + x] - sum_phs[offset];
    grad_down = sum_phs[offset] - sum_phs[(z * ny + y-1) * nx + x];
    a = 0.5 * (grad_up + grad_down);
    r = (abs(grad_down) > 1.0e-10) ? (grad_up / grad_down) : 1.0e6;
    a *= dt * alpha * U_half[offset*4] * 0.5 * phi(r);
    if (abs(a) < 0.9 * dy / dt) {
        Up[offset*4+2] -= a;
    }

    // copy back to grid
    for (int i = 0; i < 4; i++) {
        Un_d[offset*4+i] = Up[offset*4+i];
    }
}
void homogeneuous_fv(dim3 * kernels, dim3 * threads, dim3 * blocks,
       int * cumulative_kernels, float * beta_d, float * gamma_up_d,
       float * Un_d, float * F_d,
       float * qx_p_d, float * qx_m_d, float * qy_p_d, float * qy_m_d,
       float * qz_p_d, float * qz_m_d,
       float * fx_p_d, float * fx_m_d, float * fy_p_d, float * fy_m_d,
       float * fz_p_d, float * fz_m_d,
       int nx, int ny, int nz, int vec_dim, int ng, float alpha, float gamma,
       float dx, float dy, float dz, float dt, int rank,
       flux_func_ptr h_flux_func, bool do_z) {
    /**
    Solves the homogeneous part of the equation (ie the bit without source terms).
    (Note: function name spelling is kept as-is for interface compatibility.)
    Parameters
    ----------
    kernels, threads, blocks : dim3 *
        number of kernels, threads and blocks for each process/kernel
    cumulative_kernels : int *
        Cumulative total of kernels in ranks < rank of current MPI process
    beta_d : float *
        shift vector at each grid point
    gamma_up_d : float *
        gamma matrix at each grid point
    Un_d : float *
        state vector at each grid point in each layer at current timestep
    F_d : float *
        flux vector
    qx_p_d, qx_m_d : float *
        state vector reconstructed at right and left boundaries
    qy_p_d, qy_m_d : float *
        state vector reconstructed at top and bottom boundaries
    qz_p_d, qz_m_d : float *
        state vector reconstructed at upper and lower boundaries (do_z only)
    fx_p_d, fx_m_d : float *
        flux vector at right and left boundaries
    fy_p_d, fy_m_d : float *
        flux vector at top and bottom boundaries
    fz_p_d, fz_m_d : float *
        flux vector at upper and lower boundaries (do_z only)
    nx, ny, nz : int
        dimensions of grid
    alpha, gamma : float
        lapse function and adiabatic index
    dx, dy, dz, dt : float
        gridpoint spacing and timestep spacing
    rank : int
        rank of MPI process
    flux_func : flux_func_ptr
        pointer to function to be used to calculate fluxes
    do_z : bool
        should we evolve in the z direction?
    */
    // Subgrids overlap by 2*ng ghost cells, hence the -2*ng in every stride.
    int kx_offset = 0;
    int ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
    int k_offset = 0;
    if (rank > 0) {
        k_offset = cumulative_kernels[rank - 1];
    }
    // Pass 1: reconstruct boundary states and face fluxes on every subgrid.
    for (int j = 0; j < kernels[rank].y; j++) {
       kx_offset = 0;
       for (int i = 0; i < kernels[rank].x; i++) {
           hipLaunchKernelGGL(( evolve_fv), dim3(blocks[k_offset + j * kernels[rank].x + i]), dim3(threads[k_offset + j * kernels[rank].x + i]), 0, 0, beta_d, gamma_up_d, Un_d, h_flux_func,
                  qx_p_d, qx_m_d, qy_p_d, qy_m_d,
                  fx_p_d, fx_m_d, fy_p_d, fy_m_d,
                  nx, ny, nz, vec_dim, alpha, gamma,
                  dx, dy, dt, kx_offset, ky_offset);
           if (do_z) {
               hipLaunchKernelGGL(( evolve_z), dim3(blocks[k_offset + j * kernels[rank].x + i]), dim3(threads[k_offset + j * kernels[rank].x + i]), 0, 0, beta_d, gamma_up_d, Un_d, h_flux_func,
                      qz_p_d, qz_m_d,
                      fz_p_d, fz_m_d,
                      nx, ny, nz, vec_dim, alpha, gamma,
                      dz, dt, kx_offset, ky_offset);
           }
           kx_offset += blocks[k_offset + j * kernels[rank].x + i].x * threads[k_offset + j * kernels[rank].x + i].x - 2*ng;
       }
       ky_offset += blocks[k_offset + j * kernels[rank].x].y * threads[k_offset + j * kernels[rank].x].y - 2*ng;
    }

    // Pass 2: combine face fluxes into the flux divergence F_d. Runs after
    // pass 1 has covered the whole grid, since each cell reads its
    // neighbours' reconstructed states.
    ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;

    for (int j = 0; j < kernels[rank].y; j++) {
       kx_offset = 0;
       for (int i = 0; i < kernels[rank].x; i++) {
           hipLaunchKernelGGL(( evolve_fv_fluxes), dim3(blocks[k_offset + j * kernels[rank].x + i]), dim3(threads[k_offset + j * kernels[rank].x + i]), 0, 0,
                  F_d,
                  qx_p_d, qx_m_d, qy_p_d, qy_m_d,
                  fx_p_d, fx_m_d, fy_p_d, fy_m_d,
                  nx, ny, nz, vec_dim, alpha,
                  dx, dy, dt, kx_offset, ky_offset);

           if (do_z) {
               hipLaunchKernelGGL(( evolve_z_fluxes), dim3(blocks[k_offset + j * kernels[rank].x + i]), dim3(threads[k_offset + j * kernels[rank].x + i]), 0, 0,
                      F_d,
                      qz_p_d, qz_m_d,
                      fz_p_d, fz_m_d,
                      nx, ny, nz, vec_dim, alpha,
                      dz, dt, kx_offset, ky_offset);
           }

           kx_offset += blocks[k_offset + j * kernels[rank].x + i].x * threads[k_offset + j * kernels[rank].x + i].x - 2*ng;
       }
       ky_offset += blocks[k_offset + j * kernels[rank].x].y * threads[k_offset + j * kernels[rank].x].y - 2*ng;
    }
}
// Apply boundary conditions to a host-side grid: directly via bcs_fv when
// running on a single process, or via MPI halo exchange otherwise.
static void rk3_enforce_bcs(float * grid_h, int nx, int ny, int nz,
        int vec_dim, int ng,
        dim3 * kernels, dim3 * threads, dim3 * blocks,
        MPI_Comm comm, MPI_Status status, int rank, int n_processes,
        bool do_z) {
    if (n_processes == 1) {
        bcs_fv(grid_h, nx, ny, nz, ng, vec_dim);
    } else {
        int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
        bcs_mpi(grid_h, nx, ny, nz, vec_dim, ng, comm, status, rank,
                n_processes, y_size, do_z);
    }
}

// HACK: data sanitisation applied after each RK stage of the compressible
// (do_z) solver. NOTE: could argue that this is actually a form of
// artificial dissipation to ensure stability (as it is just smoothing out
// spikes in the data after all). Assumes a 6-component state vector.
static void rk3_sanitise(float * U_h, int n_cells) {
    for (int x = 0; x < n_cells; x++) {
        // clamp runaway values in component 0
        if (abs(U_h[x*6]) > 1.0e2) {
            U_h[x*6] = 0.5;
        }
        // component 4 must stay bounded and non-negative
        if (abs(U_h[x*6+4]) > 1.0e3 || U_h[x*6+4] < 0.0) {
            U_h[x*6+4] = U_h[x*6];
        }
        // component 5 is clipped to [0, 1]
        if (U_h[x*6+5] > 1.0) U_h[x*6+5] = 1.0;
        if (U_h[x*6+5] < 0.0) U_h[x*6+5] = 0.0;
        // components 1..3 larger than component 0 are treated as spikes
        for (int i = 1; i < 4; i++) {
            if (abs(U_h[x*6+i]) > U_h[x*6]) {
                U_h[x*6+i] = 0.0;
            }
        }
    }
}

void rk3(dim3 * kernels, dim3 * threads, dim3 * blocks,
       int * cumulative_kernels,
       float * beta_d, float * gamma_up_d, float * Un_d,
       float * F_d, float * Up_d,
       float * qx_p_d, float * qx_m_d, float * qy_p_d, float * qy_m_d,
       float * qz_p_d, float * qz_m_d,
       float * fx_p_d, float * fx_m_d, float * fy_p_d, float * fy_m_d,
       float * fz_p_d, float * fz_m_d,
       int nx, int ny, int nz, int vec_dim, int ng, float alpha, float gamma,
       float dx, float dy, float dz, float dt,
       float * Up_h, float * F_h, float * Un_h,
       MPI_Comm comm, MPI_Status status, int rank, int n_processes,
       flux_func_ptr flux_func, bool do_z) {
    /**
    Integrates the homogeneous part of the ODE in time using third-order
    Runge-Kutta (Shu-Osher form):
        u1   = un + dt*F(un)
        u2   = 0.25*(3*un + u1 + dt*F(u1))
        un+1 = (1/3)*(un + 2*u2 + 2*dt*F(u2))
    On return, Un_h holds the updated state; Un_d is left holding u2
    (it is refreshed by the caller). Up_d is unused here but kept for
    interface compatibility.
    Parameters
    ----------
    kernels, threads, blocks : dim3 *
        number of kernels, threads and blocks for each process/kernel
    cumulative_kernels : int *
        Cumulative total of kernels in ranks < rank of current MPI process
    beta_d, gamma_up_d : float *
        shift vector and gamma matrix at each grid point
    Un_d, F_d, Up_d : float *
        current state, flux and next-step state on the device
    qx_p_d ... fz_m_d : float *
        face-reconstructed states and fluxes (work arrays)
    nx, ny, nz, vec_dim, ng : int
        grid dimensions, state vector size and ghost cell count
    alpha, gamma : float
        lapse function and adiabatic index
    dx, dy, dz, dt : float
        gridpoint spacing and timestep spacing
    Up_h, F_h, Un_h : float *
        next-step state, flux and current state on the host
    comm, status, rank, n_processes :
        MPI communicator, status, rank and process count
    flux_func : flux_func_ptr
        pointer to function to be used to calculate fluxes
    do_z : bool
        should we evolve in the z direction?
    */
    int grid_size = nx*ny*nz*vec_dim;

    //cout << "\nu1\n\n\n";
    // u1 = un + dt * F(un)
    homogeneuous_fv(kernels, threads, blocks, cumulative_kernels,
          beta_d, gamma_up_d, Un_d, F_d,
          qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
          fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
          nx, ny, nz, vec_dim, ng, alpha, gamma,
          dx, dy, dz, dt, rank, flux_func, do_z);

    // copy back flux and fix its boundaries before combining
    hipMemcpy(F_h, F_d, grid_size*sizeof(float), hipMemcpyDeviceToHost);
    rk3_enforce_bcs(F_h, nx, ny, nz, vec_dim, ng, kernels, threads, blocks,
                    comm, status, rank, n_processes, do_z);

    for (int n = 0; n < grid_size; n++) {
        Up_h[n] = Un_h[n] + dt * F_h[n];
    }
    // enforce boundaries and copy back
    rk3_enforce_bcs(Up_h, nx, ny, nz, vec_dim, ng, kernels, threads, blocks,
                    comm, status, rank, n_processes, do_z);
    if (do_z) {
        rk3_sanitise(Up_h, nx*ny*nz);
    }
    hipMemcpy(Un_d, Up_h, grid_size*sizeof(float), hipMemcpyHostToDevice);

    //cout << "\nu2\n\n\n";
    // u2 = 0.25 * (3*un + u1 + dt*F(u1))
    homogeneuous_fv(kernels, threads, blocks, cumulative_kernels,
          beta_d, gamma_up_d, Un_d, F_d,
          qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
          fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
          nx, ny, nz, vec_dim, ng, alpha, gamma,
          dx, dy, dz, dt, rank, flux_func, do_z);

    hipMemcpy(F_h, F_d, grid_size*sizeof(float), hipMemcpyDeviceToHost);
    rk3_enforce_bcs(F_h, nx, ny, nz, vec_dim, ng, kernels, threads, blocks,
                    comm, status, rank, n_processes, do_z);

    for (int n = 0; n < grid_size; n++) {
        Up_h[n] = 0.25 * (3.0 * Un_h[n] + Up_h[n] + dt * F_h[n]);
    }
    rk3_enforce_bcs(Up_h, nx, ny, nz, vec_dim, ng, kernels, threads, blocks,
                    comm, status, rank, n_processes, do_z);
    if (do_z) {
        rk3_sanitise(Up_h, nx*ny*nz);
    }
    hipMemcpy(Un_d, Up_h, grid_size*sizeof(float), hipMemcpyHostToDevice);

    //cout << "\nun+1\n\n\n";
    // un+1 = (1/3) * (un + 2*u2 + 2*dt*F(u2))
    homogeneuous_fv(kernels, threads, blocks, cumulative_kernels,
          beta_d, gamma_up_d, Un_d, F_d,
          qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
          fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
          nx, ny, nz, vec_dim, ng, alpha, gamma,
          dx, dy, dz, dt, rank, flux_func, do_z);

    hipMemcpy(F_h, F_d, grid_size*sizeof(float), hipMemcpyDeviceToHost);
    rk3_enforce_bcs(F_h, nx, ny, nz, vec_dim, ng, kernels, threads, blocks,
                    comm, status, rank, n_processes, do_z);

    for (int n = 0; n < grid_size; n++) {
        Up_h[n] = (1/3.0) * (Un_h[n] + 2.0*Up_h[n] + 2.0*dt * F_h[n]);
    }
    // enforce boundaries
    rk3_enforce_bcs(Up_h, nx, ny, nz, vec_dim, ng, kernels, threads, blocks,
                    comm, status, rank, n_processes, do_z);
    if (do_z) {
        rk3_sanitise(Up_h, nx*ny*nz);
    }

    // commit the result to the host-side current state
    for (int j = 0; j < grid_size; j++) {
        Un_h[j] = Up_h[j];
    }
}
void cuda_run(float * beta, float * gamma_up, float * Uc_h, float * Uf_h,
        float * rho, float * Q,
        int nx, int ny, int nlayers,
        int nxf, int nyf, int nz, int ng,
        int nt, float alpha, float gamma, float E_He, float Cv,
        float zmin,
        float dx, float dy, float dz, float dt, bool burning,
        int dprint, char * filename,
        MPI_Comm comm, MPI_Status status, int rank, int n_processes,
        int * matching_indices) {
    /**
    Evolve system through nt timesteps, saving data to filename every dprint
    timesteps.

    Parameters
    ----------
    beta : float *
        shift vector at each grid point
    gamma_up : float *
        gamma matrix at each grid point
    Uc_h : float *
        state vector at each grid point in each layer at current timestep on host in coarse grid
    Uf_h : float *
        state vector at each grid point in each layer at current timestep on host in fine grid
    rho : float *
        densities in each layer
    Q : float *
        heating rate at each point and in each layer
    nx, ny, nlayers : int
        dimensions of coarse grid
    nxf, nyf, nz : int
        dimensions of fine grid
    ng : int
        number of ghost cells
    nt : int
        total number of timesteps
    alpha : float
        lapse function
    gamma : float
        adiabatic index
    E_He : float
        energy release per unit mass of helium
    Cv : float
        specific heat in constant volume
    zmin : float
        height of sea floor
    dx, dy, dz, dt : float
        gridpoint spacing and timestep spacing
    burning : bool
        is burning included in this system?
    dprint : int
        number of timesteps between each printout
    filename : char *
        name of file to which output is printed ("na" suppresses output)
    comm : MPI_Comm
        MPI communicator
    status: MPI_Status
        status of MPI processes
    rank, n_processes : int
        rank of current MPI process and total number of MPI processes
    matching_indices : int *
        position of fine grid wrt coarse grid
    */
    // set up GPU stuff
    int count;
    hipGetDeviceCount(&count);
    if (rank == 0) {
        hipError_t err = hipGetLastError();
        // check that we actually have some GPUs
        if (err != hipSuccess) {
            printf("Error: %s\n", hipGetErrorString(err));
            printf("Aborting program.\n");
            return;
        }
        printf("Found %i CUDA devices\n", count);
    }
    // if rank > number of GPUs, exit now
    if (rank >= count) {
        return;
    }
    // redefine - we only want to run on as many cores as we have GPUs
    if (n_processes > count) {
        n_processes = count;
    }
    if (rank == 0) {
        printf("Running on %i processor(s)\n", n_processes);
    }

    // work out the kernel/block/thread decomposition of the (larger of the
    // coarse and fine) grids across the MPI processes
    int maxThreads = 256;
    int maxBlocks = 256; //64;
    dim3 *kernels = new dim3[n_processes];
    int *cumulative_kernels = new int[n_processes];
    getNumKernels(max(nx, nxf), max(ny, nyf), max(nlayers, nz), ng, n_processes, &maxBlocks, &maxThreads, kernels, cumulative_kernels);
    int total_kernels = cumulative_kernels[n_processes-1];
    dim3 *blocks = new dim3[total_kernels];
    dim3 *threads = new dim3[total_kernels];
    getNumBlocksAndThreads(max(nx, nxf), max(ny, nyf), max(nlayers, nz), ng, maxBlocks, maxThreads, n_processes, kernels, blocks, threads);
    printf("rank: %i\n", rank);
    printf("kernels: (%i, %i)\n", kernels[rank].x, kernels[rank].y);
    printf("cumulative kernels: %i\n", cumulative_kernels[rank]);
    int k_offset = 0;
    if (rank > 0) {
        k_offset = cumulative_kernels[rank-1];
    }
    for (int i = k_offset; i < cumulative_kernels[rank]; i++) {
        printf("blocks: (%i, %i, %i) , threads: (%i, %i, %i)\n",
               blocks[i].x, blocks[i].y, blocks[i].z,
               threads[i].x, threads[i].y, threads[i].z);
    }

    // gpu variables
    float * beta_d, * gamma_up_d, * Uc_d, * Uf_d, * rho_d, * Q_d;
    // initialise Uf_h
    for (int i = 0; i < nxf*nyf*nz*6; i++) {
        Uf_h[i] = 0.0;
    }
    // set device: one GPU per MPI rank
    hipSetDevice(rank);
    // allocate memory on device
    hipMalloc((void**)&beta_d, 3*sizeof(float));
    hipMalloc((void**)&gamma_up_d, 9*sizeof(float));
    hipMalloc((void**)&Uc_d, nx*ny*nlayers*4*sizeof(float));
    hipMalloc((void**)&Uf_d, nxf*nyf*nz*6*sizeof(float));
    hipMalloc((void**)&rho_d, nlayers*sizeof(float));
    hipMalloc((void**)&Q_d, nlayers*sizeof(float));
    // copy stuff to GPU
    hipMemcpy(beta_d, beta, 3*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(gamma_up_d, gamma_up, 9*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(Uf_d, Uf_h, nxf*nyf*nz*6*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(rho_d, rho, nlayers*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(Q_d, Q, nlayers*sizeof(float), hipMemcpyHostToDevice);

    // intermediate state buffers for the RK substeps
    float *Upc_d, *Uc_half_d, *Upf_d, *Uf_half_d, *old_phi_d, *sum_phs_d;
    hipMalloc((void**)&Upc_d, nx*ny*nlayers*4*sizeof(float));
    hipMalloc((void**)&Uc_half_d, nx*ny*nlayers*4*sizeof(float));
    hipMalloc((void**)&Upf_d, nxf*nyf*nz*6*sizeof(float));
    hipMalloc((void**)&Uf_half_d, nxf*nyf*nz*6*sizeof(float));
    hipMalloc((void**)&old_phi_d, nlayers*nx*ny*sizeof(float));
    hipMalloc((void**)&sum_phs_d, nlayers*nx*ny*sizeof(float));
    // need to fill old_phi with current phi to initialise
    float *pphi = new float[nlayers*nx*ny];
    for (int i = 0; i < nlayers*nx*ny; i++) {
        pphi[i] = Uc_h[i*4];
    }
    hipMemcpy(old_phi_d, pphi, nx*ny*nlayers*sizeof(float), hipMemcpyHostToDevice);

    float *qx_p_d, *qx_m_d, *qy_p_d, *qy_m_d, *qz_p_d, *qz_m_d, *fx_p_d, *fx_m_d, *fy_p_d, *fy_m_d, *fz_p_d, *fz_m_d;
    float *Upc_h = new float[nx*ny*nlayers*4];
    float *Fc_h = new float[nx*ny*nlayers*4];
    float *Upf_h = new float[nxf*nyf*nz*6];
    float *Ff_h = new float[nxf*nyf*nz*6];
    float * sum_phs_h = new float[nx*ny*nlayers];
    // initialise
    for (int j = 0; j < nxf*nyf*nz*6; j++) {
        Upf_h[j] = 0.0;
    }
    // reconstruction/flux work arrays are shared between coarse and fine
    // evolutions, so size them to fit the larger grid
    int grid_size = max(nx*ny*nlayers*4, nxf*nyf*nz*6);
    hipMalloc((void**)&qx_p_d, grid_size*sizeof(float));
    hipMalloc((void**)&qx_m_d, grid_size*sizeof(float));
    hipMalloc((void**)&qy_p_d, grid_size*sizeof(float));
    hipMalloc((void**)&qy_m_d, grid_size*sizeof(float));
    hipMalloc((void**)&qz_p_d, grid_size*sizeof(float));
    hipMalloc((void**)&qz_m_d, grid_size*sizeof(float));
    hipMalloc((void**)&fx_p_d, grid_size*sizeof(float));
    hipMalloc((void**)&fx_m_d, grid_size*sizeof(float));
    hipMalloc((void**)&fy_p_d, grid_size*sizeof(float));
    hipMalloc((void**)&fy_m_d, grid_size*sizeof(float));
    hipMalloc((void**)&fz_p_d, grid_size*sizeof(float));
    hipMalloc((void**)&fz_m_d, grid_size*sizeof(float));
    float * q_comp_d;
    hipMalloc((void**)&q_comp_d, nx*ny*nlayers*6*sizeof(float));
    float * qf_swe;
    hipMalloc((void**)&qf_swe, nxf*nyf*nz*4*sizeof(float));
    int * matching_indices_d;
    hipMalloc((void**)&matching_indices_d, 4*sizeof(int));
    hipMemcpy(matching_indices_d, matching_indices, 4*sizeof(int), hipMemcpyHostToDevice);

    // make host-side function pointers to __device__ functions
    flux_func_ptr h_compressible_fluxes;
    flux_func_ptr h_shallow_water_fluxes;
    // copy function pointers to host equivalent
    hipMemcpyFromSymbol(&h_compressible_fluxes, d_compressible_fluxes, sizeof(flux_func_ptr));
    hipMemcpyFromSymbol(&h_shallow_water_fluxes, d_shallow_water_fluxes, sizeof(flux_func_ptr));

    if (strcmp(filename, "na") != 0) {
        hid_t outFile, dset, mem_space, file_space;
        if (rank == 0) {
            // create file
            outFile = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
            // create dataspace: one chunk per printout
            int ndims = 5;
            hsize_t dims[] = {hsize_t((nt+1)/dprint+1), hsize_t(nlayers), hsize_t(ny), hsize_t(nx), 4};
            file_space = H5Screate_simple(ndims, dims, NULL);
            hid_t plist = H5Pcreate(H5P_DATASET_CREATE);
            H5Pset_layout(plist, H5D_CHUNKED);
            hsize_t chunk_dims[] = {1, hsize_t(nlayers), hsize_t(ny), hsize_t(nx), 4};
            H5Pset_chunk(plist, ndims, chunk_dims);
            // create dataset
            dset = H5Dcreate(outFile, "SwerveOutput", H5T_NATIVE_FLOAT, file_space, H5P_DEFAULT, plist, H5P_DEFAULT);
            H5Pclose(plist);
            // make a memory dataspace
            mem_space = H5Screate_simple(ndims, chunk_dims, NULL);
            // select a hyperslab
            file_space = H5Dget_space(dset);
            hsize_t start[] = {0, 0, 0, 0, 0};
            hsize_t hcount[] = {1, hsize_t(nlayers), hsize_t(ny), hsize_t(nx), 4};
            H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, hcount, NULL);
            // write initial state to dataset
            printf("Printing t = %i\n", 0);
            H5Dwrite(dset, H5T_NATIVE_FLOAT, mem_space, file_space, H5P_DEFAULT, Uc_h);
            // close file dataspace
            H5Sclose(file_space);
        }
        hipError_t err;
        err = hipGetLastError();
        if (err != hipSuccess){
            cout << "Before evolution\n";
            printf("Error: %s\n", hipGetErrorString(err));
        }
        // main loop
        for (int t = 0; t < nt; t++) {
            cout << "Evolving t = " << t << '\n';
            int ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;

            // prolong coarse-grid data to fine grid
            prolong_grid(kernels, threads, blocks, cumulative_kernels,
                         Uc_d, Uf_d, nx, ny, nlayers, nxf, nyf, nz, dx, dy, dz, dt, zmin, gamma_up_d,
                         rho_d, gamma, matching_indices_d, ng, rank, q_comp_d, old_phi_d);
            hipMemcpy(Uf_h, Uf_d, nxf*nyf*nz*6*sizeof(float), hipMemcpyDeviceToHost);
            err = hipGetLastError();
            if (err != hipSuccess){
                cout << "After prolonging\n";
                printf("Error: %s\n", hipGetErrorString(err));
            }
            // enforce boundaries
            if (n_processes == 1) {
                bcs_fv(Uf_h, nxf, nyf, nz, ng, 6);
            } else {
                int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
                bcs_mpi(Uf_h, nxf, nyf, nz, 6, ng, comm, status, rank, n_processes, y_size, true);
            }
            hipMemcpy(Uf_d, Uf_h, nxf*nyf*nz*6*sizeof(float), hipMemcpyHostToDevice);
            err = hipGetLastError();
            if (err != hipSuccess) {
                cout << "Before fine rk3\n";
                printf("Error: %s\n", hipGetErrorString(err));
            }
            // evolve fine grid through two subcycles (fine grid uses half the
            // spatial resolution and half the timestep of the coarse grid)
            for (int i = 0; i < 2; i++) {
                rk3(kernels, threads, blocks, cumulative_kernels,
                    beta_d, gamma_up_d, Uf_d, Uf_half_d, Upf_d,
                    qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
                    fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
                    nxf, nyf, nz, 6, ng, alpha, gamma,
                    dx*0.5, dy*0.5, dz, dt*0.5, Upf_h, Ff_h, Uf_h,
                    comm, status, rank, n_processes,
                    h_compressible_fluxes, true);
                // enforce boundaries is done within rk3
                hipDeviceSynchronize();

                // hack on the burning: operator-split source terms applied
                // on the host over the interior cells only
                float * H = new float[nxf*nyf*nz];
                calc_Q(rho, Uf_h, nxf, nyf, nz, gamma, gamma_up, H, Cv);
                for (int z = 0; z < nz; z++) {
                    for (int y = ng; y < nyf-ng; y++) {
                        for (int x = ng; x < nxf - ng; x++) {
                            // tau
                            Uf_h[((z * nyf + y) * nxf + x) * 6 + 4] += dt * 0.5 * alpha * Uf_h[((z * nyf + y) * nxf + x) * 6] * H[(z * nyf + y) * nxf + x];
                            float X_dot = H[(z * nyf + y) * nxf + x] / E_He;
                            // DX
                            Uf_h[((z * nyf + y) * nxf + x) * 6 + 5] += dt * 0.5 * alpha * rho[0] * X_dot;
                        }
                    }
                }
                delete[] H;
                if (n_processes == 1) {
                    bcs_fv(Uf_h, nxf, nyf, nz, ng, 6);
                } else {
                    int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
                    bcs_mpi(Uf_h, nxf, nyf, nz, 6, ng, comm, status, rank, n_processes, y_size, false);
                }
                // copy to device
                hipMemcpy(Uf_d, Uf_h, nxf*nyf*nz*6*sizeof(float), hipMemcpyHostToDevice);
            }
            err = hipGetLastError();
            if (err != hipSuccess){
                cout << "Before restricting\n";
                printf("Error: %s\n", hipGetErrorString(err));
            }
            // restrict fine-grid data back onto the coarse grid
            restrict_grid(kernels, threads, blocks, cumulative_kernels,
                          Uc_d, Uf_d, nx, ny, nlayers, nxf, nyf, nz,
                          dz, zmin, matching_indices_d,
                          rho_d, gamma, gamma_up_d, ng, rank, qf_swe);
            err = hipGetLastError();
            if (err != hipSuccess){
                cout << "After restricting\n";
                printf("Error: %s\n", hipGetErrorString(err));
            }
            hipMemcpy(Uc_h, Uc_d, nx*ny*nlayers*4*sizeof(float), hipMemcpyDeviceToHost);
            err = hipGetLastError();
            if (err != hipSuccess){
                cout << "After copying\n";
                printf("Error: %s\n", hipGetErrorString(err));
            }
            // enforce boundaries
            if (n_processes == 1) {
                bcs_fv(Uc_h, nx, ny, nlayers, ng, 4);
            } else {
                int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
                bcs_mpi(Uc_h, nx, ny, nlayers, 4, ng, comm, status, rank, n_processes, y_size, false);
            }
            hipMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), hipMemcpyHostToDevice);
            err = hipGetLastError();
            if (err != hipSuccess){
                cout << "Coarse rk3\n";
                printf("Error: %s\n", hipGetErrorString(err));
            }
            // evolve coarse (shallow-water) grid through one full timestep
            rk3(kernels, threads, blocks, cumulative_kernels,
                beta_d, gamma_up_d, Uc_d, Uc_half_d, Upc_d,
                qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
                fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
                nx, ny, nlayers, 4, ng, alpha, gamma,
                dx, dy, dz, dt, Upc_h, Fc_h, Uc_h,
                comm, status, rank, n_processes,
                h_shallow_water_fluxes, false);
            err = hipGetLastError();
            if (err != hipSuccess){
                cout << "Done coarse rk3\n";
                printf("Error: %s\n", hipGetErrorString(err));
            }
            hipMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), hipMemcpyHostToDevice);
            // update old_phi
            for (int i = 0; i < nlayers*nx*ny; i++) {
                pphi[i] = Uc_h[i*4];
            }
            hipMemcpy(old_phi_d, pphi, nx*ny*nlayers*sizeof(float), hipMemcpyHostToDevice);

            hipMemcpy(Upc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), hipMemcpyHostToDevice);
            hipMemcpy(Uc_half_d, Uc_h, nx*ny*nlayers*4*sizeof(float), hipMemcpyHostToDevice);

            // apply heating source terms on the coarse grid
            // NOTE: kx_offset holds integer grid offsets (was declared float)
            int kx_offset = 0;
            ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
            for (int j = 0; j < kernels[rank].y; j++) {
                kx_offset = 0;
                for (int i = 0; i < kernels[rank].x; i++) {
                    hipLaunchKernelGGL(( evolve_fv_heating), dim3(blocks[k_offset + j * kernels[rank].x + i]), dim3(threads[k_offset + j * kernels[rank].x + i]), 0, 0,
                           gamma_up_d,
                           Upc_d, Uc_half_d,
                           qx_p_d, qx_m_d, qy_p_d, qy_m_d,
                           fx_p_d, fx_m_d, fy_p_d, fy_m_d,
                           sum_phs_d, rho_d, Q_d,
                           nx, ny, nlayers, alpha, gamma,
                           dx, dy, dt, burning, Cv, E_He,
                           kx_offset, ky_offset);
                    kx_offset += blocks[k_offset + j * kernels[rank].x + i].x * threads[k_offset + j * kernels[rank].x + i].x - 2*ng;
                }
                ky_offset += blocks[k_offset + j * kernels[rank].x].y * threads[k_offset + j * kernels[rank].x].y - 2*ng;
            }
            hipMemcpy(Upc_h, Upc_d, nx*ny*nlayers*4*sizeof(float), hipMemcpyDeviceToHost);
            hipMemcpy(sum_phs_h, sum_phs_d, nx*ny*nlayers*sizeof(float), hipMemcpyDeviceToHost);
            // enforce boundaries
            if (n_processes == 1) {
                bcs_fv(Upc_h, nx, ny, nlayers, ng, 4);
                bcs_fv(sum_phs_h, nx, ny, nlayers, ng, 1);
            } else {
                int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
                bcs_mpi(Upc_h, nx, ny, nlayers, 4, ng, comm, status, rank, n_processes, y_size, false);
                bcs_mpi(sum_phs_h, nx, ny, nlayers, 1, ng, comm, status, rank, n_processes, y_size, false);
            }
            hipMemcpy(Upc_d, Upc_h, nx*ny*nlayers*4*sizeof(float), hipMemcpyHostToDevice);
            hipMemcpy(sum_phs_d, sum_phs_h, nx*ny*nlayers*sizeof(float), hipMemcpyHostToDevice);

            kx_offset = 0;
            ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
            for (int j = 0; j < kernels[rank].y; j++) {
                kx_offset = 0;
                for (int i = 0; i < kernels[rank].x; i++) {
                    hipLaunchKernelGGL(( evolve2), dim3(blocks[k_offset + j * kernels[rank].x + i]), dim3(threads[k_offset + j * kernels[rank].x + i]), 0, 0, Uc_d,
                           Upc_d, Uc_half_d, sum_phs_d,
                           nx, ny, nlayers, ng, alpha,
                           dx, dy, dt, kx_offset, ky_offset);
                    kx_offset += blocks[k_offset + j * kernels[rank].x + i].x * threads[k_offset + j * kernels[rank].x + i].x - 2*ng;
                }
                ky_offset += blocks[k_offset + j * kernels[rank].x].y * threads[k_offset + j * kernels[rank].x].y - 2*ng;
            }
            hipDeviceSynchronize();
            err = hipGetLastError();
            if (err != hipSuccess)
                printf("Error: %s\n", hipGetErrorString(err));

            // boundaries
            hipMemcpy(Uc_h, Uc_d, nx*ny*nlayers*4*sizeof(float), hipMemcpyDeviceToHost);
            if (n_processes == 1) {
                bcs_fv(Uc_h, nx, ny, nlayers, ng, 4);
            } else {
                int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
                bcs_mpi(Uc_h, nx, ny, nlayers, 4, ng, comm, status, rank, n_processes, y_size, false);
            }
            hipMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), hipMemcpyHostToDevice);

            int mpi_err;
            if ((t+1) % dprint == 0) {
                if (rank == 0) {
                    printf("Printing t = %i\n", t+1);
                    if (n_processes > 1) { // only do MPI stuff if needed
                        // receive each process's subdomain and copy it to grid
                        float * buf = new float[nx*ny*nlayers*4];
                        int tag = 0;
                        for (int source = 1; source < n_processes; source++) {
                            mpi_err = MPI_Recv(buf, nx*ny*nlayers*4, MPI_FLOAT, source, tag, comm, &status);
                            check_mpi_error(mpi_err);
                            // copy data back to grid
                            // BUG FIX: offset depends on the sending process,
                            // not on our own rank (which is always 0 here);
                            // using rank made every source overwrite the
                            // entire grid from y = 0.
                            ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * source;
                            // cheating slightly and using the fact that are moving from bottom to top to make calculations a bit easier.
                            for (int z = 0; z < nlayers; z++) {
                                for (int y = ky_offset; y < ny; y++) {
                                    for (int x = 0; x < nx; x++) {
                                        for (int i = 0; i < 4; i++) {
                                            Uc_h[((z * ny + y) * nx + x) * 4 + i] = buf[((z * ny + y) * nx + x) * 4 + i];
                                        }
                                    }
                                }
                            }
                        }
                        delete[] buf;
                    }
                    // select a hyperslab
                    file_space = H5Dget_space(dset);
                    hsize_t start[] = {hsize_t((t+1)/dprint), 0, 0, 0, 0};
                    hsize_t hcount[] = {1, hsize_t(nlayers), hsize_t(ny), hsize_t(nx), 4};
                    H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, hcount, NULL);
                    // write to dataset
                    H5Dwrite(dset, H5T_NATIVE_FLOAT, mem_space, file_space, H5P_DEFAULT, Uc_h);
                    // close file dataspace
                    H5Sclose(file_space);
                } else { // send data to rank 0
                    int tag = 0;
                    mpi_err = MPI_Ssend(Uc_h, ny*nx*nlayers*4, MPI_FLOAT, 0, tag, comm);
                    check_mpi_error(mpi_err);
                }
            }
        }
        if (rank == 0) {
            H5Sclose(mem_space);
            H5Dclose(dset); // FIX: dataset handle was previously leaked
            H5Fclose(outFile);
        }
    } else { // don't print
        for (int t = 0; t < nt; t++) {
            // prolong to fine grid
            // BUG FIX: pass the device copy gamma_up_d (as in the printing
            // branch above); the host pointer gamma_up is not dereferenceable
            // in device code.
            prolong_grid(kernels, threads, blocks, cumulative_kernels, Uc_d,
                         Uf_d, nx, ny, nlayers, nxf, nyf, nz, dx, dy, dz,
                         dt, zmin, gamma_up_d,
                         rho_d, gamma, matching_indices_d, ng, rank, q_comp_d, old_phi_d);
            hipMemcpy(Uf_h, Uf_d, nxf*nyf*nz*6*sizeof(float), hipMemcpyDeviceToHost);
            // evolve fine grid through two subcycles
            for (int i = 0; i < 2; i++) {
                rk3(kernels, threads, blocks, cumulative_kernels,
                    beta_d, gamma_up_d, Uf_d, Uf_half_d, Upf_d,
                    qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
                    fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
                    nxf, nyf, nz, 6, ng, alpha, gamma,
                    dx*0.5, dy*0.5, dz, dt*0.5, Upf_h, Ff_h, Uf_h,
                    comm, status, rank, n_processes,
                    h_compressible_fluxes, true);
                // if not last step, copy output array to input array
                if (i < 1) {
                    for (int j = 0; j < nxf*nyf*nz*6; j++) {
                        Uf_h[j] = Upf_h[j];
                    }
                }
            }
            // restrict to coarse grid
            restrict_grid(kernels, threads, blocks, cumulative_kernels,
                          Uc_d, Uf_d, nx, ny, nlayers, nxf, nyf, nz,
                          dz, zmin, matching_indices_d,
                          rho_d, gamma, gamma_up_d, ng, rank, qf_swe);
            rk3(kernels, threads, blocks, cumulative_kernels,
                beta_d, gamma_up_d, Uc_d, Uc_half_d, Upc_d,
                qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
                fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
                nx, ny, nlayers, 4, ng, alpha, gamma,
                dx, dy, dz, dt, Upc_h, Fc_h, Uc_h,
                comm, status, rank, n_processes,
                h_shallow_water_fluxes, false);
            hipDeviceSynchronize();
            // boundaries
            hipMemcpy(Uc_h, Uc_d, nx*ny*nlayers*4*sizeof(float), hipMemcpyDeviceToHost);
            if (n_processes == 1) {
                bcs_fv(Uc_h, nx, ny, nlayers, ng, 4);
            } else {
                int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
                bcs_mpi(Uc_h, nx, ny, nlayers, 4, ng, comm, status, rank, n_processes, y_size, false);
            }
            hipMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), hipMemcpyHostToDevice);
            hipError_t err = hipGetLastError();
            if (err != hipSuccess)
                printf("Error: %s\n", hipGetErrorString(err));
        }
    }

    // delete some stuff
    hipFree(beta_d);
    hipFree(gamma_up_d);
    hipFree(Uc_d);
    hipFree(Uf_d);
    hipFree(rho_d);
    hipFree(Q_d);
    hipFree(Upc_d);
    hipFree(Uc_half_d);
    hipFree(Upf_d);
    hipFree(Uf_half_d);
    hipFree(old_phi_d);
    hipFree(sum_phs_d);
    hipFree(qx_p_d);
    hipFree(qx_m_d);
    hipFree(qy_p_d);
    hipFree(qy_m_d);
    hipFree(qz_p_d);
    hipFree(qz_m_d);
    hipFree(fx_p_d);
    hipFree(fx_m_d);
    hipFree(fy_p_d);
    hipFree(fy_m_d);
    hipFree(fz_p_d);
    hipFree(fz_m_d);
    hipFree(q_comp_d);
    hipFree(qf_swe);
    hipFree(matching_indices_d);

    delete[] kernels;
    delete[] cumulative_kernels;
    delete[] threads;
    delete[] blocks;
    delete[] Upc_h;
    delete[] Fc_h;
    delete[] Upf_h;
    delete[] Ff_h;
    delete[] pphi;
    delete[] sum_phs_h;
}
__global__ void evolve_fv(float * beta_d, float * gamma_up_d,
        float * Un_d, flux_func_ptr flux_func,
        float * qx_plus_half, float * qx_minus_half,
        float * qy_plus_half, float * qy_minus_half,
        float * fx_plus_half, float * fx_minus_half,
        float * fy_plus_half, float * fy_minus_half,
        int nx, int ny, int nz, int vec_dim, float alpha, float gamma,
        float dx, float dy, float dt,
        int kx_offset, int ky_offset) {
    /**
    First part of evolution through one timestep using finite volume methods.
    Reconstructs state vector to cell boundaries using slope limiter
    and calculates fluxes there.

    NOTE: we assume that beta is smooth so can get value at cell boundaries
    with simple averaging

    Parameters
    ----------
    beta_d : float *
        shift vector at each grid point.
    gamma_up_d : float *
        gamma matrix at each grid point
    Un_d : float *
        state vector at each grid point in each layer
    flux_func : flux_func_ptr
        pointer to function to be used to calculate fluxes
    qx_plus_half, qx_minus_half : float *
        state vector reconstructed at right and left boundaries
    qy_plus_half, qy_minus_half : float *
        state vector reconstructed at top and bottom boundaries
    fx_plus_half, fx_minus_half : float *
        flux vector at right and left boundaries
    fy_plus_half, fy_minus_half : float *
        flux vector at top and bottom boundaries
    nx, ny, nz : int
        dimensions of grid
    vec_dim : int
        dimension of state vector
    alpha, gamma : float
        lapse function and adiabatic index
    dx, dy, dt : float
        grid dimensions and timestep
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */
    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;
    int offset = ((z * ny + y) * nx + x) * vec_dim;

    // scratch space for the reconstructed states and their fluxes;
    // vec_dim is a runtime quantity so this comes off the device heap
    float * q_p, *q_m, * f;
    q_p = (float *)malloc(vec_dim * sizeof(float));
    q_m = (float *)malloc(vec_dim * sizeof(float));
    f = (float *)malloc(vec_dim * sizeof(float));
    // FIX: device-heap allocation can fail; bail out rather than
    // dereference a null pointer
    if (q_p == NULL || q_m == NULL || f == NULL) {
        if (q_p != NULL) free(q_p);
        if (q_m != NULL) free(q_m);
        if (f != NULL) free(f);
        return;
    }

    if ((x > 0) && (x < (nx-1)) && (y > 0) && (y < (ny-1)) && (z < nz)) {
        // x-direction: slope-limited linear reconstruction
        for (int i = 0; i < vec_dim; i++) {
            float S_upwind = (Un_d[((z * ny + y) * nx + x+1) * vec_dim + i] -
                Un_d[((z * ny + y) * nx + x) * vec_dim + i]);
            float S_downwind = (Un_d[((z * ny + y) * nx + x) * vec_dim + i] -
                Un_d[((z * ny + y) * nx + x-1) * vec_dim + i]);
            float S = 0.5 * (S_upwind + S_downwind); // S_av
            float r = 1.0e6;
            // make sure don't divide by zero
            if (abs(S_downwind) > 1.0e-7) {
                r = S_upwind / S_downwind;
            }
            S *= phi(r);
            q_p[i] = Un_d[offset + i] + S * 0.5;
            q_m[i] = Un_d[offset + i] - S * 0.5;
        }
        // fluxes at the x-boundaries
        flux_func(q_p, f, 0, gamma_up_d, alpha, beta_d, gamma);
        for (int i = 0; i < vec_dim; i++) {
            qx_plus_half[offset + i] = q_p[i];
            fx_plus_half[offset + i] = f[i];
        }
        flux_func(q_m, f, 0, gamma_up_d, alpha, beta_d, gamma);
        for (int i = 0; i < vec_dim; i++) {
            qx_minus_half[offset + i] = q_m[i];
            fx_minus_half[offset + i] = f[i];
        }

        // y-direction: slope-limited linear reconstruction
        for (int i = 0; i < vec_dim; i++) {
            float S_upwind = (Un_d[((z * ny + y+1) * nx + x) * vec_dim + i] -
                Un_d[((z * ny + y) * nx + x) * vec_dim + i]);
            float S_downwind = (Un_d[((z * ny + y) * nx + x) * vec_dim + i] -
                Un_d[((z * ny + y-1) * nx + x) * vec_dim + i]);
            float S = 0.5 * (S_upwind + S_downwind); // S_av
            float r = 1.0e6;
            // make sure don't divide by zero
            if (abs(S_downwind) > 1.0e-7) {
                r = S_upwind / S_downwind;
            }
            S *= phi(r);
            q_p[i] = Un_d[offset + i] + S * 0.5;
            q_m[i] = Un_d[offset + i] - S * 0.5;
        }
        // fluxes at the y-boundaries
        flux_func(q_p, f, 1, gamma_up_d, alpha, beta_d, gamma);
        for (int i = 0; i < vec_dim; i++) {
            qy_plus_half[offset + i] = q_p[i];
            fy_plus_half[offset + i] = f[i];
        }
        flux_func(q_m, f, 1, gamma_up_d, alpha, beta_d, gamma);
        for (int i = 0; i < vec_dim; i++) {
            qy_minus_half[offset + i] = q_m[i];
            fy_minus_half[offset + i] = f[i];
        }
    }
    free(q_p);
    free(q_m);
    free(f);
}
__global__ void evolve_z(float * beta_d, float * gamma_up_d,
        float * Un_d, flux_func_ptr flux_func,
        float * qz_plus_half, float * qz_minus_half,
        float * fz_plus_half, float * fz_minus_half,
        int nx, int ny, int nz, int vec_dim, float alpha, float gamma,
        float dz, float dt,
        int kx_offset, int ky_offset) {
    /**
    First part of evolution through one timestep using finite volume methods.
    Reconstructs state vector to cell boundaries in the z-direction using a
    slope limiter and calculates fluxes there.

    NOTE: we assume that beta is smooth so can get value at cell boundaries
    with simple averaging

    Parameters
    ----------
    beta_d : float *
        shift vector at each grid point.
    gamma_up_d : float *
        gamma matrix at each grid point
    Un_d : float *
        state vector at each grid point in each layer
    flux_func : flux_func_ptr
        pointer to function to be used to calculate fluxes
    qz_plus_half, qz_minus_half : float *
        state vector reconstructed at top and bottom boundaries
    fz_plus_half, fz_minus_half : float *
        flux vector at top and bottom boundaries
    nx, ny, nz : int
        dimensions of grid
    vec_dim : int
        dimension of state vector
    alpha, gamma : float
        lapse function and adiabatic index
    dz, dt : float
        vertical grid spacing and timestep
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */
    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;
    int offset = ((z * ny + y) * nx + x) * vec_dim;

    // scratch space for the reconstructed states and their fluxes;
    // vec_dim is a runtime quantity so this comes off the device heap
    float * q_p, *q_m, * f;
    q_p = (float *)malloc(vec_dim * sizeof(float));
    q_m = (float *)malloc(vec_dim * sizeof(float));
    f = (float *)malloc(vec_dim * sizeof(float));
    // FIX: device-heap allocation can fail; bail out rather than
    // dereference a null pointer
    if (q_p == NULL || q_m == NULL || f == NULL) {
        if (q_p != NULL) free(q_p);
        if (q_m != NULL) free(q_m);
        if (f != NULL) free(f);
        return;
    }

    if ((x > 0) && (x < (nx-1)) && (y > 0) && (y < (ny-1)) && (z > 0) && (z < (nz-1))) {
        // z-direction: slope-limited linear reconstruction
        for (int i = 0; i < vec_dim; i++) {
            float S_upwind = (Un_d[(((z+1) * ny + y) * nx + x) * vec_dim + i] -
                Un_d[((z * ny + y) * nx + x) * vec_dim + i]);
            float S_downwind = (Un_d[((z * ny + y) * nx + x) * vec_dim + i] -
                Un_d[(((z-1) * ny + y) * nx + x) * vec_dim + i]);
            float S = 0.5 * (S_upwind + S_downwind); // S_av
            float r = 1.0e6;
            // make sure don't divide by zero
            if (abs(S_downwind) > 1.0e-7) {
                r = S_upwind / S_downwind;
            }
            S *= phi(r);
            q_p[i] = Un_d[offset + i] + S * 0.5;
            q_m[i] = Un_d[offset + i] - S * 0.5;
        }
        // fluxes at the z-boundaries
        flux_func(q_p, f, 2, gamma_up_d, alpha, beta_d, gamma);
        for (int i = 0; i < vec_dim; i++) {
            qz_plus_half[offset + i] = q_p[i];
            fz_plus_half[offset + i] = f[i];
        }
        flux_func(q_m, f, 2, gamma_up_d, alpha, beta_d, gamma);
        for (int i = 0; i < vec_dim; i++) {
            qz_minus_half[offset + i] = q_m[i];
            fz_minus_half[offset + i] = f[i];
        }
    }
    free(q_p);
    free(q_m);
    free(f);
}
__global__ void evolve_fv_fluxes(float * F,
        float * qx_plus_half, float * qx_minus_half,
        float * qy_plus_half, float * qy_minus_half,
        float * fx_plus_half, float * fx_minus_half,
        float * fy_plus_half, float * fy_minus_half,
        int nx, int ny, int nz, int vec_dim, float alpha,
        float dx, float dy, float dt,
        int kx_offset, int ky_offset) {
    /**
    Second part of the finite volume evolution: combines the previously
    reconstructed boundary states and fluxes into a net flux divergence
    at each interior cell by solving the Riemann problem at the four
    x/y cell faces.

    Parameters
    ----------
    F : float *
        output flux vector at each point in grid and each layer
    qx_plus_half, qx_minus_half : float *
        state vector reconstructed at right and left boundaries
    qy_plus_half, qy_minus_half : float *
        state vector reconstructed at top and bottom boundaries
    fx_plus_half, fx_minus_half : float *
        flux vector at right and left boundaries
    fy_plus_half, fy_minus_half : float *
        flux vector at top and bottom boundaries
    nx, ny, nz : int
        dimensions of grid
    vec_dim : int
        dimension of state vector
    alpha : float
        lapse function
    dx, dy, dt : float
        gridpoint spacing and timestep spacing
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */
    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;

    // only cells with a full stencil on both sides participate
    if ((x <= 1) || (x >= (nx-2)) || (y <= 1) || (y >= (ny-2)) || (z >= nz))
        return;

    // base offsets of this cell and its four face-neighbours
    int cell  = ((z * ny + y) * nx + x) * vec_dim;
    int left  = ((z * ny + y) * nx + (x-1)) * vec_dim;
    int right = ((z * ny + y) * nx + (x+1)) * vec_dim;
    int below = ((z * ny + (y-1)) * nx + x) * vec_dim;
    int above = ((z * ny + (y+1)) * nx + x) * vec_dim;

    for (int i = 0; i < vec_dim; i++) {
        // Lax-Friedrichs-style average of fluxes + jump terms at each face
        float flux_xm = 0.5 * (fx_plus_half[left + i] + fx_minus_half[cell + i]
            + qx_plus_half[left + i] - qx_minus_half[cell + i]);   // from i-1
        float flux_xp = 0.5 * (fx_plus_half[cell + i] + fx_minus_half[right + i]
            + qx_plus_half[cell + i] - qx_minus_half[right + i]);  // from i+1
        float flux_ym = 0.5 * (fy_plus_half[below + i] + fy_minus_half[cell + i]
            + qy_plus_half[below + i] - qy_minus_half[cell + i]);  // from j-1
        float flux_yp = 0.5 * (fy_plus_half[cell + i] + fy_minus_half[above + i]
            + qy_plus_half[cell + i] - qy_minus_half[above + i]);  // from j+1

        // keep the previous value so a NaN result can be rolled back (hack)
        float previous = F[cell + i];
        F[cell + i] = -alpha * ((flux_xp - flux_xm)/dx + (flux_yp - flux_ym)/dy);
        if (nan_check(F[cell + i])) {
            F[cell + i] = previous;
        }
    }
}
__global__ void evolve_z_fluxes(float * F,
        float * qz_plus_half, float * qz_minus_half,
        float * fz_plus_half, float * fz_minus_half,
        int nx, int ny, int nz, int vec_dim, float alpha,
        float dz, float dt,
        int kx_offset, int ky_offset) {
    /**
    Calculates fluxes in finite volume evolution by solving the Riemann
    problem at the cell boundaries in z direction.

    Adds -alpha * (z flux divergence) to F, which must already hold the
    x/y contribution written by evolve_fv_fluxes.

    Parameters
    ----------
    F : float *
        flux vector at each point in grid and each layer (updated in place)
    qz_plus_half, qz_minus_half : float *
        state vector reconstructed at right and left boundaries
    fz_plus_half, fz_minus_half : float *
        flux vector at top and bottom boundaries
    nx, ny, nz : int
        dimensions of grid
    vec_dim : int
        dimension of state vector
    alpha : float
        lapse function
    dz, dt : float
        gridpoint spacing and timestep spacing
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */
    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;

    // interior cells only (one ghost cell in each direction here)
    if ((x > 0) && (x < (nx-1)) && (y > 0) && (y < (ny-1)) && (z > 0) && (z < (nz-1))) {
        // hoist flat base indices out of the component loop
        int idx    = ((z * ny + y) * nx + x) * vec_dim;
        int idx_zm = (((z-1) * ny + y) * nx + x) * vec_dim;
        int idx_zp = (((z+1) * ny + y) * nx + x) * vec_dim;

        for (int i = 0; i < vec_dim; i++) {
            // NOTE: 0.5f keeps the arithmetic in single precision - the
            // previous 0.5 (double) literal promoted it to double
            // z-boundary, from k-1
            float fz_m = 0.5f * (
                fz_plus_half[idx_zm + i] + fz_minus_half[idx + i] +
                qz_plus_half[idx_zm + i] - qz_minus_half[idx + i]);
            // from k+1
            float fz_p = 0.5f * (
                fz_plus_half[idx + i] + fz_minus_half[idx_zp + i] +
                qz_plus_half[idx + i] - qz_minus_half[idx_zp + i]);

            float old_F = F[idx + i];
            F[idx + i] = F[idx + i] - alpha * (fz_p - fz_m) / dz;

            // HACK: revert to the previous value if the update went NaN
            if (nan_check(F[idx + i])) F[idx + i] = old_F;
        }
    }
}
__global__ void evolve_fv_heating(float * gamma_up,
float * Up, float * U_half,
float * qx_plus_half, float * qx_minus_half,
float * qy_plus_half, float * qy_minus_half,
float * fx_plus_half, float * fx_minus_half,
float * fy_plus_half, float * fy_minus_half,
float * sum_phs, float * rho, float * Q_d,
int nx, int ny, int nlayers, float alpha, float gamma,
float dx, float dy, float dt,
bool burning, float Cv, float E_He,
int kx_offset, int ky_offset) {
/**
Does the heating part of the evolution.
Parameters
----------
gamma_up : float *
gamma matrix at each grid point
Up : float *
state vector at next timestep
U_half : float *
state vector at half timestep
qx_plus_half, qx_minus_half : float *
state vector reconstructed at right and left boundaries
qy_plus_half, qy_minus_half : float *
state vector reconstructed at top and bottom boundaries
fx_plus_half, fx_minus_half : float *
flux vector at right and left boundaries
fy_plus_half, fy_minus_half : float *
flux vector at top and bottom boundaries
sum_phs : float *
sum of Phi in different layers
rho : float *
list of densities in different layers
Q_d : float *
heating rate in each layer
nx, ny, nlayers : int
dimensions of grid
alpha, gamma : float
lapse function and adiabatic index
dx, dy, dt : float
gridpoint spacing and timestep spacing
burning : bool
is burning present in this system?
Cv, E_He : float
specific heat in constant volume and energy release per unit mass of He
kx_offset, ky_offset : int
x, y offset for current kernel
*/
int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
int z = threadIdx.z;
// flat index of this cell in the (layer, y, x) ordering; each cell holds
// a 4-component SWE state vector
int offset = (z * ny + y) * nx + x;
// calculate Q
//calc_Q(Up, rho_d, Q_d, nx, ny, nlayers, kx_offset, ky_offset, burning);
float W = 1.0;
float X_dot = 0.0;
// do source terms
if ((x < nx) && (y < ny) && (z < nlayers)) {
float * q_swe;
// NOTE(review): device-side malloc with no NULL check - a failed
// allocation here (or for A/phis below) would dereference NULL
q_swe = (float *)malloc(4 * sizeof(float));
// copy this cell's 4-component state out of the half-step grid
for (int i = 0; i < 4; i++) {
q_swe[i] = U_half[offset * 4 + i];
}
// W_swe presumably returns the Lorentz factor of this state - TODO confirm
W = W_swe(q_swe, gamma_up);
float * A, * phis;
A = (float *)malloc(nlayers * sizeof(float));
phis = (float *)malloc(nlayers * sizeof(float));
// gather component 0 (Phi) of every layer in this column
for (int i = 0; i < nlayers; i++) {
phis[i] = U_half[((i * ny + y) * nx + x)* 4];
}
calc_As(rho, phis, A, nlayers, gamma, phis[0], rho[0]);
float p = p_from_swe(q_swe, gamma_up, rho[z], gamma, W, A[z]);
// Y = DX / D: mass fraction carried in component 3
float Y = q_swe[3] / q_swe[0];
X_dot = calc_Q_swe(rho[z], p, gamma, Y, Cv) / E_He;
//printf("p: %f, A: %f, X_dot : %f\n", p, A[z], X_dot);
free(phis);
free(A);
free(q_swe);
// NOTE(review): rescales Phi (component 0) in place by 1/W; the source
// terms below then divide by (W * U_half[offset*4]) - confirm this
// double application of W is intended
U_half[offset*4] /= W;
}
// barrier is at kernel scope (outside the conditional above), so every
// thread in the block reaches it
__syncthreads();
if ((x < nx) && (y < ny) && (z < nlayers)) {
sum_phs[offset] = 0.0;
float sum_qs = 0.0;
float deltaQx = 0.0;
float deltaQy = 0.0;
// contribution from the layer below (z+1), if any
if (z < (nlayers - 1)) {
sum_qs += (Q_d[z + 1] - Q_d[z]);
deltaQx = Q_d[z] *
(U_half[offset*4+1] - U_half[(((z + 1) * ny + y) * nx + x)*4+1]) /
(W * U_half[offset*4]);
deltaQy = (Q_d[z]) *
(U_half[offset*4+2] - U_half[(((z + 1) * ny + y) * nx + x)*4+2]) /
(W * U_half[offset*4]);
}
// contribution from the layer above (z-1), if any.
// NOTE(review): deltaQx/deltaQy are overwritten here, not accumulated
// with the z < nlayers-1 contribution above - confirm intended
if (z > 0) {
sum_qs += -rho[z-1] / rho[z] * (Q_d[z] - Q_d[z - 1]);
deltaQx = rho[z-1] / rho[z] * Q_d[z] *
(U_half[offset*4+1] - U_half[(((z - 1) * ny + y) * nx + x)*4+1]) /
(W * U_half[offset*4]);
deltaQy = rho[z-1] / rho[z] * Q_d[z] *
(U_half[offset*4+2] - U_half[(((z - 1) * ny + y) * nx + x)*4+2]) /
(W * U_half[offset*4]);
}
// sum the potentials of the layers above (density-weighted) ...
for (int j = 0; j < z; j++) {
sum_phs[offset] += rho[j] / rho[z] *
U_half[((j * ny + y) * nx + x)*4];
}
// ... and below (unweighted) this layer
for (int j = z+1; j < nlayers; j++) {
sum_phs[offset] += U_half[((j * ny + y) * nx + x)*4];
}
// NOTE: for now going to make Xdot a constant
//const float X_dot = 0.01;
// D
Up[offset*4] += dt * alpha * sum_qs;
//if (x < 10 && y < 10) printf("(%d, %d, %d) Q: %f, sum_qs: %f, deltaQx: %f, deltaQy: %f\n", x, y, z, Q_d[z], sum_qs, deltaQx, deltaQy);
// Sx
Up[offset*4+1] += dt * alpha * (-deltaQx);
// Sy
Up[offset*4+2] += dt * alpha * (-deltaQy);
// DX
Up[offset*4+3] += dt * alpha * X_dot;
}
}
__global__ void evolve2(float * Un_d, float * Up, float * U_half,
        float * sum_phs, int nx, int ny, int nlayers, int ng,
        float alpha, float dx, float dy, float dt,
        int kx_offset, int ky_offset) {
    /**
    Adds the slope-limited buoyancy terms to the updated state vector and
    copies the result back to the main grid.
    Parameters
    ----------
    Un_d : float *
        state vector at each grid point in each layer at current timestep
    Up : float *
        state vector at next timestep
    U_half : float *
        state vector at half timestep
    sum_phs : float *
        sum of Phi in different layers
    nx, ny, nlayers : int
        dimensions of grid
    ng : int
        number of ghost cells
    alpha : float
        lapse function
    dx, dy, dt : float
        gridpoint spacing and timestep spacing
    kx_offset, ky_offset : int
        x, y offset for current kernel
    */
    int x = kx_offset + blockIdx.x * blockDim.x + threadIdx.x;
    int y = ky_offset + blockIdx.y * blockDim.y + threadIdx.y;
    int z = threadIdx.z;
    int offset = (z * ny + y) * nx + x;

    // interior cells only; nothing below synchronises, so returning
    // early from boundary threads is safe
    if ((x <= 1) || (x >= (nx-2)) || (y <= 1) || (y >= (ny-2)) ||
        (z >= nlayers)) {
        return;
    }

    // x-direction: centred gradient of the summed potential, limited by
    // the upwind/downwind ratio through phi()
    float grad_up = sum_phs[(z * ny + y) * nx + x+1] - sum_phs[offset];
    float grad_down = sum_phs[offset] - sum_phs[(z * ny + y) * nx + x-1];
    float buoy = 0.5 * (grad_up + grad_down);
    float ratio = 1.0e6;
    if (abs(grad_down) > 1.0e-10) {
        ratio = grad_up / grad_down;
    }
    buoy *= dt * alpha * U_half[offset*4] * 0.5 * phi(ratio);
    // only apply the correction when it respects the CFL-like bound
    if (abs(buoy) < 0.9 * dx / dt) {
        Up[offset*4+1] -= buoy;
    }

    // y-direction: same limiter applied to the y gradient
    grad_up = sum_phs[(z * ny + y+1) * nx + x] - sum_phs[offset];
    grad_down = sum_phs[offset] - sum_phs[(z * ny + y-1) * nx + x];
    buoy = 0.5 * (grad_up + grad_down);
    ratio = 1.0e6;
    if (abs(grad_down) > 1.0e-10) {
        ratio = grad_up / grad_down;
    }
    buoy *= dt * alpha * U_half[offset*4] * 0.5 * phi(ratio);
    if (abs(buoy) < 0.9 * dy / dt) {
        Up[offset*4+2] -= buoy;
    }

    // copy the corrected state back to the main grid
    for (int i = 0; i < 4; i++) {
        Un_d[offset*4+i] = Up[offset*4+i];
    }
}
void homogeneuous_fv(dim3 * kernels, dim3 * threads, dim3 * blocks,
        int * cumulative_kernels, float * beta_d, float * gamma_up_d,
        float * Un_d, float * F_d,
        float * qx_p_d, float * qx_m_d, float * qy_p_d, float * qy_m_d,
        float * qz_p_d, float * qz_m_d,
        float * fx_p_d, float * fx_m_d, float * fy_p_d, float * fy_m_d,
        float * fz_p_d, float * fz_m_d,
        int nx, int ny, int nz, int vec_dim, int ng, float alpha, float gamma,
        float dx, float dy, float dz, float dt, int rank,
        flux_func_ptr h_flux_func, bool do_z) {
    /**
    Solves the homogeneous part of the equation (ie the bit without source terms).

    Launches two passes over this rank's kernel grid: first the boundary
    reconstruction kernels (evolve_fv / evolve_z), then the flux-divergence
    kernels (evolve_fv_fluxes / evolve_z_fluxes).

    Parameters
    ----------
    kernels, threads, blocks : dim3 *
        number of kernels, threads and blocks for each process/kernel
    cumulative_kernels : int *
        Cumulative total of kernels in ranks < rank of current MPI process
    beta_d : float *
        shift vector at each grid point
    gamma_up_d : float *
        gamma matrix at each grid point
    Un_d : float *
        state vector at each grid point in each layer at current timestep
    F_d : float *
        flux vector
    qx_p_d, qx_m_d : float *
        state vector reconstructed at right and left boundaries
    qy_p_d, qy_m_d : float *
        state vector reconstructed at top and bottom boundaries
    fx_p_d, fx_m_d : float *
        flux vector at right and left boundaries
    fy_p_d, fy_m_d : float *
        flux vector at top and bottom boundaries
    nx, ny, nz : int
        dimensions of grid
    alpha, gamma : float
        lapse function and adiabatic index
    dx, dy, dz, dt : float
        gridpoint spacing and timestep spacing
    rank : int
        rank of MPI process
    h_flux_func : flux_func_ptr
        pointer to function to be used to calculate fluxes
    do_z : bool
        should we evolve in the z direction?
    */
    // index of this rank's first kernel in the flattened blocks/threads arrays
    int k_offset = (rank > 0) ? cumulative_kernels[rank - 1] : 0;
    int ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
    int kx_offset = 0;

    // pass 1: reconstruct boundary states and fluxes
    for (int j = 0; j < kernels[rank].y; j++) {
        kx_offset = 0;
        for (int i = 0; i < kernels[rank].x; i++) {
            int kid = k_offset + j * kernels[rank].x + i;
            evolve_fv<<<blocks[kid], threads[kid]>>>(beta_d, gamma_up_d, Un_d, h_flux_func,
                   qx_p_d, qx_m_d, qy_p_d, qy_m_d,
                   fx_p_d, fx_m_d, fy_p_d, fy_m_d,
                   nx, ny, nz, vec_dim, alpha, gamma,
                   dx, dy, dt, kx_offset, ky_offset);
            if (do_z) {
                evolve_z<<<blocks[kid], threads[kid]>>>(beta_d, gamma_up_d, Un_d, h_flux_func,
                       qz_p_d, qz_m_d,
                       fz_p_d, fz_m_d,
                       nx, ny, nz, vec_dim, alpha, gamma,
                       dz, dt, kx_offset, ky_offset);
            }
            kx_offset += blocks[kid].x * threads[kid].x - 2*ng;
        }
        int row = k_offset + j * kernels[rank].x;
        ky_offset += blocks[row].y * threads[row].y - 2*ng;
    }

    // pass 2: combine boundary fluxes into the flux divergence F
    ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
    for (int j = 0; j < kernels[rank].y; j++) {
        kx_offset = 0;
        for (int i = 0; i < kernels[rank].x; i++) {
            int kid = k_offset + j * kernels[rank].x + i;
            evolve_fv_fluxes<<<blocks[kid], threads[kid]>>>(
                   F_d,
                   qx_p_d, qx_m_d, qy_p_d, qy_m_d,
                   fx_p_d, fx_m_d, fy_p_d, fy_m_d,
                   nx, ny, nz, vec_dim, alpha,
                   dx, dy, dt, kx_offset, ky_offset);
            if (do_z) {
                evolve_z_fluxes<<<blocks[kid], threads[kid]>>>(
                       F_d,
                       qz_p_d, qz_m_d,
                       fz_p_d, fz_m_d,
                       nx, ny, nz, vec_dim, alpha,
                       dz, dt, kx_offset, ky_offset);
            }
            kx_offset += blocks[kid].x * threads[kid].x - 2*ng;
        }
        int row = k_offset + j * kernels[rank].x;
        ky_offset += blocks[row].y * threads[row].y - 2*ng;
    }
}
// Enforce boundary conditions on a host-side grid: locally for a single
// process, otherwise via an MPI halo exchange.
static void rk3_enforce_bcs(float * grid, int nx, int ny, int nz, int vec_dim,
        int ng, dim3 * kernels, dim3 * blocks, dim3 * threads,
        MPI_Comm comm, MPI_Status status,
        int rank, int n_processes, bool do_z) {
    if (n_processes == 1) {
        bcs_fv(grid, nx, ny, nz, ng, vec_dim);
    } else {
        int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
        bcs_mpi(grid, nx, ny, nz, vec_dim, ng, comm, status, rank,
                n_processes, y_size, do_z);
    }
}

// HACK: hacky data sanitisation of the compressible state vector
// (hard-codes vec_dim == 6; only called when do_z is true).
// NOTE: could argue that this is actually a form of artificial
// dissipation to ensure stability (as it is just smoothing out
// spikes in the data after all)
static void rk3_sanitise(float * Up_h, int n_cells) {
    for (int x = 0; x < n_cells; x++) {
        // clamp runaway densities
        if (abs(Up_h[x*6]) > 1.0e2) {
            Up_h[x*6] = 0.5;
        }
        // tau must be positive and bounded
        if (abs(Up_h[x*6+4]) > 1.0e3 || Up_h[x*6+4] < 0.0) {
            Up_h[x*6+4] = Up_h[x*6];
        }
        // mass fraction must lie in [0, 1]
        if (Up_h[x*6+5] > 1.0) Up_h[x*6+5] = 1.0;
        if (Up_h[x*6+5] < 0.0) Up_h[x*6+5] = 0.0;
        // zero any momentum component larger than the density
        for (int i = 1; i < 4; i++) {
            if (abs(Up_h[x*6+i]) > Up_h[x*6]) {
                Up_h[x*6+i] = 0.0;
            }
        }
    }
}

void rk3(dim3 * kernels, dim3 * threads, dim3 * blocks,
        int * cumulative_kernels,
        float * beta_d, float * gamma_up_d, float * Un_d,
        float * F_d, float * Up_d,
        float * qx_p_d, float * qx_m_d, float * qy_p_d, float * qy_m_d,
        float * qz_p_d, float * qz_m_d,
        float * fx_p_d, float * fx_m_d, float * fy_p_d, float * fy_m_d,
        float * fz_p_d, float * fz_m_d,
        int nx, int ny, int nz, int vec_dim, int ng, float alpha, float gamma,
        float dx, float dy, float dz, float dt,
        float * Up_h, float * F_h, float * Un_h,
        MPI_Comm comm, MPI_Status status, int rank, int n_processes,
        flux_func_ptr flux_func, bool do_z) {
    /**
    Integrates the homogeneous part of the ODE in time using RK3 (three
    Shu-Osher stages). Each stage launches the finite-volume kernels,
    copies the flux back to the host, enforces boundaries, forms the
    stage combination and (for the first two stages) copies the result
    back to the device. On exit Un_h holds the new timestep.

    Parameters
    ----------
    kernels, threads, blocks : dim3 *
        number of kernels, threads and blocks for each process/kernel
    cumulative_kernels : int *
        Cumulative total of kernels in ranks < rank of current MPI process
    beta_d : float *
        shift vector at each grid point
    gamma_up_d : float *
        gamma matrix at each grid point
    Un_d : float *
        state vector at each grid point in each layer at current timestep on device
    F_d : float *
        flux vector on device
    Up_d : float *
        state vector at next timestep on device
    qx_p_d, qx_m_d : float *
        state vector reconstructed at right and left boundaries
    qy_p_d, qy_m_d : float *
        state vector reconstructed at top and bottom boundaries
    fx_p_d, fx_m_d : float *
        flux vector at right and left boundaries
    fy_p_d, fy_m_d : float *
        flux vector at top and bottom boundaries
    nx, ny, nz : int
        dimensions of grid
    vec_dim : int
        dimension of state vector
    ng : int
        number of ghost cells
    alpha, gamma : float
        lapse function and adiabatic index
    dx, dy, dz, dt : float
        gridpoint spacing and timestep spacing
    Up_h, F_h, Un_h : float *
        state vector at next timestep, flux vector and state vector at current timestep on host
    comm : MPI_Comm
        MPI communicator
    status: MPI_Status
        status of MPI processes
    rank, n_processes : int
        rank of current MPI process and total number of MPI processes
    flux_func : flux_func_ptr
        pointer to function to be used to calculate fluxes
    do_z : bool
        should we evolve in the z direction?
    */
    int grid_size = nx * ny * nz * vec_dim;

    // stage 1: u1 = un + dt * F(un)
    homogeneuous_fv(kernels, threads, blocks, cumulative_kernels,
          beta_d, gamma_up_d, Un_d, F_d,
          qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
          fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
          nx, ny, nz, vec_dim, ng, alpha, gamma,
          dx, dy, dz, dt, rank, flux_func, do_z);
    // copy back flux
    cudaMemcpy(F_h, F_d, grid_size*sizeof(float), cudaMemcpyDeviceToHost);
    rk3_enforce_bcs(F_h, nx, ny, nz, vec_dim, ng, kernels, blocks, threads,
          comm, status, rank, n_processes, do_z);
    for (int n = 0; n < grid_size; n++) {
        Up_h[n] = Un_h[n] + dt * F_h[n];
    }
    // enforce boundaries and copy back
    rk3_enforce_bcs(Up_h, nx, ny, nz, vec_dim, ng, kernels, blocks, threads,
          comm, status, rank, n_processes, do_z);
    if (do_z) {
        rk3_sanitise(Up_h, nx * ny * nz);
    }
    cudaMemcpy(Un_d, Up_h, grid_size*sizeof(float), cudaMemcpyHostToDevice);

    // stage 2: u2 = 0.25 * (3*un + u1 + dt*F(u1))
    homogeneuous_fv(kernels, threads, blocks, cumulative_kernels,
          beta_d, gamma_up_d, Un_d, F_d,
          qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
          fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
          nx, ny, nz, vec_dim, ng, alpha, gamma,
          dx, dy, dz, dt, rank, flux_func, do_z);
    // copy back flux
    cudaMemcpy(F_h, F_d, grid_size*sizeof(float), cudaMemcpyDeviceToHost);
    rk3_enforce_bcs(F_h, nx, ny, nz, vec_dim, ng, kernels, blocks, threads,
          comm, status, rank, n_processes, do_z);
    for (int n = 0; n < grid_size; n++) {
        Up_h[n] = 0.25 * (3.0 * Un_h[n] + Up_h[n] + dt * F_h[n]);
    }
    // enforce boundaries and copy back
    rk3_enforce_bcs(Up_h, nx, ny, nz, vec_dim, ng, kernels, blocks, threads,
          comm, status, rank, n_processes, do_z);
    if (do_z) {
        rk3_sanitise(Up_h, nx * ny * nz);
    }
    cudaMemcpy(Un_d, Up_h, grid_size*sizeof(float), cudaMemcpyHostToDevice);

    // stage 3: un+1 = (1/3) * (un + 2*u2 + 2*dt*F(u2))
    homogeneuous_fv(kernels, threads, blocks, cumulative_kernels,
          beta_d, gamma_up_d, Un_d, F_d,
          qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
          fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
          nx, ny, nz, vec_dim, ng, alpha, gamma,
          dx, dy, dz, dt, rank, flux_func, do_z);
    // copy back flux
    cudaMemcpy(F_h, F_d, grid_size*sizeof(float), cudaMemcpyDeviceToHost);
    rk3_enforce_bcs(F_h, nx, ny, nz, vec_dim, ng, kernels, blocks, threads,
          comm, status, rank, n_processes, do_z);
    for (int n = 0; n < grid_size; n++) {
        Up_h[n] = (1/3.0) * (Un_h[n] + 2.0*Up_h[n] + 2.0*dt * F_h[n]);
    }
    // enforce boundaries
    rk3_enforce_bcs(Up_h, nx, ny, nz, vec_dim, ng, kernels, blocks, threads,
          comm, status, rank, n_processes, do_z);
    if (do_z) {
        rk3_sanitise(Up_h, nx * ny * nz);
    }
    // publish the result on the host; the caller copies to device as needed
    for (int j = 0; j < grid_size; j++) {
        Un_h[j] = Up_h[j];
    }
}
void cuda_run(float * beta, float * gamma_up, float * Uc_h, float * Uf_h,
float * rho, float * Q,
int nx, int ny, int nlayers,
int nxf, int nyf, int nz, int ng,
int nt, float alpha, float gamma, float E_He, float Cv,
float zmin,
float dx, float dy, float dz, float dt, bool burning,
int dprint, char * filename,
MPI_Comm comm, MPI_Status status, int rank, int n_processes,
int * matching_indices) {
/**
Evolve system through nt timesteps, saving data to filename every dprint timesteps.
Parameters
----------
beta : float *
shift vector at each grid point
gamma_up : float *
gamma matrix at each grid point
Uc_h : float *
state vector at each grid point in each layer at current timestep on host in coarse grid
Uf_h : float *
state vector at each grid point in each layer at current timestep on host in fine grid
rho : float *
densities in each layer
Q : float *
heating rate at each point and in each layer
nx, ny, nlayers : int
dimensions of coarse grid
nxf, nyf, nz : int
dimensions of fine grid
ng : int
number of ghost cells
nt : int
total number of timesteps
alpha : float
lapse function
gamma : float
adiabatic index
E_He : float
energy release per unit mass of helium
Cv : float
specific heat in constant volume
zmin : float
height of sea floor
dx, dy, dz, dt : float
gridpoint spacing and timestep spacing
burning : bool
is burning included in this system?
dprint : int
number of timesteps between each printout
filename : char *
name of file to which output is printed
comm : MPI_Comm
MPI communicator
status: MPI_Status
status of MPI processes
rank, n_processes : int
rank of current MPI process and total number of MPI processes
matching_indices : int *
position of fine grid wrt coarse grid
*/
// set up GPU stuff
int count;
cudaGetDeviceCount(&count);
if (rank == 0) {
cudaError_t err = cudaGetLastError();
// check that we actually have some GPUS
if (err != cudaSuccess) {
printf("Error: %s\n", cudaGetErrorString(err));
printf("Aborting program.\n");
return;
}
printf("Found %i CUDA devices\n", count);
}
// if rank > number of GPUs, exit now
if (rank >= count) {
return;
}
// redefine - we only want to run on as many cores as we have GPUs
if (n_processes > count) {
n_processes = count;
}
if (rank == 0) {
printf("Running on %i processor(s)\n", n_processes);
}
int maxThreads = 256;
int maxBlocks = 256; //64;
dim3 *kernels = new dim3[n_processes];
int *cumulative_kernels = new int[n_processes];
getNumKernels(max(nx, nxf), max(ny, nyf), max(nlayers, nz), ng, n_processes, &maxBlocks, &maxThreads, kernels, cumulative_kernels);
int total_kernels = cumulative_kernels[n_processes-1];
dim3 *blocks = new dim3[total_kernels];
dim3 *threads = new dim3[total_kernels];
getNumBlocksAndThreads(max(nx, nxf), max(ny, nyf), max(nlayers, nz), ng, maxBlocks, maxThreads, n_processes, kernels, blocks, threads);
printf("rank: %i\n", rank);
printf("kernels: (%i, %i)\n", kernels[rank].x, kernels[rank].y);
printf("cumulative kernels: %i\n", cumulative_kernels[rank]);
int k_offset = 0;
if (rank > 0) {
k_offset = cumulative_kernels[rank-1];
}
for (int i = k_offset; i < cumulative_kernels[rank]; i++) {
printf("blocks: (%i, %i, %i) , threads: (%i, %i, %i)\n",
blocks[i].x, blocks[i].y, blocks[i].z,
threads[i].x, threads[i].y, threads[i].z);
}
// gpu variables
float * beta_d, * gamma_up_d, * Uc_d, * Uf_d, * rho_d, * Q_d;
// initialise Uf_h
for (int i = 0; i < nxf*nyf*nz*6; i++) {
Uf_h[i] = 0.0;
}
// set device
cudaSetDevice(rank);
// allocate memory on device
cudaMalloc((void**)&beta_d, 3*sizeof(float));
cudaMalloc((void**)&gamma_up_d, 9*sizeof(float));
cudaMalloc((void**)&Uc_d, nx*ny*nlayers*4*sizeof(float));
cudaMalloc((void**)&Uf_d, nxf*nyf*nz*6*sizeof(float));
cudaMalloc((void**)&rho_d, nlayers*sizeof(float));
cudaMalloc((void**)&Q_d, nlayers*sizeof(float));
// copy stuff to GPU
cudaMemcpy(beta_d, beta, 3*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gamma_up_d, gamma_up, 9*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(Uf_d, Uf_h, nxf*nyf*nz*6*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(rho_d, rho, nlayers*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(Q_d, Q, nlayers*sizeof(float), cudaMemcpyHostToDevice);
float *Upc_d, *Uc_half_d, *Upf_d, *Uf_half_d, *old_phi_d, *sum_phs_d;
cudaMalloc((void**)&Upc_d, nx*ny*nlayers*4*sizeof(float));
cudaMalloc((void**)&Uc_half_d, nx*ny*nlayers*4*sizeof(float));
cudaMalloc((void**)&Upf_d, nxf*nyf*nz*6*sizeof(float));
cudaMalloc((void**)&Uf_half_d, nxf*nyf*nz*6*sizeof(float));
cudaMalloc((void**)&old_phi_d, nlayers*nx*ny*sizeof(float));
cudaMalloc((void**)&sum_phs_d, nlayers*nx*ny*sizeof(float));
// need to fill old_phi with current phi to initialise
float *pphi = new float[nlayers*nx*ny];
for (int i = 0; i < nlayers*nx*ny; i++) {
pphi[i] = Uc_h[i*4];
}
cudaMemcpy(old_phi_d, pphi, nx*ny*nlayers*sizeof(float), cudaMemcpyHostToDevice);
float *qx_p_d, *qx_m_d, *qy_p_d, *qy_m_d, *qz_p_d, *qz_m_d, *fx_p_d, *fx_m_d, *fy_p_d, *fy_m_d, *fz_p_d, *fz_m_d;
float *Upc_h = new float[nx*ny*nlayers*4];
float *Fc_h = new float[nx*ny*nlayers*4];
float *Upf_h = new float[nxf*nyf*nz*6];
float *Ff_h = new float[nxf*nyf*nz*6];
float * sum_phs_h = new float[nx*ny*nlayers];
// initialise
for (int j = 0; j < nxf*nyf*nz*6; j++) {
Upf_h[j] = 0.0;
}
int grid_size = max(nx*ny*nlayers*4, nxf*nyf*nz*6);
cudaMalloc((void**)&qx_p_d, grid_size*sizeof(float));
cudaMalloc((void**)&qx_m_d, grid_size*sizeof(float));
cudaMalloc((void**)&qy_p_d, grid_size*sizeof(float));
cudaMalloc((void**)&qy_m_d, grid_size*sizeof(float));
cudaMalloc((void**)&qz_p_d, grid_size*sizeof(float));
cudaMalloc((void**)&qz_m_d, grid_size*sizeof(float));
cudaMalloc((void**)&fx_p_d, grid_size*sizeof(float));
cudaMalloc((void**)&fx_m_d, grid_size*sizeof(float));
cudaMalloc((void**)&fy_p_d, grid_size*sizeof(float));
cudaMalloc((void**)&fy_m_d, grid_size*sizeof(float));
cudaMalloc((void**)&fz_p_d, grid_size*sizeof(float));
cudaMalloc((void**)&fz_m_d, grid_size*sizeof(float));
float * q_comp_d;
cudaMalloc((void**)&q_comp_d, nx*ny*nlayers*6*sizeof(float));
float * qf_swe;
cudaMalloc((void**)&qf_swe, nxf*nyf*nz*4*sizeof(float));
int * matching_indices_d;
cudaMalloc((void**)&matching_indices_d, 4*sizeof(int));
cudaMemcpy(matching_indices_d, matching_indices, 4*sizeof(int), cudaMemcpyHostToDevice);
// make host-side function pointers to __device__ functions
flux_func_ptr h_compressible_fluxes;
flux_func_ptr h_shallow_water_fluxes;
// copy function pointers to host equivalent
cudaMemcpyFromSymbol(&h_compressible_fluxes, d_compressible_fluxes, sizeof(flux_func_ptr));
cudaMemcpyFromSymbol(&h_shallow_water_fluxes, d_shallow_water_fluxes, sizeof(flux_func_ptr));
if (strcmp(filename, "na") != 0) {
hid_t outFile, dset, mem_space, file_space;
if (rank == 0) {
// create file
outFile = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
// create dataspace
int ndims = 5;
hsize_t dims[] = {hsize_t((nt+1)/dprint+1), hsize_t(nlayers), (ny), hsize_t(nx), 4};
file_space = H5Screate_simple(ndims, dims, NULL);
hid_t plist = H5Pcreate(H5P_DATASET_CREATE);
H5Pset_layout(plist, H5D_CHUNKED);
hsize_t chunk_dims[] = {1, hsize_t(nlayers), hsize_t(ny), hsize_t(nx), 4};
H5Pset_chunk(plist, ndims, chunk_dims);
// create dataset
dset = H5Dcreate(outFile, "SwerveOutput", H5T_NATIVE_FLOAT, file_space, H5P_DEFAULT, plist, H5P_DEFAULT);
H5Pclose(plist);
// make a memory dataspace
mem_space = H5Screate_simple(ndims, chunk_dims, NULL);
// select a hyperslab
file_space = H5Dget_space(dset);
hsize_t start[] = {0, 0, 0, 0, 0};
hsize_t hcount[] = {1, hsize_t(nlayers), hsize_t(ny), hsize_t(nx), 4};
H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, hcount, NULL);
// write to dataset
printf("Printing t = %i\n", 0);
H5Dwrite(dset, H5T_NATIVE_FLOAT, mem_space, file_space, H5P_DEFAULT, Uc_h);
// close file dataspace
H5Sclose(file_space);
}
cudaError_t err;
err = cudaGetLastError();
if (err != cudaSuccess){
cout << "Before evolution\n";
printf("Error: %s\n", cudaGetErrorString(err));
}
// main loop
for (int t = 0; t < nt; t++) {
cout << "Evolving t = " << t << '\n';
int ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
// good here
/*cout << "\nCoarse grid before prolonging\n\n";
for (int y = 0; y < ny; y++) {
for (int x = 0; x < nx; x++) {
cout << '(' << x << ',' << y << "): ";
for (int z = 0; z < nlayers; z++) {
cout << Uc_h[(((z*ny + y)*nx)+x)*4] << ',';
}
cout << '\n';
}
}*/
//cout << "\n\nProlonging\n\n";
// prolong to fine grid
prolong_grid(kernels, threads, blocks, cumulative_kernels,
Uc_d, Uf_d, nx, ny, nlayers, nxf, nyf, nz, dx, dy, dz, dt, zmin, gamma_up_d,
rho_d, gamma, matching_indices_d, ng, rank, q_comp_d, old_phi_d);
cudaMemcpy(Uf_h, Uf_d, nxf*nyf*nz*6*sizeof(float), cudaMemcpyDeviceToHost);
err = cudaGetLastError();
if (err != cudaSuccess){
cout << "After prolonging\n";
printf("Error: %s\n", cudaGetErrorString(err));
}
/*cout << "\nFine grid after prolonging\n\n";
for (int y = 0; y < nyf; y++) {
for (int x = 0; x < nxf; x++) {
cout << '(' << x << ',' << y << "): ";
for (int z = 0; z < nz; z++) {
cout << Uf_h[(((z*nyf + y)*nxf)+x)*6+4] << ',';
}
cout << '\n';
}
}*/
// enforce boundaries
if (n_processes == 1) {
bcs_fv(Uf_h, nxf, nyf, nz, ng, 6);
} else {
int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
bcs_mpi(Uf_h, nxf, nyf, nz, 6, ng, comm, status, rank, n_processes, y_size, true);
}
/*cout << "\nFine grid after prolonging\n\n";
for (int y = 0; y < nyf; y++) {
for (int x = 0; x < nxf; x++) {
cout << '(' << x << ',' << y << "): ";
for (int z = 0; z < nz; z++) {
cout << Uf_h[(((z*nyf + y)*nxf)+x)*6+4] << ',';
}
cout << '\n';
}
}*/
cudaMemcpy(Uf_d, Uf_h, nxf*nyf*nz*6*sizeof(float), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess) {
cout << "Before fine rk3\n";
printf("Error: %s\n", cudaGetErrorString(err));
}
// evolve fine grid through two subcycles
for (int i = 0; i < 2; i++) {
rk3(kernels, threads, blocks, cumulative_kernels,
beta_d, gamma_up_d, Uf_d, Uf_half_d, Upf_d,
qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
nxf, nyf, nz, 6, ng, alpha, gamma,
dx*0.5, dy*0.5, dz, dt*0.5, Upf_h, Ff_h, Uf_h,
comm, status, rank, n_processes,
h_compressible_fluxes, true);
// enforce boundaries is done within rk3
/*if (n_processes == 1) {
bcs_fv(Uf_h, nxf, nyf, nz, ng, 6);
} else {
int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
bcs_mpi(Uf_h, nxf, nyf, nz, 6, ng, comm, status, rank, n_processes, y_size);
}*/
/*cout << "\nFine grid\n\n";
for (int y = 0; y < nyf; y++) {
for (int x = 0; x < nxf; x++) {
cout << '(' << x << ',' << y << "): ";
for (int z = 0; z < nz; z++) {
if (abs(Uf_h[(((z*nyf + y)*nxf)+x)*6+4]) > 30.0)
cout << Uf_h[(((z*nyf + y)*nxf)+x)*6+4] << ',';
}
cout << '\n';
}
}*/
cudaDeviceSynchronize();
// hack on the burning
float * H = new float[nxf*nyf*nz];
calc_Q(rho, Uf_h, nxf, nyf, nz, gamma, gamma_up, H, Cv);
for (int z = 0; z < nz; z++) {
for (int y = ng; y < nyf-ng; y++) {
for (int x = ng; x < nxf - ng; x++) {
// tau
Uf_h[((z * nyf + y) * nxf + x) * 6 + 4] += dt * 0.5 * alpha * Uf_h[((z * nyf + y) * nxf + x) * 6] * H[(z * nyf + y) * nxf + x];
float X_dot = H[(z * nyf + y) * nxf + x] / E_He;
// DX
Uf_h[((z * nyf + y) * nxf + x) * 6 + 5] += dt * 0.5 * alpha * rho[0] * X_dot;
}
}
}
delete[] H;
if (n_processes == 1) {
bcs_fv(Uf_h, nxf, nyf, nz, ng, 6);
} else {
int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
bcs_mpi(Uf_h, nxf, nyf, nz, 6, ng, comm, status, rank, n_processes, y_size, false);
}
// copy to device
cudaMemcpy(Uf_d, Uf_h, nxf*nyf*nz*6*sizeof(float), cudaMemcpyHostToDevice);
}
err = cudaGetLastError();
if (err != cudaSuccess){
cout << "Before restricting\n";
printf("Error: %s\n", cudaGetErrorString(err));
}
//cout << "\n\nRestricting\n\n";
// probably good here
/*cout << "\nFine grid before restricting\n\n";
for (int y = 0; y < nyf; y++) {
for (int x = 0; x < nxf; x++) {
cout << '(' << x << ',' << y << "): ";
for (int z = 0; z < nz; z++) {
cout << Uf_h[(((z*nyf + y)*nxf)+x)*6+4] << ',';
}
cout << '\n';
}
}*/
/*cout << "\nCoarse grid before restricting\n\n";
for (int z = 0; z < nlayers; z++) {
for (int y = 0; y < ny; y++) {
for (int x = 0; x < nx; x++) {
cout << '(' << x << ',' << y << ',' << z << "): " << Uc_h[(((z*ny+y)*nx)+x)*4+1] << ',' << Uc_h[(((z*ny+y)*nx)+x)*4+2] << ',' << Uc_h[(((z*ny+y)*nx)+x)*4+3] << '\n';
}
}
}*/
// restrict to coarse grid
restrict_grid(kernels, threads, blocks, cumulative_kernels,
Uc_d, Uf_d, nx, ny, nlayers, nxf, nyf, nz,
dz, zmin, matching_indices_d,
rho_d, gamma, gamma_up_d, ng, rank, qf_swe);
err = cudaGetLastError();
if (err != cudaSuccess){
cout << "After restricting\n";
printf("Error: %s\n", cudaGetErrorString(err));
}
cudaMemcpy(Uc_h, Uc_d, nx*ny*nlayers*4*sizeof(float), cudaMemcpyDeviceToHost);
err = cudaGetLastError();
if (err != cudaSuccess){
cout << "After copying\n";
printf("Error: %s\n", cudaGetErrorString(err));
}
// enforce boundaries
if (n_processes == 1) {
bcs_fv(Uc_h, nx, ny, nlayers, ng, 4);
} else {
int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
bcs_mpi(Uc_h, nx, ny, nlayers, 4, ng, comm, status, rank, n_processes, y_size, false);
}
cudaMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), cudaMemcpyHostToDevice);
/*cout << "\nCoarse grid after restricting\n\n";
for (int y = 0; y < ny; y++) {
for (int x = 0; x < nx; x++) {
cout << '(' << x << ',' << y << "): ";
for (int z = 0; z < nlayers; z++) {
cout << Uc_h[(((z*ny + y)*nx)+x)*4] << ',';
}
cout << '\n';
}
}*/
err = cudaGetLastError();
if (err != cudaSuccess){
cout << "Coarse rk3\n";
printf("Error: %s\n", cudaGetErrorString(err));
}
rk3(kernels, threads, blocks, cumulative_kernels,
beta_d, gamma_up_d, Uc_d, Uc_half_d, Upc_d,
qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
nx, ny, nlayers, 4, ng, alpha, gamma,
dx, dy, dz, dt, Upc_h, Fc_h, Uc_h,
comm, status, rank, n_processes,
h_shallow_water_fluxes, false);
err = cudaGetLastError();
if (err != cudaSuccess){
cout << "Done coarse rk3\n";
printf("Error: %s\n", cudaGetErrorString(err));
}
cudaMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), cudaMemcpyHostToDevice);
// update old_phi
for (int i = 0; i < nlayers*nx*ny; i++) {
pphi[i] = Uc_h[i*4];
}
cudaMemcpy(old_phi_d, pphi, nx*ny*nlayers*sizeof(float), cudaMemcpyHostToDevice);
/*cout << "\nCoarse grid after rk3\n\n";
for (int y = 0; y < ny; y++) {
for (int x = 0; x < nx; x++) {
cout << '(' << x << ',' << y << "): ";
for (int z = 0; z < nlayers; z++) {
cout << Uc_h[(((z*ny + y)*nx)+x)*4] << ',';
}
cout << '\n';
}
}*/
cudaMemcpy(Upc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(Uc_half_d, Uc_h, nx*ny*nlayers*4*sizeof(float), cudaMemcpyHostToDevice);
float kx_offset = 0;
ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
for (int j = 0; j < kernels[rank].y; j++) {
kx_offset = 0;
for (int i = 0; i < kernels[rank].x; i++) {
evolve_fv_heating<<<blocks[k_offset + j * kernels[rank].x + i], threads[k_offset + j * kernels[rank].x + i]>>>(
gamma_up_d,
Upc_d, Uc_half_d,
qx_p_d, qx_m_d, qy_p_d, qy_m_d,
fx_p_d, fx_m_d, fy_p_d, fy_m_d,
sum_phs_d, rho_d, Q_d,
nx, ny, nlayers, alpha, gamma,
dx, dy, dt, burning, Cv, E_He,
kx_offset, ky_offset);
kx_offset += blocks[k_offset + j * kernels[rank].x + i].x * threads[k_offset + j * kernels[rank].x + i].x - 2*ng;
}
ky_offset += blocks[k_offset + j * kernels[rank].x].y * threads[k_offset + j * kernels[rank].x].y - 2*ng;
}
cudaMemcpy(Upc_h, Upc_d, nx*ny*nlayers*4*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(sum_phs_h, sum_phs_d, nx*ny*nlayers*sizeof(float), cudaMemcpyDeviceToHost);
// enforce boundaries
if (n_processes == 1) {
bcs_fv(Upc_h, nx, ny, nlayers, ng, 4);
bcs_fv(sum_phs_h, nx, ny, nlayers, ng, 1);
} else {
int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
bcs_mpi(Upc_h, nx, ny, nlayers, 4, ng, comm, status, rank, n_processes, y_size, false);
bcs_mpi(sum_phs_h, nx, ny, nlayers, 1, ng, comm, status, rank, n_processes, y_size, false);
}
cudaMemcpy(Upc_d, Upc_h, nx*ny*nlayers*4*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(sum_phs_d, sum_phs_h, nx*ny*nlayers*sizeof(float), cudaMemcpyHostToDevice);
kx_offset = 0;
ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
for (int j = 0; j < kernels[rank].y; j++) {
kx_offset = 0;
for (int i = 0; i < kernels[rank].x; i++) {
evolve2<<<blocks[k_offset + j * kernels[rank].x + i], threads[k_offset + j * kernels[rank].x + i]>>>(Uc_d,
Upc_d, Uc_half_d, sum_phs_d,
nx, ny, nlayers, ng, alpha,
dx, dy, dt, kx_offset, ky_offset);
kx_offset += blocks[k_offset + j * kernels[rank].x + i].x * threads[k_offset + j * kernels[rank].x + i].x - 2*ng;
}
ky_offset += blocks[k_offset + j * kernels[rank].x].y * threads[k_offset + j * kernels[rank].x].y - 2*ng;
}
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
// boundaries
cudaMemcpy(Uc_h, Uc_d, nx*ny*nlayers*4*sizeof(float), cudaMemcpyDeviceToHost);
if (n_processes == 1) {
bcs_fv(Uc_h, nx, ny, nlayers, ng, 4);
} else {
int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
bcs_mpi(Uc_h, nx, ny, nlayers, 4, ng, comm, status, rank, n_processes, y_size, false);
}
cudaMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), cudaMemcpyHostToDevice);
int mpi_err;
if ((t+1) % dprint == 0) {
if (rank == 0) {
printf("Printing t = %i\n", t+1);
if (n_processes > 1) { // only do MPI stuff if needed
float * buf = new float[nx*ny*nlayers*4];
int tag = 0;
for (int source = 1; source < n_processes; source++) {
mpi_err = MPI_Recv(buf, nx*ny*nlayers*4, MPI_FLOAT, source, tag, comm, &status);
check_mpi_error(mpi_err);
// copy data back to grid
ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
// cheating slightly and using the fact that are moving from bottom to top to make calculations a bit easier.
for (int z = 0; z < nlayers; z++) {
for (int y = ky_offset; y < ny; y++) {
for (int x = 0; x < nx; x++) {
for (int i = 0; i < 4; i++) {
Uc_h[((z * ny + y) * nx + x) * 4 + i] = buf[((z * ny + y) * nx + x) * 4 + i];
}
}
}
}
}
delete[] buf;
}
// receive data from other processes and copy to grid
// select a hyperslab
file_space = H5Dget_space(dset);
hsize_t start[] = {hsize_t((t+1)/dprint), 0, 0, 0, 0};
hsize_t hcount[] = {1, hsize_t(nlayers), hsize_t(ny), hsize_t(nx), 4};
H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, hcount, NULL);
// write to dataset
H5Dwrite(dset, H5T_NATIVE_FLOAT, mem_space, file_space, H5P_DEFAULT, Uc_h);
// close file dataspae
H5Sclose(file_space);
} else { // send data to rank 0
int tag = 0;
mpi_err = MPI_Ssend(Uc_h, ny*nx*nlayers*4, MPI_FLOAT, 0, tag, comm);
check_mpi_error(mpi_err);
}
}
}
if (rank == 0) {
H5Sclose(mem_space);
H5Fclose(outFile);
}
} else { // don't print
for (int t = 0; t < nt; t++) {
// prolong to fine grid
prolong_grid(kernels, threads, blocks, cumulative_kernels, Uc_d,
Uf_d, nx, ny, nlayers, nxf, nyf, nz, dx, dy, dz,
dt, zmin, gamma_up,
rho_d, gamma, matching_indices_d, ng, rank, q_comp_d, old_phi_d);
cudaMemcpy(Uf_h, Uf_d, nxf*nyf*nz*6*sizeof(float), cudaMemcpyDeviceToHost);
// evolve fine grid through two subcycles
for (int i = 0; i < 2; i++) {
rk3(kernels, threads, blocks, cumulative_kernels,
beta_d, gamma_up_d, Uf_d, Uf_half_d, Upf_d,
qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
nxf, nyf, nz, 6, ng, alpha, gamma,
dx*0.5, dy*0.5, dz, dt*0.5, Upf_h, Ff_h, Uf_h,
comm, status, rank, n_processes,
h_compressible_fluxes, true);
// if not last step, copy output array to input array
if (i < 1) {
for (int j = 0; j < nxf*nyf*nz*6; j++) {
Uf_h[j] = Upf_h[j];
}
}
}
// restrict to coarse grid
restrict_grid(kernels, threads, blocks, cumulative_kernels,
Uc_d, Uf_d, nx, ny, nlayers, nxf, nyf, nz,
dz, zmin, matching_indices_d,
rho_d, gamma, gamma_up_d, ng, rank, qf_swe);
rk3(kernels, threads, blocks, cumulative_kernels,
beta_d, gamma_up_d, Uc_d, Uc_half_d, Upc_d,
qx_p_d, qx_m_d, qy_p_d, qy_m_d, qz_p_d, qz_m_d,
fx_p_d, fx_m_d, fy_p_d, fy_m_d, fz_p_d, fz_m_d,
nx, ny, nlayers, 4, ng, alpha, gamma,
dx, dy, dz, dt, Upc_h, Fc_h, Uc_h,
comm, status, rank, n_processes,
h_shallow_water_fluxes, false);
/*int k_offset = 0;
if (rank > 0) {
k_offset = cumulative_kernels[rank-1];
}
for (int j = 0; j < kernels[rank].y; j++) {
kx_offset = 0;
for (int i = 0; i < kernels[rank].x; i++) {
evolve_fv_heating<<<blocks[k_offset + j * kernels[rank].x + i], threads[k_offset + j * kernels[rank].x + i]>>>(
gamma_up_d, Un_d,
Up_d, U_half_d,
qx_p_d, qx_m_d, qy_p_d, qy_m_d,
fx_p_d, fx_m_d, fy_p_d, fy_m_d,
sum_phs_d, rho_d, Q_d,
nx, ny, nlayers, alpha, gamma,
dx, dy, dt, burning, Cv, E_He,
kx_offset, ky_offset);
kx_offset += blocks[k_offset + j * kernels[rank].x + i].x * threads[k_offset + j * kernels[rank].x + i].x - 2*ng;
}
ky_offset += blocks[k_offset + j * kernels[rank].x].y * threads[j * kernels[rank].x].y - 2*ng;
}
kx_offset = 0;
ky_offset = (kernels[0].y * blocks[0].y * threads[0].y - 2*ng) * rank;
for (int j = 0; j < kernels[rank].y; j++) {
kx_offset = 0;
for (int i = 0; i < kernels[rank].x; i++) {
evolve2<<<blocks[k_offset + j * kernels[rank].x + i], threads[k_offset + j * kernels[rank].x + i]>>>(gamma_up_d, Un_d,
Up_d, U_half_d, sum_phs_d, rho_d, Q_d,
nx, ny, nlayers, ng, alpha,
dx, dy, dt, kx_offset, ky_offset);
kx_offset += blocks[k_offset + j * kernels[rank].x + i].x * threads[k_offset + j * kernels[rank].x + i].x - 2*ng;
}
ky_offset += blocks[k_offset + j * kernels[rank].x].y * threads[k_offset + j * kernels[rank].x].y - 2*ng;
}*/
cudaDeviceSynchronize();
// boundaries
cudaMemcpy(Uc_h, Uc_d, nx*ny*nlayers*4*sizeof(float), cudaMemcpyDeviceToHost);
if (n_processes == 1) {
bcs_fv(Uc_h, nx, ny, nlayers, ng, 4);
} else {
int y_size = kernels[0].y * blocks[0].y * threads[0].y - 2*ng;
bcs_mpi(Uc_h, nx, ny, nlayers, 4, ng, comm, status, rank, n_processes, y_size, false);
}
cudaMemcpy(Uc_d, Uc_h, nx*ny*nlayers*4*sizeof(float), cudaMemcpyHostToDevice);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
}
// delete some stuff
cudaFree(beta_d);
cudaFree(gamma_up_d);
cudaFree(Uc_d);
cudaFree(Uf_d);
cudaFree(rho_d);
cudaFree(Q_d);
cudaFree(Upc_d);
cudaFree(Uc_half_d);
cudaFree(Upf_d);
cudaFree(Uf_half_d);
cudaFree(old_phi_d);
cudaFree(sum_phs_d);
cudaFree(qx_p_d);
cudaFree(qx_m_d);
cudaFree(qy_p_d);
cudaFree(qy_m_d);
cudaFree(qz_p_d);
cudaFree(qz_m_d);
cudaFree(fx_p_d);
cudaFree(fx_m_d);
cudaFree(fy_p_d);
cudaFree(fy_m_d);
cudaFree(fz_p_d);
cudaFree(fz_m_d);
cudaFree(q_comp_d);
cudaFree(qf_swe);
cudaFree(matching_indices_d);
delete[] kernels;
delete[] cumulative_kernels;
delete[] threads;
delete[] blocks;
delete[] Upc_h;
delete[] Fc_h;
delete[] Upf_h;
delete[] Ff_h;
delete[] pphi;
delete[] sum_phs_h;
}
|
7ce92e7c172d69d00008d680f4929e01f23a5bd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "layer.h"
#include <random>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <cassert>
#include <math.h>
#include <algorithm>
#include <sstream>
#include <fstream>
#include <iostream>
using namespace cudl;
// Copies a NCHW tensor into the interior of a padded output tensor:
// element (n, c, h, w) of `in` lands at (n, c, h + pad, w + pad) of `out`,
// whose spatial dims are (height_in + 2*pad) x (width_in + 2*pad).
// Border elements of `out` are NOT written here (see PadForwardPadZero).
__global__ void PadForward(const int count, const float *in, float *out,
    const int num, const int channel, const int height_in, const int width_in,
    const int pad) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // decompose the flat input index into (n, c, h, w)
    const int w = index % width_in;
    const int h = (index / width_in) % height_in;
    const int c = (index / width_in / height_in) % channel;
    const int n = index / width_in / height_in / channel;

    const int height_out = height_in + 2 * pad;
    const int width_out = width_in + 2 * pad;

    const int dst =
        ((n * channel + c) * height_out + (h + pad)) * width_out + (w + pad);
    out[dst] = in[index];
  }
}
// Zeroes the pad border of a padded output tensor. `count` indexes the
// padded (height_out x width_out) layout; any element whose spatial
// coordinate lies within `pad` of an edge is set to 0.
__global__ void PadForwardPadZero(const int count, float *out,
    const int num, const int channel, const int height_out, const int width_out,
    const int pad) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    const int w = index % width_out;
    const int h = (index / width_out) % height_out;

    // h > height_out - 1 - pad  <=>  h >= height_out - pad (same for w)
    const bool on_border = (h < pad) || (h >= height_out - pad) ||
                           (w < pad) || (w >= width_out - pad);
    if (on_border) {
      out[index] = 0.f;
    }
  }
}
// Gathers gradients back from the padded tensor: element (n, c, h, w) of
// the unpadded `out` reads from (n, c, h + pad, w + pad) of the padded `in`.
__global__ void PadBackward(const int count, const float *in, float *out,
    const int num, const int channel, const int height_in, const int width_in,
    const int pad) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // decompose the flat (unpadded) index into (n, c, h, w)
    const int w = index % width_in;
    const int h = (index / width_in) % height_in;
    const int c = (index / width_in / height_in) % channel;
    const int n = index / width_in / height_in / channel;

    const int height_out = height_in + 2 * pad;
    const int width_out = width_in + 2 * pad;

    const int src =
        ((n * channel + c) * height_out + (h + pad)) * width_out + (w + pad);
    out[index] = in[src];
  }
}
/****************************************************************
* Layer definition *
****************************************************************/
// Base-class constructor. Intentionally empty — members are presumably
// given defaults by in-class initializers in the header (not visible
// here; TODO confirm against layer.h).
Layer::Layer() {
/* do nothing */
}
// Releases every blob this layer owns. `delete` on a null pointer is a
// no-op, so no explicit null checks are required.
Layer::~Layer() {
#if (DEBUG_FORWARD > 0 || DEBUG_BACKWARD > 0)
    std::cout << "Destroy Layer: " << name_ << std::endl;
#endif

    delete output_;
    delete grad_input_;
    delete weights_;
    delete biases_;
    delete grad_weights_;
    delete grad_biases_;
}
// Randomly initializes the weights with a He-style uniform distribution
// and zeroes the biases, then uploads both to the device.
// A seed of 0 requests non-deterministic seeding from std::random_device.
void Layer::init_weight_bias(unsigned int seed) {
    checkCudaErrors(hipDeviceSynchronize());

    if (weights_ == nullptr || biases_ == nullptr)
        return;

    // seed == 0 -> draw a fresh seed from the hardware entropy source
    std::random_device rd;
    std::mt19937 gen(seed == 0 ? rd() : static_cast<unsigned int>(seed));

    // He-style uniform limit scaled by input_->size()
    // (assumed to be the fan-in of this layer — TODO confirm)
    float range = sqrt(6.f / input_->size());
    std::uniform_real_distribution<> dis(-range, range);

    float *w = weights_->ptr();
    for (int i = 0; i < weights_->len(); i++)
        w[i] = static_cast<float>(dis(gen));

    float *b = biases_->ptr();
    for (int i = 0; i < biases_->len(); i++)
        b[i] = 0.f;

    // push the host-side values to device memory
    weights_->to(DeviceType::cuda);
    biases_->to(DeviceType::cuda);

    std::cout << ".. initialized " << name_ << " layer .." << std::endl;
}
// Plain SGD step: p <- p - learning_rate * grad(p), implemented as an
// axpy with a negated scale, applied to weights and biases when present.
void Layer::update_weights_biases(float learning_rate) {
    float neg_lr = -1.f * learning_rate;

    if (weights_ != nullptr && grad_weights_ != nullptr) {
#if (DEBUG_UPDATE)
        weights_->print(name_ + "::weights (before update)", true);
        grad_weights_->print(name_ + "::gweights", true);
#endif // DEBUG_UPDATE

        // w = w + neg_lr * dw
        checkCublasErrors(
                hipblasSaxpy(cuda_->cublas(),
                             weights_->len(),
                             &neg_lr,
                             grad_weights_->cuda(), 1,
                             weights_->cuda(), 1));

#if (DEBUG_UPDATE)
        weights_->print(name_ + "weights (after update)", true);
        // getchar();
#endif // DEBUG_UPDATE
    }

    if (biases_ != nullptr && grad_biases_ != nullptr) {
#if (DEBUG_UPDATE)
        biases_->print(name_ + "biases (before update)", true);
        grad_biases_->print(name_ + "gbiases", true);
#endif // DEBUG_UPDATE

        // b = b + neg_lr * db
        checkCublasErrors(
                hipblasSaxpy(cuda_->cublas(),
                             biases_->len(),
                             &neg_lr,
                             grad_biases_->cuda(), 1,
                             biases_->cuda(), 1));

#if (DEBUG_UPDATE)
        biases_->print(name_ + "biases (after update)", true);
        // getchar();
#endif // DEBUG_UPDATE
    }
}
// Base layers have no loss; only terminal (loss) layers override this.
// Reaching it indicates a network-wiring bug, hence the unconditional
// assert; the return only exists to satisfy the signature.
float Layer::get_loss(Blob<float> *target) {
assert("No Loss layer has no loss." && false);
return EXIT_FAILURE;
}
// Base layers cannot estimate accuracy; only terminal (loss) layers
// override this. Reaching it indicates a network-wiring bug, hence the
// unconditional assert; the return only exists to satisfy the signature.
int Layer::get_accuracy(Blob<float> *target) {
assert("No Loss layer cannot estimate accuracy." && false);
return EXIT_FAILURE;
}
// Loads pretrained weights from "<name>.bin" and biases from
// "<name>.bias.bin". Returns 0 on success, -1 when the weight file fails
// to read, -2 when the bias file fails to read.
int Layer::load_parameter() {
    const std::string weight_file = name_ + ".bin";
    if (weights_->file_read(weight_file))
        return -1;

    const std::string bias_file = name_ + ".bias.bin";
    if (biases_->file_read(bias_file))
        return -2;

    std::cout << ".. loaded " << name_ << " pretrain parameter.." << std::endl;

    return 0;
}
// Writes weights to "<name>.bin" and biases to "<name>.bias.bin" when the
// corresponding blob exists. Returns 0 on success, -1 when the weight
// file fails to write, -2 when the bias file fails to write.
int Layer::save_parameter() {
    std::cout << ".. saving " << name_ << " parameter ..";

    // weights are optional (e.g. layers without parameters)
    if (weights_) {
        const std::string weight_file = name_ + ".bin";
        if (weights_->file_write(weight_file))
            return -1;
    }

    if (biases_) {
        const std::string bias_file = name_ + ".bias.bin";
        if (biases_->file_write(bias_file))
            return -2;
    }

    std::cout << " done .." << std::endl;

    return 0;
}
/****************************************************************
* Dense Layer *
****************************************************************/
// Fully-connected layer producing `output_size` features per sample.
// Weight/bias blobs are allocated lazily on the first forward() call,
// once the flattened input size is known.
Dense::Dense(std::string name, int output_size) {
name_ = name;
output_size_ = output_size;
}
// Frees the device-side vector of ones allocated in forward(); the
// parameter/output blobs are released by the Layer base destructor.
Dense::~Dense() {
if (d_one_vec != nullptr)
hipFree(d_one_vec);
}
// Fills d_one_vec[0 .. length) with 1.f. The resulting all-ones vector is
// used by Dense to broadcast/accumulate biases across the batch via GEMM.
__global__ void init_one_vec(float *d_one_vec, size_t length) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < length)
        d_one_vec[idx] = 1.f;
}
// Forward pass of the fully-connected layer: output = W^T * x + b,
// computed with two cublas GEMMs. cublas is column-major, so the
// (input_size x output_size) weight matrix is passed transposed in the
// first call, and the bias add is a rank-1 update against a ones vector.
// Parameters are allocated lazily on the first call; per-batch buffers are
// rebuilt whenever the batch size changes.
Blob<float> *Dense::forward(Blob<float> *input) {
// first call: derive the flattened input size (c*h*w) and allocate params
if (weights_ == nullptr) {
// setup parameter size information
input_size_ = input->c() * input->h() * input->w();
// initialize weight, bias, and output
weights_ = new Blob<float>(1, 1, input_size_, output_size_);
biases_ = new Blob<float>(1, 1, output_size_);
}
// (re)initialize input/output bindings when the batch size changes.
// NOTE(review): input_ is only refreshed when it is null or the batch
// size changes; a caller passing a *different* blob with the same batch
// size would leave a stale input_ used below — confirm callers reuse
// a single input blob per network.
if (input_ == nullptr || batch_size_ != input->n()) {
input_ = input;
batch_size_ = input->n();
if (output_ == nullptr)
output_ = new Blob<float>(batch_size_, output_size_);
else
output_->reset(batch_size_, output_size_);
output_->tensor();
// (re)build the device-side ones vector used to broadcast biases
if (d_one_vec != nullptr)
hipFree(d_one_vec);
checkCudaErrors(hipMalloc((void **) &d_one_vec, sizeof(float) * batch_size_));
init_one_vec << < (batch_size_ + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D, BLOCK_DIM_1D >> > (d_one_vec, batch_size_);
// initialize weights and biases: from file when pretrained, randomly
// otherwise; frozen layers are left untouched
if (load_pretrain_ && !freeze_) {
if (load_parameter()) {
std::cout << "error occurred.." << std::endl;
exit(-1);
}
} else if (!freeze_) {
init_weight_bias();
} else {
/* do nothing */
}
}
// output = weights^T * input (without biases)
checkCublasErrors(
hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_T, HIPBLAS_OP_N,
output_size_, batch_size_, input_size_,
&cuda_->one,
weights_->cuda(), input_size_,
input_->cuda(), input_size_,
&cuda_->zero,
output_->cuda(), output_size_));
// output += biases * d_one_vec^T (rank-1 update broadcasting the bias
// column across the batch)
checkCublasErrors(hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_N, HIPBLAS_OP_N,
output_size_, batch_size_, 1,
&cuda_->one,
biases_->cuda(), output_size_,
d_one_vec, 1,
&cuda_->one,
output_->cuda(), output_size_));
#if (DEBUG_DENSE & 0x01)
input_->print( name_ + "::input", true);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif // DEBUG_DENSE
return output_;
}
// Backward pass of the fully-connected layer. Computes, via cublas:
//   db = dy * ones          (per-feature sum over the batch, gemv)
//   dw = x * dy^T           (gemm)
//   dx = W * dy             (gemm, skipped when gradients are stopped)
// Gradient blobs are allocated lazily; grad_input_ is resized when the
// batch size changes.
// NOTE(review): unlike forward(), the cublas return codes here are not
// wrapped in checkCublasErrors — consider adding for consistency.
Blob<float> *Dense::backward(Blob<float> *grad_output) {
if (grad_weights_ == nullptr) {
grad_weights_ = new Blob<float>(weights_->shape());
grad_biases_ = new Blob<float>(biases_->shape());
}
if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
grad_output_ = grad_output;
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
// db = (dy) * d_one_vec — sums dy over the batch dimension
hipblasSgemv(cuda_->cublas(),
HIPBLAS_OP_N,
output_size_, batch_size_,
&cuda_->one,
grad_output_->cuda(), output_size_,
d_one_vec, 1,
&cuda_->zero,
grad_biases_->cuda(), 1);
// dw = x * (dy)^T
hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_N, HIPBLAS_OP_T,
input_size_, output_size_, batch_size_,
&cuda_->one,
input_->cuda(), input_size_,
grad_output_->cuda(), output_size_,
&cuda_->zero,
grad_weights_->cuda(), input_size_);
// dx = W * dy — not needed for the first layer (gradient_stop_)
if (!gradient_stop_)
hipblasSgemm(cuda_->cublas(),
HIPBLAS_OP_N, HIPBLAS_OP_N,
input_size_, batch_size_, output_size_,
&cuda_->one,
weights_->cuda(), input_size_,
grad_output_->cuda(), output_size_,
&cuda_->zero,
grad_input_->cuda(), input_size_);
#if (DEBUG_DENSE & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
grad_output->print( name_ + "::gradients", true, grad_output->n());
grad_weights_->print(name_ + "::gfilter", true);
grad_biases_->print( name_ + "::gbias", true);
if (!gradient_stop_)
grad_input_->print( name_ + "::gdata", true);
#endif // DEBUG_DENSE
return grad_input_;
}
/****************************************************************
* Activation Layer *
****************************************************************/
// Wraps a cudnn element-wise activation (mode selects ReLU/sigmoid/tanh/...).
// `coef` is cudnn's activation coefficient, forwarded verbatim.
Activation::Activation(std::string name, cudnnActivationMode_t mode, float coef) {
    name_ = name;
    mode_ = mode;
    coef_ = coef;

    // NOTE(review): the return codes of these two cudnn calls are ignored;
    // elsewhere in this file cudnn calls are wrapped in checkCudnnErrors.
    cudnnCreateActivationDescriptor(&act_desc_);
    cudnnSetActivationDescriptor(act_desc_, mode_, CUDNN_PROPAGATE_NAN, coef_);
}
// Destroys the cudnn activation descriptor created by the constructor.
Activation::~Activation() {
cudnnDestroyActivationDescriptor(act_desc_);
}
// Applies the configured activation elementwise: output = act(input).
// Descriptors and the output blob are (re)bound whenever the input blob
// is first seen or its batch size changes.
Blob<float> *Activation::forward(Blob<float> *input) {
    if (input_ == nullptr || batch_size_ != input->n()) {
        input_ = input;
        input_desc_ = input->tensor();
        batch_size_ = input->n();

        if (output_ == nullptr) {
            output_ = new Blob<float>(input->shape());
        } else {
            output_->reset(input->shape());
        }
        output_desc_ = output_->tensor();
    }

    // y = 1 * act(x) + 0 * y
    cudnnActivationForward(cuda_->cudnn(),
                           act_desc_,
                           &cuda_->one,
                           input_desc_,
                           input->cuda(),
                           &cuda_->zero,
                           output_desc_,
                           output_->cuda());

    return output_;
}
// Back-propagates through the activation via cudnn, which requires the
// forward output y and forward input x in addition to the incoming dy.
Blob<float> *Activation::backward(Blob<float> *grad_output) {
    if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
        grad_output_ = grad_output;

        if (grad_input_ == nullptr) {
            grad_input_ = new Blob<float>(input_->shape());
        } else {
            grad_input_->reset(input_->shape());
        }
    }

    // dx = 1 * act'(x, y, dy) + 0 * dx
    cudnnActivationBackward(cuda_->cudnn(),
                            act_desc_,
                            &cuda_->one,
                            output_desc_, output_->cuda(),
                            output_desc_, grad_output->cuda(),
                            input_desc_, input_->cuda(),
                            &cuda_->zero,
                            input_desc_, grad_input_->cuda());

    return grad_input_;
}
/****************************************************************
* Softmax definition *
****************************************************************/
// Softmax output layer; loss and accuracy queries are served through the
// owned loss_ helper (see get_loss / get_accuracy).
Softmax::Softmax(std::string name) {
name_ = name;
}
// No Softmax-specific resources; blobs are released by the Layer base
// destructor.
Softmax::~Softmax() {
}
// Channel-wise softmax of the input via cudnn (CUDNN_SOFTMAX_ACCURATE
// mode). Output buffers and descriptors are (re)bound when the input blob
// is first seen or its batch size changes.
Blob<float> *Softmax::forward(Blob<float> *input) {
    if (input_ == nullptr || batch_size_ != input->n()) {
        input_ = input;
        input_desc_ = input->tensor();
        batch_size_ = input->n();

        if (output_ == nullptr) {
            output_ = new Blob<float>(input->shape());
        } else {
            output_->reset(input->shape());
        }
        output_desc_ = output_->tensor();
    }

#if (DEBUG_SOFTMAX & 0x01)
    std::cout << name_ << "[FORWARD]" << std::endl;
    input_->print(name_ + "::input", true, input->n());
#endif

    checkCudnnErrors(
            cudnnSoftmaxForward(cuda_->cudnn(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
                                &cuda_->one, input_desc_, input->cuda(),
                                &cuda_->zero, output_desc_, output_->cuda()));

#if (DEBUG_SOFTMAX & 0x01)
    output_->print(name_ + "::output", true, input->n());
#endif

    return output_;
}
// Gradient of softmax + cross-entropy w.r.t. the logits:
//   dx = (predict - target) / batch_size
// built as a device-to-device copy of the softmax output, an axpy with
// -1 * target, and a final scal by 1/N.
Blob<float> *Softmax::backward(Blob<float> *target) {
checkCudaErrors(hipDeviceSynchronize());
if (grad_input_ == nullptr || batch_size_ != target->n()) {
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(input_->shape());
else
grad_input_->reset(input_->shape());
}
// set grad_input_ as predict
checkCudaErrors(hipMemcpyAsync(grad_input_->cuda(),
output_->cuda(), output_->buf_size(),
hipMemcpyDeviceToDevice));
// set grad_input_ = predict - target
checkCublasErrors(
hipblasSaxpy(cuda_->cublas(), target->len(),
&cuda_->minus_one, target->cuda(), 1,
grad_input_->cuda(), 1));
// normalize the gradient by the batch size
int grad_output_size = target->n() * target->c() * target->h() * target->w();
float scale = 1.f / static_cast<float>(target->n());
checkCublasErrors(hipblasSscal(cuda_->cublas(), grad_output_size, &scale, grad_input_->cuda(), 1));
#if (DEBUG_SOFTMAX & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
input_->print( name_ + "::input", true);
output_->print(name_ + "::predict", true);
target->print( name_ + "::y", true, target->n());
grad_input_->print(name_ + "::dx", true, target->n());
#endif
return grad_input_;
}
// Delegates the loss computation on the cached softmax output to the
// owned loss_ helper.
float Softmax::get_loss(Blob<float> *target) {
return loss_.loss(output_, target);
}
// Counts correctly classified samples in the batch: for each sample the
// argmax over the per-sample scores of the prediction must match the
// argmax of the target (one-hot / max-valued label).
// The argmax loop previously hard-coded 10 classes; it now iterates over
// the actual per-sample output size (as used by the indexing below and
// asserted equal for output and target), so any class count works.
// Returns the number of hits in [0, batch_size].
int Softmax::get_accuracy(Blob<float> *target) {
    int batch_size = output_->n();
    int output_size = output_->size();

    assert(batch_size == target->n());
    assert(output_size == target->size());

    float *h_output, *h_target;
    int idx_output, idx_target;
    int hit_count = 0;

    // bring predictions and labels back to host memory
    h_output = output_->to(host);
    h_target = target->to(host);

    for (int b = 0; b < batch_size; b++) {
        idx_output = 0;
        idx_target = 0;

        // argmax over all classes, not just the first 10
        for (int i = 1; i < output_size; i++) {
            if (h_output[b * output_size + i] > h_output[b * output_size + idx_output])
                idx_output = i;
            if (h_target[b * output_size + i] > h_target[b * output_size + idx_target])
                idx_target = i;
        }

        if (idx_output == idx_target)
            hit_count++;
    }

    return hit_count;
}
/****************************************************************
* Layer definition *
****************************************************************/
/**
* Convolutional layer with bias
*/
// 2D convolution layer (cross-correlation mode, float). Square kernel,
// symmetric padding/stride/dilation. Filter and bias blobs are allocated
// lazily in forward() once the input channel count is known.
// NOTE(review): the two cudnnCreate* calls below are unchecked, unlike the
// cudnnSet call — consider wrapping them in checkCudnnErrors too.
Conv2D::Conv2D(std::string name,
int out_channels,
int kernel_size,
int stride,
int padding,
int dilation) :
out_channels_(out_channels),
kernel_size_(kernel_size),
stride_(stride),
padding_(padding),
dilation_(dilation) {
name_ = name;
// create cudnn container handles
cudnnCreateFilterDescriptor(&filter_desc_);
cudnnCreateConvolutionDescriptor(&conv_desc_);
checkCudnnErrors(cudnnSetConvolution2dDescriptor(conv_desc_,
padding_, padding_, stride_, stride_, dilation_, dilation_,
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
}
// Releases the cudnn descriptors and the convolution workspace buffer.
Conv2D::~Conv2D() {
    cudnnDestroyFilterDescriptor(filter_desc_);
    cudnnDestroyConvolutionDescriptor(conv_desc_);

    if (d_workspace != nullptr) {
        hipFree(d_workspace);
    }
}
// Selects the fastest cudnn algorithms for the forward, backward-filter
// and backward-data convolutions, then allocates ONE shared workspace
// buffer sized to the maximum of the three requirements. Must be called
// after the input/output/filter descriptors are set (done in forward()).
// NOTE(review): the cudnnGetConvolution*Algorithm entry points used here
// were removed in cuDNN 8 — presumably this targets an older cudnn /
// the hipDNN equivalents; confirm against the build environment.
void Conv2D::set_workspace() {
size_t temp_size = 0;
// forward algorithm + its workspace requirement
checkCudnnErrors(cudnnGetConvolutionForwardAlgorithm(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_fwd_algo_));
checkCudnnErrors(cudnnGetConvolutionForwardWorkspaceSize(cuda_->cudnn(),
input_desc_, filter_desc_, conv_desc_, output_desc_,
conv_fwd_algo_, &temp_size));
workspace_size = ::max(workspace_size, temp_size);
// todo trainable check
// bwd - filter: gradient w.r.t. the filter weights
checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithm(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0,
&conv_bwd_filter_algo_));
checkCudnnErrors(cudnnGetConvolutionBackwardFilterWorkspaceSize(cuda_->cudnn(),
input_desc_, output_desc_, conv_desc_, filter_desc_,
conv_bwd_filter_algo_, &temp_size));
workspace_size = ::max(workspace_size, temp_size);
// bwd - data: gradient w.r.t. the layer input
checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithm(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0,
&conv_bwd_data_algo_));
checkCudnnErrors(cudnnGetConvolutionBackwardDataWorkspaceSize(cuda_->cudnn(),
filter_desc_, output_desc_, conv_desc_, input_desc_,
conv_bwd_data_algo_, &temp_size));
workspace_size = ::max(workspace_size, temp_size);
// (re)allocate the shared workspace, freeing any previous buffer first
if (workspace_size > 0) {
if (d_workspace != nullptr) checkCudaErrors(hipFree(d_workspace));
checkCudaErrors(hipMalloc((void **) &d_workspace, workspace_size));
}
}
// Forward convolution: output = conv(input, weights) + bias, both steps
// via cudnn. On the first call the filter descriptor and parameter blobs
// are created (the input channel count is only known here); whenever the
// batch size changes, the output blob, workspace and algorithms are
// rebuilt.
Blob<float> *Conv2D::forward(Blob<float> *input) {
// first call: create filter descriptor and parameter blobs
if (weights_ == nullptr) {
// initialize containers handles
checkCudnnErrors(cudnnSetFilter4dDescriptor(filter_desc_,
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
out_channels_, input->c(), kernel_size_, kernel_size_));
weights_ = new Blob<float>(out_channels_, input->c(), kernel_size_, kernel_size_);
biases_ = new Blob<float>(1, out_channels_); // one bias per output channel
bias_desc_ = biases_->tensor();
}
// (re)initialize input/output bindings when the batch size changes
if (input_ == nullptr || batch_size_ != input->n()) {
// initialize input
input_ = input;
input_desc_ = input->tensor();
batch_size_ = input->n();
// query cudnn for the output dimensions (n, c, h, w)
checkCudnnErrors(cudnnGetConvolution2dForwardOutputDim(
conv_desc_, input_desc_, filter_desc_,
&output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]));
if (output_ == nullptr)
output_ = new Blob<float>(output_size_);
else
output_->reset(output_size_);
output_desc_ = output_->tensor();
// pick algorithms and size the shared cudnn workspace
set_workspace();
// initialize weights: from file when pretrained, randomly otherwise;
// frozen layers are left untouched
if (load_pretrain_ && !freeze_) {
if (load_parameter()) {
std::cout << "error occurred.." << std::endl;
exit(-1);
}
} else if (!freeze_) {
init_weight_bias();
} else {
/* do nothing */
}
}
// y = conv(x, w)
checkCudnnErrors(cudnnConvolutionForward(cuda_->cudnn(),
&cuda_->one, input_desc_, input_->cuda(),
filter_desc_, weights_->cuda(), conv_desc_, conv_fwd_algo_, d_workspace,
workspace_size,
&cuda_->zero, output_desc_, output_->cuda()));
// y += bias (broadcast per channel)
checkCudnnErrors(cudnnAddTensor(cuda_->cudnn(),
&cuda_->one, bias_desc_, biases_->cuda(),
&cuda_->one, output_desc_, output_->cuda()));
#if (DEBUG_CONV & 0x01)
input_->print( name_ + "::input", true, input_->n(), 28);
weights_->print(name_ + "::weight", true);
biases_->print( name_ + "::bias", true);
output_->print( name_ + "::output", true);
#endif
return output_;
}
// Back-propagates through the convolution via cudnn: bias gradients,
// filter gradients and — unless this layer is gradient-stopped — input
// gradients.
// Fix: grad_weights_/grad_biases_ are now allocated only once (when still
// null), matching Dense::backward. Previously they were re-`new`ed every
// time the batch size changed, leaking the old blobs (their shapes do not
// depend on the batch size, so a one-time allocation is sufficient).
Blob<float> *Conv2D::backward(Blob<float> *grad_output) {
    // initialize / resize the back-propagation buffers
    if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
        grad_output_ = grad_output;

        // parameter-gradient blobs are batch-size independent: allocate once
        if (grad_weights_ == nullptr) {
            grad_weights_ = new Blob<float>(weights_->shape());
            grad_biases_ = new Blob<float>(1, biases_->c());
        }

        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }

    // gradients of biases: db = sum over batch/spatial dims of dy
    checkCudnnErrors(
            cudnnConvolutionBackwardBias(cuda_->cudnn(),
                                         &cuda_->one,
                                         output_desc_, grad_output->cuda(),
                                         &cuda_->zero,
                                         bias_desc_, grad_biases_->cuda()));

    // gradients of weights: dw from (x, dy)
    checkCudnnErrors(
            cudnnConvolutionBackwardFilter(cuda_->cudnn(),
                                           &cuda_->one,
                                           input_desc_, input_->cuda(),
                                           output_desc_, grad_output_->cuda(),
                                           conv_desc_, conv_bwd_filter_algo_, d_workspace, workspace_size,
                                           &cuda_->zero,
                                           filter_desc_, grad_weights_->cuda()));

    // gradients of input data: dx from (w, dy); skipped for the first layer
    if (!gradient_stop_) checkCudnnErrors(
            cudnnConvolutionBackwardData(cuda_->cudnn(),
                                         &cuda_->one,
                                         filter_desc_, weights_->cuda(),
                                         output_desc_, grad_output->cuda(),
                                         conv_desc_, conv_bwd_data_algo_, d_workspace, workspace_size,
                                         &cuda_->zero,
                                         input_desc_, grad_input_->cuda()));

#if (DEBUG_CONV & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output->print( name_ + "::gradients", true);
    grad_biases_->print(name_ + "gbias", true);
    grad_weights_->print(name_+ "gfilter", true);
    if (!gradient_stop_)
        grad_input_->print(name_+"gdata", true);
#endif

#if (DEBUG_CONV & 0x04)
    grad_output->print( name_ + "::gradients", true);
    grad_biases_->print( name_ + "::gbias", true);
#endif

    return grad_input_;
}
/****************************************************************
* Layer definition *
****************************************************************/
// 2D pooling layer (mode selects max/average). Square window with
// symmetric padding and stride, configured once on the cudnn pooling
// descriptor.
// NOTE(review): the two cudnn calls below are unchecked, unlike most cudnn
// calls in this file — consider wrapping them in checkCudnnErrors.
Pooling::Pooling(std::string name,
int kernel_size,
int padding,
int stride,
cudnnPoolingMode_t mode) :
kernel_size_(kernel_size),
padding_(padding),
stride_(stride),
mode_(mode) {
name_ = name;
cudnnCreatePoolingDescriptor(&pool_desc_);
cudnnSetPooling2dDescriptor(pool_desc_, mode_, CUDNN_PROPAGATE_NAN,
kernel_size_, kernel_size_, padding_, padding_, stride_, stride_);
}
// Destroys the cudnn pooling descriptor created by the constructor.
Pooling::~Pooling() {
cudnnDestroyPoolingDescriptor(pool_desc_);
}
// Runs cudnn pooling over the input blob. On the first call (or when the
// batch size changes) the pooled output shape is queried from cudnn and
// the output blob is (re)allocated to match.
Blob<float> *Pooling::forward(Blob<float> *input) {
    const bool needs_setup = (input_ == nullptr) || (batch_size_ != input->n());
    if (needs_setup) {
        input_ = input;
        input_desc_ = input_->tensor();
        batch_size_ = input->n();

        // ask cudnn for the pooled output dimensions (n, c, h, w)
        cudnnGetPooling2dForwardOutputDim(pool_desc_, input_desc_,
                &output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]);

        if (output_ == nullptr) {
            output_ = new Blob<float>(output_size_);
        } else {
            output_->reset(output_size_);
        }
        output_desc_ = output_->tensor();
    }

    // y = 1 * pool(x) + 0 * y
    cudnnPoolingForward(cuda_->cudnn(), pool_desc_,
            &cuda_->one, input_desc_, input_->cuda(),
            &cuda_->zero, output_desc_, output_->cuda());

    return output_;
}
// Back-propagates through the pooling operation. cudnn needs the forward
// output y, forward input x and the incoming dy to produce dx.
Blob<float> *Pooling::backward(Blob<float> *grad_output) {
    if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
        grad_output_ = grad_output;

        if (grad_input_ == nullptr) {
            grad_input_ = new Blob<float>(input_->shape());
        } else {
            grad_input_->reset(input_->shape());
        }
    }

    checkCudnnErrors(
            cudnnPoolingBackward(cuda_->cudnn(), pool_desc_,
                    &cuda_->one,
                    output_desc_, output_->cuda(),
                    output_desc_, grad_output->cuda(),
                    input_desc_, input_->cuda(),
                    &cuda_->zero,
                    input_desc_, grad_input_->cuda()));

    return grad_input_;
}
/****************************************************************
* FusedBatchNormalization definition *
****************************************************************/
// Batch-normalization layer backed by cudnn's fused BN kernels.
// `mode` selects the cudnn BN mode (e.g. per-activation vs spatial);
// the scale/bias/mean/var descriptor is derived from the input in forward().
FusedBatchNormalization::FusedBatchNormalization(std::string name, cudnnBatchNormMode_t mode) {
name_ = name;
mode_ = mode;
checkCudnnErrors(cudnnCreateTensorDescriptor(&bnScaleBiasMeanVarDesc_));
}
// Destroys the BN scale/bias/mean/var tensor descriptor.
// NOTE(review): the four device buffers hipMalloc'd in forward()
// (resultRunningMean_, resultRunningVariance_, resultSaveMean_,
// resultSaveInvVariance_) are never freed here — memory leak; confirm
// their initial state in the header and free them when non-null.
FusedBatchNormalization::~FusedBatchNormalization() {
cudnnDestroyTensorDescriptor(bnScaleBiasMeanVarDesc_);
}
// Training-mode batch normalization via cudnn: normalizes the input with
// per-channel statistics, then applies the learned per-channel scale
// (weights_) and shift (biases_). Saved mean / inverse variance are kept
// for the backward pass; running statistics are written to the
// resultRunning* buffers.
Blob<float> *FusedBatchNormalization::forward(Blob<float> *input) {
// first call: allocate the per-channel scale/shift parameters
if (weights_ == nullptr) {
// initialize weight, bias
size_ = input->c();
weights_ = new Blob<float>(1, size_, 1, 1);
biases_ = new Blob<float>(1, size_, 1, 1);
}
// (re)initialize input/output bindings when the batch size changes.
// NOTE(review): the four hipMalloc calls below run again on every
// batch-size change without freeing the previous buffers (leak), and
// their return codes are unchecked — consider freeing + checkCudaErrors.
if (input_ == nullptr || batch_size_ != input->n()) {
input_ = input;
batch_size_ = input->n();
input_desc_ = input_->tensor();
if (output_ == nullptr)
output_ = new Blob<float>(input->shape());
else
output_->reset(input->shape());
output_desc_ = output_->tensor();
hipMalloc(&resultRunningMean_, sizeof(float) * size_);
hipMalloc(&resultRunningVariance_, sizeof(float) * size_);
hipMalloc(&resultSaveMean_, sizeof(float) * size_);
hipMalloc(&resultSaveInvVariance_, sizeof(float) * size_);
checkCudnnErrors(cudnnDeriveBNTensorDescriptor(bnScaleBiasMeanVarDesc_, input_desc_, mode_));
// initialize weights and biases: from file when pretrained, randomly
// otherwise; frozen layers are left untouched
if (load_pretrain_ && !freeze_) {
if (load_parameter()) {
std::cout << "error occurred.." << std::endl;
exit(-1);
}
} else if (!freeze_) {
init_weight_bias();
} else {
/* do nothing */
}
}
// y = beta*y + alpha*[bnBias + bnScale * (x - mean)/sqrt(eps + variance)]
// NOTE(review): the exponential-average factor passed below is
// cuda_->one (1.0), so the running statistics always reflect only the
// most recent batch — confirm this is intended.
checkCudnnErrors(
cudnnBatchNormalizationForwardTraining(cuda_->cudnn(), mode_, &cuda_->one, &cuda_->zero, input_desc_,
input_->cuda(), output_desc_, output_->cuda(),
bnScaleBiasMeanVarDesc_, weights_->cuda(), biases_->cuda(),
cuda_->one, resultRunningMean_,
resultRunningVariance_,
CUDNN_BN_MIN_EPSILON, resultSaveMean_, resultSaveInvVariance_));
#if (DEBUG_FBN & 0x01)
std::cout << name_ << "[FORWARD]" << std::endl;
input_->print( name_ + "::input", true, input_->n(), input_->h());
weights_->print(name_ + "::weight", true, weights_->n(), weights_->c());
biases_->print( name_ + "::bias", true, biases_->n(), biases_->c());
output_->print( name_ + "::output", true, output_->n(), output_->h());
#endif
return output_;
}
// Back-propagates through batch-norm, producing dL/dx, dL/dScale, dL/dBias.
// Uses the mean/inv-variance saved by the preceding forward() call.
// Note: the parameter is the incoming gradient despite being named grad_input.
Blob<float> *FusedBatchNormalization::backward(Blob<float> *grad_input) {
    // initialize grad_output back-propagation space
    if (grad_input_ == nullptr || batch_size_ != grad_input->n()) {
        grad_output_ = grad_input;
        // NOTE(review): re-newed without deleting the previous blobs when the
        // batch size changes — leaks on resize; confirm and reuse/reset. TODO
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_ = new Blob<float>(biases_->shape());
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    checkCudnnErrors(
            cudnnBatchNormalizationBackward(cuda_->cudnn(), mode_, &cuda_->one, &cuda_->zero, &cuda_->one, &cuda_->zero,
                                            input_desc_, input_->cuda(), output_desc_, grad_output_->cuda(),
                                            input_desc_,
                                            grad_input_->cuda(), bnScaleBiasMeanVarDesc_, weights_->cuda(),
                                            grad_weights_->cuda(),
                                            grad_biases_->cuda(), CUDNN_BN_MIN_EPSILON, resultSaveMean_,
                                            resultSaveInvVariance_));
#if (DEBUG_FBN & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output_->print( name_ + "::gradients", true, grad_output_->n());
    grad_weights_->print(name_ + "::gfilter", true);
    grad_biases_->print( name_ + "::gbias", true);
    if (!gradient_stop_)
        grad_input_->print( name_ + "::gdata", true);
#endif // DEBUG_FBN
    return grad_input_;
}
/****************************************************************
* Pad definition *
****************************************************************/
// Padding layer.
// paddings: 8 entries, before/after amounts per NCHW dimension in order
//   {n_before, n_after, c_before, c_after, h_before, h_after, w_before, w_after}.
// pad_value: fill value — NOTE(review): the CUDA kernels always zero-fill,
// so non-zero values appear to be ignored; confirm against callers. TODO
Pad::Pad(std::string name, std::array<int, 8> paddings, int pad_value) {
    name_ = name;
    paddings_ = paddings;
    pad_value_ = pad_value;
}
// No owned resources beyond what the base class releases.
Pad::~Pad() {}
// Zero-pads the NCHW input according to paddings_.
// NOTE(review): the CUDA path honors only paddings_.at(4), applied
// symmetrically to H and W; entries 0-3/6-7 affect the output shape only,
// and pad_value_ is ignored (border is always zero) — confirm callers. TODO
Blob<float> *Pad::forward(Blob<float> *input) {
    // (Re)bind the input and size the output when the batch changes.
    if (input_ == nullptr || batch_size_ != input->n()) {
        input_ = input;
        batch_size_ = input->n();
        if (output_ == nullptr)
            output_ = new Blob<float>(input->n() + paddings_.at(0) + paddings_.at(1),
                                      input->c() + paddings_.at(2) + paddings_.at(3),
                                      input->h() + paddings_.at(4) + paddings_.at(5),
                                      input->w() + paddings_.at(6) + paddings_.at(7));
        else
            output_->reset(input->n() + paddings_.at(0) + paddings_.at(1),
                           input->c() + paddings_.at(2) + paddings_.at(3),
                           input->h() + paddings_.at(4) + paddings_.at(5),
                           input->w() + paddings_.at(6) + paddings_.at(7));
    }
    // The kernels bounds-check against their element count, so any grid size
    // is correct; size the grid by element count rather than batch size
    // (the old launch spawned only ~batch_size_ threads for the whole tensor).
    int copy_blocks = (input_->len() + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D;
    PadForward<<<copy_blocks, BLOCK_DIM_1D>>>
            (input_->len(), input_->cuda(), output_->cuda(),
             input_->n(), input_->c(), input_->h(), input_->w(), paddings_.at(4));
    int zero_blocks = (output_->len() + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D;
    PadForwardPadZero<<<zero_blocks, BLOCK_DIM_1D>>>
            (output_->len(), output_->cuda(),
             output_->n(), output_->c(), output_->h(), output_->w(), paddings_.at(4));
#if (DEBUG_PADDING & 0x01)
    std::cout << name_ << "[FORWARD]" << std::endl;
    input_->print(  name_ + "::input", true, input_->n(), input_->h());
    output_->print( name_ + "::output", true, output_->n(), output_->h());
#endif
    return output_;
}
Blob<float> *Pad::backward(Blob<float> *grad_input) {
// initialize grad_output back-propagation space
if (grad_input_ == nullptr || batch_size_ != grad_input->n()) {
grad_output_ = grad_input;
batch_size_ = grad_input->n();
if (grad_input_ == nullptr)
grad_input_ = new Blob<float>(grad_input->n() - paddings_.at(0) - paddings_.at(1),
grad_input->c() - paddings_.at(2) - paddings_.at(3),
grad_input->h() - paddings_.at(4) - paddings_.at(5),
grad_input->w() - paddings_.at(6) - paddings_.at(7));
else
grad_input_->reset(grad_input->n() - paddings_.at(0) - paddings_.at(1),
grad_input->c() - paddings_.at(2) - paddings_.at(3),
grad_input->h() - paddings_.at(4) - paddings_.at(5),
grad_input->w() - paddings_.at(6) - paddings_.at(7));
}
if (!gradient_stop_){
// eigen implemented.
// char buffer1[10];
// char buffer2[10];
// sprintf(buffer1, "%d", (batch_size_ + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D);
// sprintf(buffer2, "%d", BLOCK_DIM_1D);
// setenv("TF_EIGEN_GRIDSIZE", buffer1, 1);
// setenv("TF_EIGEN_BLOCKSIZE", buffer2, 1);
// Eigen::GpuStreamDevice stream;
// Eigen::GpuDevice gpu_device(&stream);
// Eigen::TensorMap <Eigen::Tensor<float, 4>> gpu_in(grad_output_->cuda(), grad_output_->n(), grad_output_->c(),
// grad_output_->h(),
// grad_output_->w());
// Eigen::TensorMap <Eigen::Tensor<float, 4>> gpu_out(grad_input_->cuda(), grad_input_->n(), grad_input_->c(),
// grad_input_->h(),
// grad_input_->w());
// Eigen::array<int, 4> offsets = {0, 0, paddings_.at(4), paddings_.at(6)};
// Eigen::array<int, 4> extents = {batch_size_, grad_output_->c(),
// grad_output_->w() - paddings_.at(6) - paddings_.at(7),
// grad_output_->h() - paddings_.at(4) - paddings_.at(5)};
// gpu_out.device(gpu_device) = gpu_in.slice(offsets, extents);
// hipDeviceSynchronize();
// cuda implemented.
PadBackward << < (batch_size_ + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D, BLOCK_DIM_1D >> >
(grad_input_->len(), grad_output_->cuda(), grad_input_->cuda(), grad_output_->n(), grad_output_->c(), grad_output_->h(), grad_output_->w(), paddings_.at(
4));
}
#if (DEBUG_PADDING & 0x02)
std::cout << name_ << "[BACKWARD]" << std::endl;
grad_output_->print( name_ + "::gradients", true, grad_output_->n(), grad_output_->h());
grad_input_->print( name_ + "::gdata", true, grad_input_->n(), grad_input_->h());
if (!gradient_stop_)
grad_input_->print( name_ + "::gdata", true);
#endif // DEBUG_PADDING
return grad_input_;
} | 7ce92e7c172d69d00008d680f4929e01f23a5bd7.cu | #include "layer.h"
#include <random>
#include <cuda_runtime.h>
#include <curand.h>
#include <cassert>
#include <math.h>
#include <algorithm>
#include <sstream>
#include <fstream>
#include <iostream>
using namespace cudl;
// Copies each input element of an NCHW tensor into its padded position in
// `out`. Border cells of `out` are NOT touched here (see PadForwardPadZero).
// count = number of input elements; pad applies symmetrically to H and W.
__global__ void PadForward(const int count, const float *in, float *out,
        const int num, const int channel, const int height_in, const int width_in,
        const int pad) {
    CUDA_1D_KERNEL_LOOP(index, count) {
        const int height_out = height_in + 2 * pad;
        const int width_out  = width_in + 2 * pad;
        // Decompose the flat input index into (n, c, h, w).
        const int w = index % width_in;
        const int h = (index / width_in) % height_in;
        const int c = (index / width_in / height_in) % channel;
        const int n = index / width_in / height_in / channel;
        const int dst = ((n * channel + c) * height_out + (h + pad)) * width_out
                        + (w + pad);
        out[dst] = in[index];
    }
}
// Zero-fills the border cells of the padded NCHW output tensor.
// count = number of output elements; interior cells are left untouched.
__global__ void PadForwardPadZero(const int count, float *out,
        const int num, const int channel, const int height_out, const int width_out,
        const int pad) {
    CUDA_1D_KERNEL_LOOP(index, count) {
        const int w = index % width_out;
        const int h = (index / width_out) % height_out;
        // A cell is interior iff it lies at least `pad` away from every edge.
        const bool interior = (h >= pad) && (h + pad < height_out) &&
                              (w >= pad) && (w + pad < width_out);
        if (!interior)
            out[index] = 0.f;
    }
}
// Reverse of PadForward: gathers each interior cell of the padded gradient
// (`in`, sized with +pad borders) back into the unpadded gradient `out`.
// count = number of output (unpadded) elements.
__global__ void PadBackward(const int count, const float *in, float *out,
        const int num, const int channel, const int height_in, const int width_in,
        const int pad) {
    CUDA_1D_KERNEL_LOOP(index, count) {
        const int height_out = height_in + 2 * pad;
        const int width_out  = width_in + 2 * pad;
        // Decompose the flat unpadded index into (n, c, h, w).
        const int w = index % width_in;
        const int h = (index / width_in) % height_in;
        const int c = (index / width_in / height_in) % channel;
        const int n = index / width_in / height_in / channel;
        const int src = ((n * channel + c) * height_out + (h + pad)) * width_out
                        + (w + pad);
        out[index] = in[src];
    }
}
/****************************************************************
* Layer definition *
****************************************************************/
// Base layer: owns its output, gradient, and parameter blobs.
Layer::Layer() {
    /* do nothing */
}
// Releases all owned blobs. input_ and grad_output_ are caller-owned
// pointers cached by forward()/backward() and are intentionally not deleted.
Layer::~Layer() {
#if (DEBUG_FORWARD > 0 || DEBUG_BACKWARD > 0)
    std::cout << "Destroy Layer: " << name_ << std::endl;
#endif
    if (output_ != nullptr) delete output_;
    if (grad_input_ != nullptr) delete grad_input_;
    if (weights_ != nullptr) delete weights_;
    if (biases_ != nullptr) delete biases_;
    if (grad_weights_ != nullptr) delete grad_weights_;
    if (grad_biases_ != nullptr) delete grad_biases_;
}
// He-uniform initialization: weights ~ U(-sqrt(6/fan_in), +sqrt(6/fan_in)),
// biases zero. seed == 0 draws a nondeterministic seed from random_device.
// No-op if the layer has no parameters; results are pushed to the device.
void Layer::init_weight_bias(unsigned int seed) {
    checkCudaErrors(cudaDeviceSynchronize());
    if (weights_ == nullptr || biases_ == nullptr)
        return;
    // Create random network
    std::random_device rd;
    std::mt19937 gen(seed == 0 ? rd() : static_cast<unsigned int>(seed));
    // He uniform distribution (fan_in = total input size)
    float range = sqrt(6.f / input_->size());    // He's initialization
    // Note: default uniform_real_distribution is double; cast per sample.
    std::uniform_real_distribution<> dis(-range, range);
    for (int i = 0; i < weights_->len(); i++)
        weights_->ptr()[i] = static_cast<float>(dis(gen));
    for (int i = 0; i < biases_->len(); i++)
        biases_->ptr()[i] = 0.f;
    // copy initialized value to the device
    weights_->to(DeviceType::cuda);
    biases_->to(DeviceType::cuda);
    std::cout << ".. initialized " << name_ << " layer .." << std::endl;
}
// Plain SGD step: p <- p - learning_rate * dp, applied to weights and
// biases via cublasSaxpy with eps = -learning_rate. Skips parameters whose
// gradients have not been allocated (e.g. frozen or parameterless layers).
void Layer::update_weights_biases(float learning_rate) {
    float eps = -1.f * learning_rate;
    if (weights_ != nullptr && grad_weights_ != nullptr) {
#if (DEBUG_UPDATE)
        weights_->print(name_ + "::weights (before update)", true);
        grad_weights_->print(name_ + "::gweights", true);
#endif // DEBUG_UPDATE
        // w = w + eps * dw
        checkCublasErrors(
                cublasSaxpy(cuda_->cublas(),
                            weights_->len(),
                            &eps,
                            grad_weights_->cuda(), 1,
                            weights_->cuda(), 1));
#if (DEBUG_UPDATE)
        weights_->print(name_ + "weights (after update)", true);
        // getchar();
#endif // DEBUG_UPDATE
    }
    if (biases_ != nullptr && grad_biases_ != nullptr) {
#if (DEBUG_UPDATE)
        biases_->print(name_ + "biases (before update)", true);
        grad_biases_->print(name_ + "gbiases", true);
#endif // DEBUG_UPDATE
        // b = b + eps * db
        checkCublasErrors(
                cublasSaxpy(cuda_->cublas(),
                            biases_->len(),
                            &eps,
                            grad_biases_->cuda(), 1,
                            biases_->cuda(), 1));
#if (DEBUG_UPDATE)
        biases_->print(name_ + "biases (after update)", true);
        // getchar();
#endif // DEBUG_UPDATE
    }
}
// Base-class stubs: only loss layers (e.g. Softmax) implement these.
// Calling them on any other layer trips the assert in debug builds; in
// release builds (NDEBUG) they fall through and return EXIT_FAILURE.
float Layer::get_loss(Blob<float> *target) {
    assert("No Loss layer has no loss." && false);
    return EXIT_FAILURE;
}
int Layer::get_accuracy(Blob<float> *target) {
    assert("No Loss layer cannot estimate accuracy." && false);
    return EXIT_FAILURE;
}
// Loads pretrained parameters from disk: weights from "<name>.bin" and
// biases from "<name>.bias.bin".
// Returns 0 on success, -1 if the weight file fails, -2 if the bias file fails.
int Layer::load_parameter() {
    const std::string weight_file = name_ + ".bin";
    if (weights_->file_read(weight_file))
        return -1;
    const std::string bias_file = name_ + ".bias.bin";
    if (biases_->file_read(bias_file))
        return -2;
    std::cout << ".. loaded " << name_ << " pretrain parameter.." << std::endl;
    return 0;
}
// Saves parameters to disk: weights to "<name>.bin", biases to
// "<name>.bias.bin". Blobs that were never allocated are skipped.
// Returns 0 on success, -1 on weight-write failure, -2 on bias-write failure.
int Layer::save_parameter() {
    std::cout << ".. saving " << name_ << " parameter ..";
    if (weights_) {
        const std::string weight_file = name_ + ".bin";
        if (weights_->file_write(weight_file))
            return -1;
    }
    if (biases_) {
        const std::string bias_file = name_ + ".bias.bin";
        if (biases_->file_write(bias_file))
            return -2;
    }
    std::cout << " done .." << std::endl;
    return 0;
}
/****************************************************************
* Dense Layer *
****************************************************************/
// Fully-connected layer producing output_size activations per sample.
// Input size is deduced lazily on the first forward() call.
Dense::Dense(std::string name, int output_size) {
    name_ = name;
    output_size_ = output_size;
}
// Frees the device-side all-ones vector used for the bias broadcast GEMM.
Dense::~Dense() {
    if (d_one_vec != nullptr)
        cudaFree(d_one_vec);
}
// Fills d_one_vec[0..length) with 1.0f. The ones vector lets Dense add its
// bias to every sample via a single rank-1 GEMM (bias * ones^T).
__global__ void init_one_vec(float *d_one_vec, size_t length) {
    const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < length)
        d_one_vec[idx] = 1.f;
}
// Computes y = W^T x + b for a batch of flattened inputs using cuBLAS
// (column-major). Parameters are created lazily on the first call; output
// and the ones-vector are reallocated whenever the batch size changes.
Blob<float> *Dense::forward(Blob<float> *input) {
    // initialize weights and biases
    if (weights_ == nullptr) {
        // setup parameter size information (flattened features per sample)
        input_size_ = input->c() * input->h() * input->w();
        // initialize weight, bias, and output
        weights_ = new Blob<float>(1, 1, input_size_, output_size_);
        biases_ = new Blob<float>(1, 1, output_size_);
    }
    // initialize input and output
    if (input_ == nullptr || batch_size_ != input->n()) {
        input_ = input;
        batch_size_ = input->n();
        if (output_ == nullptr)
            output_ = new Blob<float>(batch_size_, output_size_);
        else
            output_->reset(batch_size_, output_size_);
        output_->tensor();
        // (Re)build the batch-sized all-ones vector for the bias GEMM.
        if (d_one_vec != nullptr)
            cudaFree(d_one_vec);
        checkCudaErrors(cudaMalloc((void **) &d_one_vec, sizeof(float) * batch_size_));
        init_one_vec << < (batch_size_ + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D, BLOCK_DIM_1D >> > (d_one_vec, batch_size_);
        // initialize weights and biases
        if (load_pretrain_ && !freeze_) {
            if (load_parameter()) {
                std::cout << "error occurred.." << std::endl;
                exit(-1);
            }
        } else if (!freeze_) {
            init_weight_bias();
        } else {
            /* do nothing */
        }
    }
    // output = weights^T * input (without biases); cuBLAS is column-major,
    // so op(A)=W^T is (output_size x input_size), B is (input_size x batch).
    checkCublasErrors(
            cublasSgemm(cuda_->cublas(),
                        CUBLAS_OP_T, CUBLAS_OP_N,
                        output_size_, batch_size_, input_size_,
                        &cuda_->one,
                        weights_->cuda(), input_size_,
                        input_->cuda(), input_size_,
                        &cuda_->zero,
                        output_->cuda(), output_size_));
    // output += biases * d_one_vec^T — rank-1 update broadcasting the bias
    // column across every sample in the batch.
    checkCublasErrors(cublasSgemm(cuda_->cublas(),
                                  CUBLAS_OP_N, CUBLAS_OP_N,
                                  output_size_, batch_size_, 1,
                                  &cuda_->one,
                                  biases_->cuda(), output_size_,
                                  d_one_vec, 1,
                                  &cuda_->one,
                                  output_->cuda(), output_size_));
#if (DEBUG_DENSE & 0x01)
    input_->print(  name_ + "::input", true);
    weights_->print(name_ + "::weight", true);
    biases_->print( name_ + "::bias", true);
    output_->print( name_ + "::output", true);
#endif // DEBUG_DENSE
    return output_;
}
// Back-propagates through the dense layer:
//   db = dy · 1,  dW = x · dy^T,  dx = W · dy  (dx skipped when this is the
// first trainable layer, gradient_stop_).
// Fix: the three cuBLAS calls were previously unchecked, unlike forward();
// they are now wrapped in checkCublasErrors for consistent error reporting.
Blob<float> *Dense::backward(Blob<float> *grad_output) {
    if (grad_weights_ == nullptr) {
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_ = new Blob<float>(biases_->shape());
    }
    if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
        grad_output_ = grad_output;
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    // db = (dy) * d_one_vec — sums dy over the batch dimension.
    checkCublasErrors(
            cublasSgemv(cuda_->cublas(),
                        CUBLAS_OP_N,
                        output_size_, batch_size_,
                        &cuda_->one,
                        grad_output_->cuda(), output_size_,
                        d_one_vec, 1,
                        &cuda_->zero,
                        grad_biases_->cuda(), 1));
    // dw = x * (dy)^T
    checkCublasErrors(
            cublasSgemm(cuda_->cublas(),
                        CUBLAS_OP_N, CUBLAS_OP_T,
                        input_size_, output_size_, batch_size_,
                        &cuda_->one,
                        input_->cuda(), input_size_,
                        grad_output_->cuda(), output_size_,
                        &cuda_->zero,
                        grad_weights_->cuda(), input_size_));
    // dx = W * dy
    if (!gradient_stop_)
        checkCublasErrors(
                cublasSgemm(cuda_->cublas(),
                            CUBLAS_OP_N, CUBLAS_OP_N,
                            input_size_, batch_size_, output_size_,
                            &cuda_->one,
                            weights_->cuda(), input_size_,
                            grad_output_->cuda(), output_size_,
                            &cuda_->zero,
                            grad_input_->cuda(), input_size_));
#if (DEBUG_DENSE & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output->print(  name_ + "::gradients", true, grad_output->n());
    grad_weights_->print(name_ + "::gfilter", true);
    grad_biases_->print( name_ + "::gbias", true);
    if (!gradient_stop_)
        grad_input_->print( name_ + "::gdata", true);
#endif // DEBUG_DENSE
    return grad_input_;
}
/****************************************************************
* Activation Layer *
****************************************************************/
// Element-wise activation layer (ReLU, sigmoid, tanh, ... per cudnn mode).
// coef is the cuDNN activation coefficient (e.g. ReLU clipping threshold).
// NOTE(review): descriptor create/set calls are unchecked here, unlike the
// checkCudnnErrors usage elsewhere in the file — consider wrapping. TODO
Activation::Activation(std::string name, cudnnActivationMode_t mode, float coef) {
    name_ = name;
    mode_ = mode;
    coef_ = coef;
    cudnnCreateActivationDescriptor(&act_desc_);
    cudnnSetActivationDescriptor(act_desc_, mode, CUDNN_PROPAGATE_NAN, coef);
}
Activation::~Activation() {
    cudnnDestroyActivationDescriptor(act_desc_);
}
// Applies the configured activation element-wise: y = act(x).
// Output blob is created lazily and reused; reallocated on batch-size change.
// Fix: cudnnActivationForward was previously unchecked — now wrapped in
// checkCudnnErrors, consistent with the other cuDNN calls in this file.
Blob<float> *Activation::forward(Blob<float> *input) {
    if (input_ == nullptr || batch_size_ != input->n()) {
        input_ = input;
        input_desc_ = input->tensor();
        batch_size_ = input->n();
        if (output_ == nullptr)
            output_ = new Blob<float>(input->shape());
        else
            output_->reset(input->shape());
        output_desc_ = output_->tensor();
    }
    checkCudnnErrors(
            cudnnActivationForward(cuda_->cudnn(),
                                   act_desc_,
                                   &cuda_->one,
                                   input_desc_,
                                   input->cuda(),
                                   &cuda_->zero,
                                   output_desc_,
                                   output_->cuda()));
    return output_;
}
// Back-propagates through the activation: dx = act'(x) * dy.
// cuDNN needs y, dy, and x to evaluate the derivative.
// Fix: cudnnActivationBackward was previously unchecked — now wrapped in
// checkCudnnErrors, consistent with the other cuDNN calls in this file.
Blob<float> *Activation::backward(Blob<float> *grad_output) {
    if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
        grad_output_ = grad_output;
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    checkCudnnErrors(
            cudnnActivationBackward(cuda_->cudnn(),
                                    act_desc_,
                                    &cuda_->one,
                                    output_desc_, output_->cuda(),
                                    output_desc_, grad_output->cuda(),
                                    input_desc_, input_->cuda(),
                                    &cuda_->zero,
                                    input_desc_, grad_input_->cuda()));
    return grad_input_;
}
/****************************************************************
* Softmax definition *
****************************************************************/
// Softmax output layer; also computes cross-entropy loss and accuracy.
Softmax::Softmax(std::string name) {
    name_ = name;
}
// No resources beyond what the base class releases.
Softmax::~Softmax() {
}
// Computes softmax probabilities per sample along the channel dimension
// (CUDNN_SOFTMAX_MODE_CHANNEL) using the numerically-stable ACCURATE variant.
Blob<float> *Softmax::forward(Blob<float> *input) {
    if (input_ == nullptr || batch_size_ != input->n()) {
        input_ = input;
        input_desc_ = input->tensor();
        batch_size_ = input->n();
        if (output_ == nullptr)
            output_ = new Blob<float>(input->shape());
        else
            output_->reset(input->shape());
        output_desc_ = output_->tensor();
    }
#if (DEBUG_SOFTMAX & 0x01)
    std::cout << name_ << "[FORWARD]" << std::endl;
    input_->print(name_ + "::input", true, input->n());
#endif
    checkCudnnErrors(
            cudnnSoftmaxForward(cuda_->cudnn(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
                                &cuda_->one, input_desc_, input->cuda(),
                                &cuda_->zero, output_desc_, output_->cuda()));
#if (DEBUG_SOFTMAX & 0x01)
    output_->print(name_ + "::output", true, input->n());
#endif
    return output_;
}
// Gradient of softmax + cross-entropy w.r.t. the logits:
//   dx = (softmax(x) - target) / batch_size
// target: one-hot (or probability) labels, same shape as the output.
Blob<float> *Softmax::backward(Blob<float> *target) {
    checkCudaErrors(cudaDeviceSynchronize());
    if (grad_input_ == nullptr || batch_size_ != target->n()) {
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    // set grad_input_ as predict (copy softmax output device-to-device)
    checkCudaErrors(cudaMemcpyAsync(grad_input_->cuda(),
                                    output_->cuda(), output_->buf_size(),
                                    cudaMemcpyDeviceToDevice));
    // set grad_input_ = predict - target   (axpy with alpha = -1)
    checkCublasErrors(
            cublasSaxpy(cuda_->cublas(), target->len(),
                        &cuda_->minus_one, target->cuda(), 1,
                        grad_input_->cuda(), 1));
    // normalize the gradient by the batch size so the step is batch-invariant
    int grad_output_size = target->n() * target->c() * target->h() * target->w();
    float scale = 1.f / static_cast<float>(target->n());
    checkCublasErrors(cublasSscal(cuda_->cublas(), grad_output_size, &scale, grad_input_->cuda(), 1));
#if (DEBUG_SOFTMAX & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    input_->print( name_ + "::input", true);
    output_->print(name_ + "::predict", true);
    target->print( name_ + "::y", true, target->n());
    grad_input_->print(name_ + "::dx", true, target->n());
#endif
    return grad_input_;
}
// Cross-entropy loss between the softmax output and the one-hot target,
// delegated to the loss_ helper object.
float Softmax::get_loss(Blob<float> *target) {
    return loss_.loss(output_, target);
}
// Counts samples whose predicted argmax matches the target argmax.
// Fix: the inner argmax loop was hard-coded to 10 classes; it now iterates
// over the actual per-sample output size, so any class count works.
int Softmax::get_accuracy(Blob<float> *target) {
    int batch_size = output_->n();
    int output_size = output_->size();   // elements per sample (class count)
    assert(batch_size == target->n());
    assert(output_size == target->size());
    float *h_output, *h_target;
    int idx_output, idx_target;
    int hit_count = 0;
    // pull predictions and targets back to the host
    h_output = output_->to(host);
    h_target = target->to(host);
    for (int b = 0; b < batch_size; b++) {
        idx_output = 0;
        idx_target = 0;
        // argmax over all classes for both prediction and label
        for (int i = 1; i < output_size; i++) {
            if (h_output[b * output_size + i] > h_output[b * output_size + idx_output])
                idx_output = i;
            if (h_target[b * output_size + i] > h_target[b * output_size + idx_target])
                idx_target = i;
        }
        if (idx_output == idx_target)
            hit_count++;
    }
    return hit_count;
}
/****************************************************************
* Layer definition *
****************************************************************/
/**
* Convolutional layer with bias
*/
/**
 * Convolutional layer with bias (cross-correlation, FP32, square kernels).
 * Filter shape is fixed lazily in forward() once the input channel count
 * is known; the convolution descriptor is configured here.
 */
Conv2D::Conv2D(std::string name,
               int out_channels,
               int kernel_size,
               int stride,
               int padding,
               int dilation) :
        out_channels_(out_channels),
        kernel_size_(kernel_size),
        stride_(stride),
        padding_(padding),
        dilation_(dilation) {
    name_ = name;
    // create cudnn container handles
    cudnnCreateFilterDescriptor(&filter_desc_);
    cudnnCreateConvolutionDescriptor(&conv_desc_);
    checkCudnnErrors(cudnnSetConvolution2dDescriptor(conv_desc_,
                                                     padding_, padding_, stride_, stride_, dilation_, dilation_,
                                                     CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
}
// Releases cuDNN descriptors and the shared algorithm workspace.
Conv2D::~Conv2D() {
    // destroy cudnn container resources
    cudnnDestroyFilterDescriptor(filter_desc_);
    cudnnDestroyConvolutionDescriptor(conv_desc_);
    // terminate internal created blobs
    if (d_workspace != nullptr) cudaFree(d_workspace);
}
// Selects the fastest cuDNN algorithms for forward, backward-filter, and
// backward-data, then allocates a single workspace sized to the maximum of
// the three requirements (shared across all passes).
// NOTE(review): cudnnGetConvolution*Algorithm are the cuDNN v7 heuristics,
// removed in cuDNN 8 (use the *Algorithm_v7 / FindAlgorithm APIs there) —
// confirm the targeted cuDNN version. TODO
void Conv2D::set_workspace() {
    size_t temp_size = 0;
    // forward
    checkCudnnErrors(cudnnGetConvolutionForwardAlgorithm(cuda_->cudnn(),
                                                         input_desc_, filter_desc_, conv_desc_, output_desc_,
                                                         CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_fwd_algo_));
    checkCudnnErrors(cudnnGetConvolutionForwardWorkspaceSize(cuda_->cudnn(),
                                                             input_desc_, filter_desc_, conv_desc_, output_desc_,
                                                             conv_fwd_algo_, &temp_size));
    workspace_size = std::max(workspace_size, temp_size);
    // todo trainable check
    // bwd - filter
    checkCudnnErrors(cudnnGetConvolutionBackwardFilterAlgorithm(cuda_->cudnn(),
                                                                input_desc_, output_desc_, conv_desc_, filter_desc_,
                                                                CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0,
                                                                &conv_bwd_filter_algo_));
    checkCudnnErrors(cudnnGetConvolutionBackwardFilterWorkspaceSize(cuda_->cudnn(),
                                                                    input_desc_, output_desc_, conv_desc_, filter_desc_,
                                                                    conv_bwd_filter_algo_, &temp_size));
    workspace_size = std::max(workspace_size, temp_size);
    // bwd - data
    checkCudnnErrors(cudnnGetConvolutionBackwardDataAlgorithm(cuda_->cudnn(),
                                                              filter_desc_, output_desc_, conv_desc_, input_desc_,
                                                              CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0,
                                                              &conv_bwd_data_algo_));
    checkCudnnErrors(cudnnGetConvolutionBackwardDataWorkspaceSize(cuda_->cudnn(),
                                                                  filter_desc_, output_desc_, conv_desc_, input_desc_,
                                                                  conv_bwd_data_algo_, &temp_size));
    workspace_size = std::max(workspace_size, temp_size);
    // (re)allocate the shared workspace at the final maximum size
    if (workspace_size > 0) {
        if (d_workspace != nullptr) checkCudaErrors(cudaFree(d_workspace));
        checkCudaErrors(cudaMalloc((void **) &d_workspace, workspace_size));
    }
}
// Convolution forward: y = conv(x, W) + b.
// Filter/bias are created lazily on first call (input channels now known);
// output shape, workspace, and algorithms are refreshed on batch-size change.
Blob<float> *Conv2D::forward(Blob<float> *input) {
    // initialize weights and bias
    if (weights_ == nullptr) {
        // initialize containers handles
        checkCudnnErrors(cudnnSetFilter4dDescriptor(filter_desc_,
                                                    CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
                                                    out_channels_, input->c(), kernel_size_, kernel_size_));
        weights_ = new Blob<float>(out_channels_, input->c(), kernel_size_, kernel_size_);
        biases_ = new Blob<float>(1, out_channels_);  // one bias per output channel
        bias_desc_ = biases_->tensor();
    }
    // initialize input and output
    if (input_ == nullptr || batch_size_ != input->n()) {
        // initialize input
        input_ = input;
        input_desc_ = input->tensor();
        batch_size_ = input->n();
        // let cuDNN compute the output N/C/H/W for this configuration
        checkCudnnErrors(cudnnGetConvolution2dForwardOutputDim(
                conv_desc_, input_desc_, filter_desc_,
                &output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]));
        if (output_ == nullptr)
            output_ = new Blob<float>(output_size_);
        else
            output_->reset(output_size_);
        output_desc_ = output_->tensor();
        // initialize workspace for cudnn
        set_workspace();
        // initialize weights
        if (load_pretrain_ && !freeze_) {
            if (load_parameter()) {
                std::cout << "error occurred.." << std::endl;
                exit(-1);
            }
        } else if (!freeze_) {
            init_weight_bias();
        } else {
            /* do nothing */
        }
    }
    checkCudnnErrors(cudnnConvolutionForward(cuda_->cudnn(),
                                             &cuda_->one, input_desc_, input_->cuda(),
                                             filter_desc_, weights_->cuda(), conv_desc_, conv_fwd_algo_, d_workspace,
                                             workspace_size,
                                             &cuda_->zero, output_desc_, output_->cuda()));
    // broadcast-add the per-channel bias onto the convolution result
    checkCudnnErrors(cudnnAddTensor(cuda_->cudnn(),
                                    &cuda_->one, bias_desc_, biases_->cuda(),
                                    &cuda_->one, output_desc_, output_->cuda()));
#if (DEBUG_CONV & 0x01)
    input_->print(  name_ + "::input", true, input_->n(), 28);
    weights_->print(name_ + "::weight", true);
    biases_->print( name_ + "::bias", true);
    output_->print( name_ + "::output", true);
#endif
    return output_;
}
// Convolution backward: computes db, dW, and (unless gradient_stop_) dx,
// reusing the shared workspace and algorithms chosen in set_workspace().
Blob<float> *Conv2D::backward(Blob<float> *grad_output) {
    // initialize grad_output back-propagation space
    if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
        grad_output_ = grad_output;
        // NOTE(review): re-newed without deleting the previous blobs when
        // the batch size changes — leaks on resize; confirm and reuse. TODO
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_ = new Blob<float>(1, biases_->c());
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    // gradients of biases: db = sum of dy over N, H, W per channel
    checkCudnnErrors(
            cudnnConvolutionBackwardBias(cuda_->cudnn(),
                                         &cuda_->one,
                                         output_desc_, grad_output->cuda(),
                                         &cuda_->zero,
                                         bias_desc_, grad_biases_->cuda()));
    // gradients of weights: dW = x (*) dy
    checkCudnnErrors(
            cudnnConvolutionBackwardFilter(cuda_->cudnn(),
                                           &cuda_->one,
                                           input_desc_, input_->cuda(),
                                           output_desc_, grad_output_->cuda(),
                                           conv_desc_, conv_bwd_filter_algo_, d_workspace, workspace_size,
                                           &cuda_->zero,
                                           filter_desc_, grad_weights_->cuda()));
    // gradients of input data: dx = W (*) dy (skipped for the first layer)
    if (!gradient_stop_) checkCudnnErrors(
            cudnnConvolutionBackwardData(cuda_->cudnn(),
                                         &cuda_->one,
                                         filter_desc_, weights_->cuda(),
                                         output_desc_, grad_output->cuda(),
                                         conv_desc_, conv_bwd_data_algo_, d_workspace, workspace_size,
                                         &cuda_->zero,
                                         input_desc_, grad_input_->cuda()));
#if (DEBUG_CONV & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output->print( name_ + "::gradients", true);
    grad_biases_->print(name_ + "gbias", true);
    grad_weights_->print(name_+ "gfilter", true);
    if (!gradient_stop_)
        grad_input_->print(name_+"gdata", true);
#endif
#if (DEBUG_CONV & 0x04)
    grad_output->print( name_ + "::gradients", true);
    grad_biases_->print( name_ + "::gbias", true);
#endif
    return grad_input_;
}
/****************************************************************
* Layer definition *
****************************************************************/
// Pooling layer (max/avg per cudnnPoolingMode_t) with a square window.
// NOTE(review): descriptor create/set calls are unchecked here, unlike the
// checkCudnnErrors usage elsewhere in the file — consider wrapping. TODO
Pooling::Pooling(std::string name,
                 int kernel_size,
                 int padding,
                 int stride,
                 cudnnPoolingMode_t mode) :
        kernel_size_(kernel_size),
        padding_(padding),
        stride_(stride),
        mode_(mode) {
    name_ = name;
    cudnnCreatePoolingDescriptor(&pool_desc_);
    cudnnSetPooling2dDescriptor(pool_desc_, mode_, CUDNN_PROPAGATE_NAN,
                                kernel_size_, kernel_size_, padding_, padding_, stride_, stride_);
}
Pooling::~Pooling() {
    cudnnDestroyPoolingDescriptor(pool_desc_);
}
// Applies the configured pooling; output shape is derived by cuDNN and the
// output blob is (re)allocated when the batch size changes.
// NOTE(review): cudnnPoolingForward is unchecked here — consider wrapping
// in checkCudnnErrors like the backward pass. TODO
Blob<float> *Pooling::forward(Blob<float> *input) {
    if (input_ == nullptr || batch_size_ != input->n()) {
        input_ = input;
        // resource initialize
        input_desc_ = input_->tensor();
        batch_size_ = input->n();
        // let cuDNN compute the pooled N/C/H/W
        cudnnGetPooling2dForwardOutputDim(pool_desc_, input_desc_,
                                          &output_size_[0], &output_size_[1], &output_size_[2], &output_size_[3]);
        if (output_ == nullptr)
            output_ = new Blob<float>(output_size_);
        else
            output_->reset(output_size_);
        output_desc_ = output_->tensor();
    }
    cudnnPoolingForward(cuda_->cudnn(), pool_desc_,
                        &cuda_->one, input_desc_, input_->cuda(),
                        &cuda_->zero, output_desc_, output_->cuda());
    return output_;
}
// Back-propagates gradients through the pooling window.
// grad_output: dL/dy from the next layer (not owned by this layer).
// Returns dL/dx in grad_input_ (owned by this layer, reused across calls).
Blob<float> *Pooling::backward(Blob<float> *grad_output) {
    // Lazily (re)allocate the gradient buffer on first use or when the
    // batch size changed since the last forward pass.
    if (grad_input_ == nullptr || batch_size_ != grad_output->n()) {
        grad_output_ = grad_output;
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    // cuDNN needs y, dy, and x so it can recompute which input locations
    // contributed to each pooled output (e.g. the argmax for max pooling).
    checkCudnnErrors(
            cudnnPoolingBackward(cuda_->cudnn(), pool_desc_,
                                 &cuda_->one,
                                 output_desc_, output_->cuda(),
                                 output_desc_, grad_output->cuda(),
                                 input_desc_, input_->cuda(),
                                 &cuda_->zero,
                                 input_desc_, grad_input_->cuda()));
    return grad_input_;
}
/****************************************************************
* FusedBatchNormalization definition *
****************************************************************/
// Fused batch-normalization layer backed by cuDNN.
// mode selects per-activation vs spatial normalization (cudnnBatchNormMode_t).
FusedBatchNormalization::FusedBatchNormalization(std::string name, cudnnBatchNormMode_t mode) {
    name_ = name;
    mode_ = mode;
    checkCudnnErrors(cudnnCreateTensorDescriptor(&bnScaleBiasMeanVarDesc_));
}
// NOTE(review): the running/saved mean-variance device buffers allocated in
// forward() are never cudaFree'd here — confirm and add the frees. TODO
FusedBatchNormalization::~FusedBatchNormalization() {
    cudnnDestroyTensorDescriptor(bnScaleBiasMeanVarDesc_);
}
// Runs cuDNN fused batch-norm in training mode:
//   y = bnScale * (x - mean) / sqrt(var + eps) + bnBias
// Scale/bias parameters are created lazily (one pair per channel).
Blob<float> *FusedBatchNormalization::forward(Blob<float> *input) {
    // initialize weights and biases
    if (weights_ == nullptr) {
        // initialize weight, bias
        size_ = input->c();  // channel count; one scale/bias per channel
        weights_ = new Blob<float>(1, size_, 1, 1);
        biases_ = new Blob<float>(1, size_, 1, 1);
    }
    // initialize input and output (re-run whenever the batch size changes)
    if (input_ == nullptr || batch_size_ != input->n()) {
        input_ = input;
        batch_size_ = input->n();
        input_desc_ = input_->tensor();
        if (output_ == nullptr)
            output_ = new Blob<float>(input->shape());
        else
            output_->reset(input->shape());
        output_desc_ = output_->tensor();
        // NOTE(review): these cudaMallocs are unchecked and re-run on every
        // batch-size change without freeing the previous buffers (leak) —
        // confirm members start as nullptr and add cudaFree/checks. TODO
        cudaMalloc(&resultRunningMean_, sizeof(float) * size_);
        cudaMalloc(&resultRunningVariance_, sizeof(float) * size_);
        cudaMalloc(&resultSaveMean_, sizeof(float) * size_);
        cudaMalloc(&resultSaveInvVariance_, sizeof(float) * size_);
        // Derive the 1xCx1x1 scale/bias descriptor from the input layout.
        checkCudnnErrors(cudnnDeriveBNTensorDescriptor(bnScaleBiasMeanVarDesc_, input_desc_, mode_));
        // initialize weights and biases
        if (load_pretrain_ && !freeze_) {
            if (load_parameter()) {
                std::cout << "error occurred.." << std::endl;
                exit(-1);
            }
        } else if (!freeze_) {
            init_weight_bias();
        } else {
            /* do nothing */
        }
    }
    //y = beta*y + alpha *[bnBias + (bnScale * (x-estimatedMean)/sqrt(epsilon + estimatedVariance)]
    checkCudnnErrors(
            cudnnBatchNormalizationForwardTraining(cuda_->cudnn(), mode_, &cuda_->one, &cuda_->zero, input_desc_,
                                                   input_->cuda(), output_desc_, output_->cuda(),
                                                   bnScaleBiasMeanVarDesc_, weights_->cuda(), biases_->cuda(),
                                                   // exponentialAverageFactor = 1: running stats
                                                   // track only the most recent batch
                                                   cuda_->one, resultRunningMean_,
                                                   resultRunningVariance_,
                                                   CUDNN_BN_MIN_EPSILON, resultSaveMean_, resultSaveInvVariance_));
#if (DEBUG_FBN & 0x01)
    std::cout << name_ << "[FORWARD]" << std::endl;
    input_->print(  name_ + "::input", true, input_->n(), input_->h());
    weights_->print(name_ + "::weight", true, weights_->n(), weights_->c());
    biases_->print( name_ + "::bias", true, biases_->n(), biases_->c());
    output_->print( name_ + "::output", true, output_->n(), output_->h());
#endif
    return output_;
}
// Back-propagates through batch-norm, producing dL/dx, dL/dScale, dL/dBias.
// Uses the mean/inv-variance saved by the preceding forward() call.
// Note: the parameter is the incoming gradient despite being named grad_input.
Blob<float> *FusedBatchNormalization::backward(Blob<float> *grad_input) {
    // initialize grad_output back-propagation space
    if (grad_input_ == nullptr || batch_size_ != grad_input->n()) {
        grad_output_ = grad_input;
        // NOTE(review): re-newed without deleting the previous blobs when the
        // batch size changes — leaks on resize; confirm and reuse/reset. TODO
        grad_weights_ = new Blob<float>(weights_->shape());
        grad_biases_ = new Blob<float>(biases_->shape());
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(input_->shape());
        else
            grad_input_->reset(input_->shape());
    }
    checkCudnnErrors(
            cudnnBatchNormalizationBackward(cuda_->cudnn(), mode_, &cuda_->one, &cuda_->zero, &cuda_->one, &cuda_->zero,
                                            input_desc_, input_->cuda(), output_desc_, grad_output_->cuda(),
                                            input_desc_,
                                            grad_input_->cuda(), bnScaleBiasMeanVarDesc_, weights_->cuda(),
                                            grad_weights_->cuda(),
                                            grad_biases_->cuda(), CUDNN_BN_MIN_EPSILON, resultSaveMean_,
                                            resultSaveInvVariance_));
#if (DEBUG_FBN & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output_->print( name_ + "::gradients", true, grad_output_->n());
    grad_weights_->print(name_ + "::gfilter", true);
    grad_biases_->print( name_ + "::gbias", true);
    if (!gradient_stop_)
        grad_input_->print( name_ + "::gdata", true);
#endif // DEBUG_FBN
    return grad_input_;
}
/****************************************************************
* Pad definition *
****************************************************************/
// Padding layer.
// paddings: 8 entries, before/after amounts per NCHW dimension in order
//   {n_before, n_after, c_before, c_after, h_before, h_after, w_before, w_after}.
// pad_value: fill value — NOTE(review): the CUDA kernels always zero-fill,
// so non-zero values appear to be ignored; confirm against callers. TODO
Pad::Pad(std::string name, std::array<int, 8> paddings, int pad_value) {
    name_ = name;
    paddings_ = paddings;
    pad_value_ = pad_value;
}
// No owned resources beyond what the base class releases.
Pad::~Pad() {}
/**
 * Pads the input blob on all four dimensions by the configured widths and
 * returns the enlarged output blob.
 */
Blob<float> *Pad::forward(Blob<float> *input) {
    // initialize input and output
    // NOTE(review): input_ is only captured inside this branch; a different
    // input pointer with the same batch size would leave input_ stale — confirm
    // callers always reuse the same blob between shape changes.
    if (input_ == nullptr || batch_size_ != input->n()) {
        input_ = input;
        batch_size_ = input->n();
        // Output shape = input shape grown by the before/after pad widths per dim.
        if (output_ == nullptr)
            output_ = new Blob<float>(input->n() + paddings_.at(0) + paddings_.at(1),
                                      input->c() + paddings_.at(2) + paddings_.at(3),
                                      input->h() + paddings_.at(4) + paddings_.at(5),
                                      input->w() + paddings_.at(6) + paddings_.at(7));
        else
            output_->reset(input->n() + paddings_.at(0) + paddings_.at(1),
                           input->c() + paddings_.at(2) + paddings_.at(3),
                           input->h() + paddings_.at(4) + paddings_.at(5),
                           input->w() + paddings_.at(6) + paddings_.at(7));
    }
    // eigen implemented. (kept for reference — alternative Eigen tensor path)
    // char buffer1[10];
    // char buffer2[10];
    // sprintf(buffer1, "%d", (batch_size_ + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D);
    // sprintf(buffer2, "%d", BLOCK_DIM_1D);
    // setenv("TF_EIGEN_GRIDSIZE", buffer1, 1);
    // setenv("TF_EIGEN_BLOCKSIZE", buffer2, 1);
    // Eigen::GpuStreamDevice stream;
    // Eigen::GpuDevice gpu_device(&stream);
    // Eigen::TensorMap <Eigen::Tensor<float, 4>> gpu_in(input_->cuda(), input_->n(), input_->c(), input_->h(),
    //                                                   input_->w());
    // Eigen::TensorMap <Eigen::Tensor<float, 4>> gpu_out(output_->cuda(), output_->n(), output_->c(), output_->h(),
    //                                                    output_->w());
    // Eigen::array<std::pair<int, int>, 4> pads;
    // pads[0] = std::make_pair(paddings_.at(0), paddings_.at(1));
    // pads[1] = std::make_pair(paddings_.at(2), paddings_.at(3));
    // pads[2] = std::make_pair(paddings_.at(4), paddings_.at(5));
    // pads[3] = std::make_pair(paddings_.at(6), paddings_.at(7));
    // gpu_out.device(gpu_device) = gpu_in.pad(pads, pad_value_);
    // cudaDeviceSynchronize();
    // cuda implemented.
    // NOTE(review): grid is sized from batch_size_ while the kernels receive
    // len() elements — presumably both kernels use a grid-stride loop; confirm.
    PadForward << < (batch_size_ + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D, BLOCK_DIM_1D >> >
    (input_->len(), input_->cuda(), output_->cuda(), input_->n(), input_->c(), input_->h(), input_->w(), paddings_.at(
            4));
    PadForwardPadZero << < (batch_size_ + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D, BLOCK_DIM_1D >> >
    (output_->len(), output_->cuda(), output_->n(), output_->c(), output_->h(), output_->w(), paddings_.at(
            4));
#if (DEBUG_PADDING & 0x01)
    std::cout << name_ << "[FORWARD]" << std::endl;
    input_->print( name_ + "::input", true, input_->n(), input_->h());
    output_->print( name_ + "::output", true, output_->n(), output_->h());
#endif
    return output_;
}
/**
 * Back-propagates through the padding layer: crops the incoming gradient back
 * to the unpadded input shape. Returns grad_input_ (gradient w.r.t. the
 * layer's input). When gradient_stop_ is set the crop kernel is skipped and
 * grad_input_ is left untouched.
 */
Blob<float> *Pad::backward(Blob<float> *grad_input) {
    // initialize grad_output back-propagation space
    if (grad_input_ == nullptr || batch_size_ != grad_input->n()) {
        grad_output_ = grad_input;
        batch_size_ = grad_input->n();
        // Gradient-input shape = padded shape shrunk by the pad widths per dim.
        if (grad_input_ == nullptr)
            grad_input_ = new Blob<float>(grad_input->n() - paddings_.at(0) - paddings_.at(1),
                                          grad_input->c() - paddings_.at(2) - paddings_.at(3),
                                          grad_input->h() - paddings_.at(4) - paddings_.at(5),
                                          grad_input->w() - paddings_.at(6) - paddings_.at(7));
        else
            grad_input_->reset(grad_input->n() - paddings_.at(0) - paddings_.at(1),
                               grad_input->c() - paddings_.at(2) - paddings_.at(3),
                               grad_input->h() - paddings_.at(4) - paddings_.at(5),
                               grad_input->w() - paddings_.at(6) - paddings_.at(7));
    }
    if (!gradient_stop_){
        // eigen implemented. (kept for reference — alternative Eigen slice path)
        // char buffer1[10];
        // char buffer2[10];
        // sprintf(buffer1, "%d", (batch_size_ + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D);
        // sprintf(buffer2, "%d", BLOCK_DIM_1D);
        // setenv("TF_EIGEN_GRIDSIZE", buffer1, 1);
        // setenv("TF_EIGEN_BLOCKSIZE", buffer2, 1);
        // Eigen::GpuStreamDevice stream;
        // Eigen::GpuDevice gpu_device(&stream);
        // Eigen::TensorMap <Eigen::Tensor<float, 4>> gpu_in(grad_output_->cuda(), grad_output_->n(), grad_output_->c(),
        //                                                   grad_output_->h(),
        //                                                   grad_output_->w());
        // Eigen::TensorMap <Eigen::Tensor<float, 4>> gpu_out(grad_input_->cuda(), grad_input_->n(), grad_input_->c(),
        //                                                    grad_input_->h(),
        //                                                    grad_input_->w());
        // Eigen::array<int, 4> offsets = {0, 0, paddings_.at(4), paddings_.at(6)};
        // Eigen::array<int, 4> extents = {batch_size_, grad_output_->c(),
        //                                 grad_output_->w() - paddings_.at(6) - paddings_.at(7),
        //                                 grad_output_->h() - paddings_.at(4) - paddings_.at(5)};
        // gpu_out.device(gpu_device) = gpu_in.slice(offsets, extents);
        // cudaDeviceSynchronize();
        // cuda implemented.
        PadBackward << < (batch_size_ + BLOCK_DIM_1D - 1) / BLOCK_DIM_1D, BLOCK_DIM_1D >> >
        (grad_input_->len(), grad_output_->cuda(), grad_input_->cuda(), grad_output_->n(), grad_output_->c(), grad_output_->h(), grad_output_->w(), paddings_.at(
                4));
    }
#if (DEBUG_PADDING & 0x02)
    std::cout << name_ << "[BACKWARD]" << std::endl;
    grad_output_->print( name_ + "::gradients", true, grad_output_->n(), grad_output_->h());
    // Bug fix: grad_input_ was previously printed twice, the first time
    // unconditionally — even when gradient_stop_ meant PadBackward never ran
    // and the buffer held uninitialized data. Print it once, guarded.
    if (!gradient_stop_)
        grad_input_->print( name_ + "::gdata", true, grad_input_->n(), grad_input_->h());
#endif // DEBUG_PADDING
    return grad_input_;
}
e7a2895269644885302fb74cbf20d93369a65c6f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* NASA Advanced Supercomputing Parallel Benchmarks C++
*
* based on NPB 3.3.1
*
* original version and technical report:
* http://www.nas.nasa.gov/Software/NPB/
*
* Authors:
* M. Yarrow
* C. Kuszmaul
*
* C++ version:
* Dalvan Griebler <[email protected]>
 * 	Júnior Löff <[email protected]>
* Gabriell Araujo <[email protected]>
*
* CUDA version:
* Gabriell Araujo <[email protected]>
*/
#include <hip/hip_runtime.h>
#include "../common/npb-CPP.hpp"
#include "npbparams.hpp"
/*
* ---------------------------------------------------------------------
* note: please observe that in the routine conj_grad three
* implementations of the sparse matrix-vector multiply have
* been supplied. the default matrix-vector multiply is not
* loop unrolled. the alternate implementations are unrolled
* to a depth of 2 and unrolled to a depth of 8. please
* experiment with these to find the fastest for your particular
* architecture. if reporting timing results, any of these three may
* be used without penalty.
* ---------------------------------------------------------------------
* class specific parameters:
* it appears here for reference only.
* these are their values, however, this info is imported in the npbparams.h
* include file, which is written by the sys/setparams.c program.
* ---------------------------------------------------------------------
*/
#define NZ (NA*(NONZER+1)*(NONZER+1))
#define NAZ (NA*(NONZER+1))
#define T_INIT (0)
#define T_BENCH (1)
#define T_CONJ_GRAD (2)
#define T_LAST (3)
#define PROFILING_KERNEL_ONE (21)
#define PROFILING_KERNEL_TWO (22)
#define PROFILING_KERNEL_THREE (23)
#define PROFILING_KERNEL_FOUR (24)
#define PROFILING_KERNEL_FIVE (25)
#define PROFILING_KERNEL_SIX (26)
#define PROFILING_KERNEL_SEVEN (27)
#define PROFILING_KERNEL_EIGHT (28)
#define PROFILING_KERNEL_NINE (29)
#define PROFILING_KERNEL_TEN (30)
#define PROFILING_KERNEL_ELEVEN (31)
#define PROFILING_KERNEL_FIVE_MERGED_KERNEL_SIX (32)
#define MINIMUM_THREADS_PER_BLOCK (64)
#define THREADS_PER_BLOCK_ON_KERNEL_ONE (128)
#define THREADS_PER_BLOCK_ON_KERNEL_TWO (64)
#define THREADS_PER_BLOCK_ON_KERNEL_THREE (32)
#define THREADS_PER_BLOCK_ON_KERNEL_FOUR (64)
#define THREADS_PER_BLOCK_ON_KERNEL_FIVE (64)
#define THREADS_PER_BLOCK_ON_KERNEL_SIX (64)
#define THREADS_PER_BLOCK_ON_KERNEL_SEVEN (128)
#define THREADS_PER_BLOCK_ON_KERNEL_EIGHT (32)
#define THREADS_PER_BLOCK_ON_KERNEL_NINE (64)
#define THREADS_PER_BLOCK_ON_KERNEL_TEN (64)
#define THREADS_PER_BLOCK_ON_KERNEL_ELEVEN (128)
/* global variables */
#if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION)
static int colidx[NZ];
static int rowstr[NA+1];
static int iv[NA];
static int arow[NA];
static int acol[NAZ];
static double aelt[NAZ];
static double a[NZ];
static double x[NA+2];
static double z[NA+2];
static double p[NA+2];
static double q[NA+2];
static double r[NA+2];
#else
static int (*colidx)=(int*)malloc(sizeof(int)*(NZ));
static int (*rowstr)=(int*)malloc(sizeof(int)*(NA+1));
static int (*iv)=(int*)malloc(sizeof(int)*(NA));
static int (*arow)=(int*)malloc(sizeof(int)*(NA));
static int (*acol)=(int*)malloc(sizeof(int)*(NAZ));
static double (*aelt)=(double*)malloc(sizeof(double)*(NAZ));
static double (*a)=(double*)malloc(sizeof(double)*(NZ));
static double (*x)=(double*)malloc(sizeof(double)*(NA+2));
static double (*z)=(double*)malloc(sizeof(double)*(NA+2));
static double (*p)=(double*)malloc(sizeof(double)*(NA+2));
static double (*q)=(double*)malloc(sizeof(double)*(NA+2));
static double (*r)=(double*)malloc(sizeof(double)*(NA+2));
#endif
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
static double amult;
static double tran;
static boolean timeron;
/* gpu variables */
int* colidx_device;
int* rowstr_device;
double* a_device;
double* p_device;
double* q_device;
double* r_device;
double* x_device;
double* z_device;
double* rho_device;
double* d_device;
double* alpha_device;
double* beta_device;
double* sum_device;
double* norm_temp1_device;
double* norm_temp2_device;
double* global_data;
double* global_data_two;
double* global_data_device;
double* global_data_two_device;
double global_data_reduce;
double global_data_two_reduce;
size_t global_data_elements;
size_t size_global_data;
size_t size_colidx_device;
size_t size_rowstr_device;
size_t size_iv_device;
size_t size_arow_device;
size_t size_acol_device;
size_t size_aelt_device;
size_t size_a_device;
size_t size_x_device;
size_t size_z_device;
size_t size_p_device;
size_t size_q_device;
size_t size_r_device;
size_t size_rho_device;
size_t size_d_device;
size_t size_alpha_device;
size_t size_beta_device;
size_t size_sum_device;
size_t size_norm_temp1_device;
size_t size_norm_temp2_device;
size_t kernel_one_blocks_per_grid;
size_t kernel_two_blocks_per_grid;
size_t kernel_three_blocks_per_grid;
size_t kernel_four_blocks_per_grid;
size_t kernel_five_blocks_per_grid;
size_t kernel_six_blocks_per_grid;
size_t kernel_seven_blocks_per_grid;
size_t kernel_eight_blocks_per_grid;
size_t kernel_nine_blocks_per_grid;
size_t kernel_ten_blocks_per_grid;
size_t kernel_eleven_blocks_per_grid;
size_t amount_of_share_data_on_kernel_one;
size_t amount_of_share_data_on_kernel_two;
size_t amount_of_share_data_on_kernel_three;
size_t amount_of_share_data_on_kernel_four;
size_t amount_of_share_data_on_kernel_five;
size_t amount_of_share_data_on_kernel_six;
size_t amount_of_share_data_on_kernel_seven;
size_t amount_of_share_data_on_kernel_eight;
size_t amount_of_share_data_on_kernel_nine;
size_t amount_of_share_data_on_kernel_ten;
size_t amount_of_share_data_on_kernel_eleven;
size_t reduce_memory_on_kernel_one;
size_t reduce_memory_on_kernel_two;
size_t reduce_memory_on_kernel_three;
size_t reduce_memory_on_kernel_four;
size_t reduce_memory_on_kernel_five;
size_t reduce_memory_on_kernel_six;
size_t reduce_memory_on_kernel_seven;
size_t reduce_memory_on_kernel_eight;
size_t reduce_memory_on_kernel_nine;
size_t reduce_memory_on_kernel_ten;
size_t reduce_memory_on_kernel_eleven;
extern __shared__ double extern_share_data[];
/* function prototypes */
static void conj_grad(int colidx[],
int rowstr[],
double x[],
double z[],
double a[],
double p[],
double q[],
double r[],
double* rnorm);
static void conj_grad_gpu(double* rnorm);
static void gpu_kernel_one();
__global__ void gpu_kernel_one(double p[],
double q[],
double r[],
double x[],
double z[]);
static void gpu_kernel_two(double* rho_host);
__global__ void gpu_kernel_two(double r[],
double* rho,
double global_data[]);
static void gpu_kernel_three();
__global__ void gpu_kernel_three(int colidx[],
int rowstr[],
double a[],
double p[],
double q[]);
static void gpu_kernel_four(double* d_host);
__global__ void gpu_kernel_four(double* d,
double* p,
double* q,
double global_data[]);
static void gpu_kernel_five(double alpha_host);
__global__ void gpu_kernel_five_1(double alpha,
double* p,
double* z);
__global__ void gpu_kernel_five_2(double alpha,
double* q,
double* r);
static void gpu_kernel_five_merged_kernel_six(double alpha_host,
double* rho_host);
__global__ void gpu_kernel_five_merged_kernel_six(double alpha,
double* p,
double* q,
double* r,
double* z,
double global_data[]);
static void gpu_kernel_six(double* rho_host);
__global__ void gpu_kernel_six(double r[],
double global_data[]);
static void gpu_kernel_seven(double beta_host);
__global__ void gpu_kernel_seven(double beta,
double* p,
double* r);
static void gpu_kernel_eight();
__global__ void gpu_kernel_eight(int colidx[],
int rowstr[],
double a[],
double r[],
double* z);
static void gpu_kernel_nine(double* sum_host);
__global__ void gpu_kernel_nine(double r[],
double x[],
double* sum,
double global_data[]);
static void gpu_kernel_ten(double* norm_temp1,
double* norm_temp2);
__global__ void gpu_kernel_ten(double* norm_temp1,
double* norm_temp2,
double x[],
double z[]);
__global__ void gpu_kernel_ten_1(double* norm_temp,
double x[],
double z[]);
__global__ void gpu_kernel_ten_2(double* norm_temp,
double x[],
double z[]);
static void gpu_kernel_eleven(double norm_temp2);
__global__ void gpu_kernel_eleven(double norm_temp2,
double x[],
double z[]);
static int icnvrt(double x,
int ipwr2);
static void makea(int n,
int nz,
double a[],
int colidx[],
int rowstr[],
int firstrow,
int lastrow,
int firstcol,
int lastcol,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int iv[]);
static void release_gpu();
static void setup_gpu();
static void sparse(double a[],
int colidx[],
int rowstr[],
int n,
int nz,
int nozer,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int firstrow,
int lastrow,
int nzloc[],
double rcond,
double shift);
static void sprnvc(int n,
int nz,
int nn1,
double v[],
int iv[]);
static void vecset(int n,
double v[],
int iv[],
int* nzv,
int i,
double val);
/* cg */
/*
 * Benchmark driver: builds the random sparse matrix, runs one untimed CG
 * power-method iteration on the host to touch all code/data pages, then times
 * NITER device-side iterations, verifies zeta against the class reference
 * value, and reports MFLOPS plus optional per-section timers.
 */
int main(int argc, char** argv){
#if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION)
    printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n");
#endif
    int i, j, k, it;
    double zeta;
    double rnorm;
    double norm_temp1, norm_temp2;
    double t, mflops, tmax;
    char class_npb;
    boolean verified;
    double zeta_verify_value, epsilon, err;
    char *t_names[T_LAST];
    /* reset the section timers and every per-kernel profiling slot */
    for(i=0; i<T_LAST; i++){
        timer_clear(i);
    }
    timer_clear(PROFILING_KERNEL_ONE);
    timer_clear(PROFILING_KERNEL_TWO);
    timer_clear(PROFILING_KERNEL_THREE);
    timer_clear(PROFILING_KERNEL_FOUR);
    timer_clear(PROFILING_KERNEL_FIVE);
    timer_clear(PROFILING_KERNEL_SIX);
    timer_clear(PROFILING_KERNEL_SEVEN);
    timer_clear(PROFILING_KERNEL_EIGHT);
    timer_clear(PROFILING_KERNEL_NINE);
    timer_clear(PROFILING_KERNEL_TEN);
    timer_clear(PROFILING_KERNEL_ELEVEN);
    timer_clear(PROFILING_KERNEL_FIVE_MERGED_KERNEL_SIX);
    /* the presence of a "timer.flag" file enables the optional section timers */
    FILE* fp;
    if((fp = fopen("timer.flag", "r")) != NULL){
        timeron = TRUE;
        t_names[T_INIT] = (char*)"init";
        t_names[T_BENCH] = (char*)"benchmk";
        t_names[T_CONJ_GRAD] = (char*)"conjgd";
        fclose(fp);
    }else{
        timeron = FALSE;
    }
    timer_start(T_INIT);
    firstrow = 0;
    lastrow = NA-1;
    firstcol = 0;
    lastcol = NA-1;
    /* map the compile-time parameters to a problem class and its reference zeta */
    if(NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0){
        class_npb = 'S';
        zeta_verify_value = 8.5971775078648;
    }else if(NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0){
        class_npb = 'W';
        zeta_verify_value = 10.362595087124;
    }else if(NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0){
        class_npb = 'A';
        zeta_verify_value = 17.130235054029;
    }else if(NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0){
        class_npb = 'B';
        zeta_verify_value = 22.712745482631;
    }else if(NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0){
        class_npb = 'C';
        zeta_verify_value = 28.973605592845;
    }else if(NA == 1500000 && NONZER == 21 && NITER == 100 && SHIFT == 500.0){
        class_npb = 'D';
        zeta_verify_value = 52.514532105794;
    }else if(NA == 9000000 && NONZER == 26 && NITER == 100 && SHIFT == 1500.0){
        class_npb = 'E';
        zeta_verify_value = 77.522164599383;
    }else{
        class_npb = 'U';
    }
    printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - CG Benchmark\n\n");
    printf(" Size: %11d\n", NA);
    printf(" Iterations: %5d\n", NITER);
    naa = NA;
    nzz = NZ;
    /* initialize random number generator */
    tran = 314159265.0;
    amult = 1220703125.0;
    zeta = randlc( &tran, amult );
    /* build the random sparse matrix a/colidx/rowstr */
    makea(naa,
          nzz,
          a,
          colidx,
          rowstr,
          firstrow,
          lastrow,
          firstcol,
          lastcol,
          arow,
          (int(*)[NONZER+1])(void*)acol,
          (double(*)[NONZER+1])(void*)aelt,
          iv);
    /*
     * ---------------------------------------------------------------------
     * note: as a result of the above call to makea:
     * values of j used in indexing rowstr go from 0 --> lastrow-firstrow
     * values of colidx which are col indexes go from firstcol --> lastcol
     * so:
     * shift the col index vals from actual (firstcol --> lastcol)
     * to local, i.e., (0 --> lastcol-firstcol)
     * ---------------------------------------------------------------------
     */
    for(j = 0; j < lastrow - firstrow + 1; j++){
        for(k = rowstr[j]; k < rowstr[j+1]; k++){
            colidx[k] = colidx[k] - firstcol;
        }
    }
    /* set starting vector to (1, 1, .... 1) */
    for(i = 0; i < NA+1; i++){
        x[i] = 1.0;
    }
    for(j = 0; j<lastcol-firstcol+1; j++){
        q[j] = 0.0;
        z[j] = 0.0;
        r[j] = 0.0;
        p[j] = 0.0;
    }
    zeta = 0.0;
    /*
     * -------------------------------------------------------------------
     * ---->
     * do one iteration untimed to init all code and data page tables
     * ----> (then reinit, start timing, to niter its)
     * -------------------------------------------------------------------*/
    for(it = 1; it <= 1; it++){
        /* the call to the conjugate gradient routine */
        conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
        /*
         * --------------------------------------------------------------------
         * zeta = shift + 1/(x.z)
         * so, first: (x.z)
         * also, find norm of z
         * so, first: (z.z)
         * --------------------------------------------------------------------
         */
        norm_temp1 = 0.0;
        norm_temp2 = 0.0;
        for(j = 0; j < lastcol - firstcol + 1; j++){
            norm_temp1 = norm_temp1 + x[j] * z[j];
            norm_temp2 = norm_temp2 + z[j] * z[j];
        }
        norm_temp2 = 1.0 / sqrt(norm_temp2);
        /* normalize z to obtain x */
        for(j = 0; j < lastcol - firstcol + 1; j++){
            x[j] = norm_temp2 * z[j];
        }
    } /* end of do one iteration untimed */
    /* set starting vector to (1, 1, .... 1) */
    for(i = 0; i < NA+1; i++){
        x[i] = 1.0;
    }
    zeta = 0.0;
    timer_stop(T_INIT);
    printf(" Initialization time = %15.3f seconds\n", timer_read(T_INIT));
    /* allocate device buffers and copy the matrix/vectors to the GPU */
    setup_gpu();
    timer_start(T_BENCH);
    /*
     * --------------------------------------------------------------------
     * ---->
     * main iteration for inverse power method
     * ---->
     * --------------------------------------------------------------------
     */
    for(it = 1; it <= NITER; it++){
        /* the call to the conjugate gradient routine */
        if(timeron){timer_start(T_CONJ_GRAD);}
        conj_grad_gpu(&rnorm);
        if(timeron){timer_stop(T_CONJ_GRAD);}
        /*
         * --------------------------------------------------------------------
         * zeta = shift + 1/(x.z)
         * so, first: (x.z)
         * also, find norm of z
         * so, first: (z.z)
         * --------------------------------------------------------------------
         */
        gpu_kernel_ten(&norm_temp1, &norm_temp2);
        norm_temp2 = 1.0 / sqrt(norm_temp2);
        zeta = SHIFT + 1.0 / norm_temp1;
        if(it==1){printf("\n iteration ||r|| zeta\n");}
        printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
        /* normalize z to obtain x */
        gpu_kernel_eleven(norm_temp2);
    } /* end of main iter inv pow meth */
    timer_stop(T_BENCH);
    /*
     * --------------------------------------------------------------------
     * end of timed section
     * --------------------------------------------------------------------
     */
    t = timer_read(T_BENCH);
    printf(" Benchmark completed\n");
    epsilon = 1.0e-10;
    /* verify against the class reference value (relative error) */
    if(class_npb != 'U'){
        err = fabs(zeta - zeta_verify_value) / zeta_verify_value;
        if(err <= epsilon){
            verified = TRUE;
            printf(" VERIFICATION SUCCESSFUL\n");
            printf(" Zeta is %20.13e\n", zeta);
            printf(" Error is %20.13e\n", err);
        }else{
            verified = FALSE;
            printf(" VERIFICATION FAILED\n");
            printf(" Zeta %20.13e\n", zeta);
            printf(" The correct zeta is %20.13e\n", zeta_verify_value);
        }
    }else{
        verified = FALSE;
        printf(" Problem size unknown\n");
        printf(" NO VERIFICATION PERFORMED\n");
    }
    /* MFLOPS from the NPB operation-count formula for CG */
    if(t != 0.0){
        mflops = (double)(2.0*NITER*NA)
            * (3.0+(double)(NONZER*(NONZER+1))
                    + 25.0
                    * (5.0+(double)(NONZER*(NONZER+1)))+3.0)
            / t / 1000000.0;
    }else{
        mflops = 0.0;
    }
    c_print_results((char*)"CG",
            class_npb,
            NA,
            0,
            0,
            NITER,
            t,
            mflops,
            (char*)" floating point",
            verified,
            (char*)NPBVERSION,
            (char*)COMPILETIME,
            (char*)CS1,
            (char*)CS2,
            (char*)CS3,
            (char*)CS4,
            (char*)CS5,
            (char*)CS6,
            (char*)CS7);
    /*
     * ---------------------------------------------------------------------
     * more timers
     * ---------------------------------------------------------------------
     */
    if(timeron){
        tmax = timer_read(T_BENCH);
        if(tmax == 0.0){tmax = 1.0;}
        printf(" SECTION Time (secs)\n");
        for(i = 0; i < T_LAST; i++){
            t = timer_read(i);
            if(i == T_INIT){
                printf(" %8s:%9.3f\n", t_names[i], t);
            }else{
                printf(" %8s:%9.3f (%6.2f%%)\n", t_names[i], t, t*100.0/tmax);
                if(i == T_CONJ_GRAD){
                    t = tmax - t;
                    printf(" --> %8s:%9.3f (%6.2f%%)\n", "rest", t, t*100.0/tmax);
                }
            }
        }
    }
    release_gpu();
    return 0;
}
/*
* ---------------------------------------------------------------------
* floating point arrays here are named as in NPB1 spec discussion of
* CG algorithm
* ---------------------------------------------------------------------
*/
/*
 * Host (CPU) reference implementation of one conjugate-gradient solve:
 * runs cgitmax iterations of CG on A z = x, then returns the explicitly
 * computed residual norm ||x - A.z|| through rnorm. Reads the file-scope
 * extents naa / firstrow / lastrow / firstcol / lastcol.
 */
static void conj_grad(int colidx[],
        int rowstr[],
        double x[],
        double z[],
        double a[],
        double p[],
        double q[],
        double r[],
        double* rnorm){
    int j, k;
    int cgit, cgitmax;
    double d, sum, rho, rho0, alpha, beta;
    cgitmax = 25;
    rho = 0.0;
    /* initialize the CG algorithm: q = z = 0, r = p = x */
    for(j = 0; j < naa+1; j++){
        q[j] = 0.0;
        z[j] = 0.0;
        r[j] = x[j];
        p[j] = r[j];
    }
    /*
     * --------------------------------------------------------------------
     * rho = r.r
     * now, obtain the norm of r: First, sum squares of r elements locally...
     * --------------------------------------------------------------------
     */
    for(j = 0; j < lastcol - firstcol + 1; j++){
        rho = rho + r[j]*r[j];
    }
    /* the conj grad iteration loop */
    for(cgit = 1; cgit <= cgitmax; cgit++){
        /*
         * ---------------------------------------------------------------------
         * q = A.p
         * the partition submatrix-vector multiply: use workspace w
         * ---------------------------------------------------------------------
         *
         * note: this version of the multiply is actually (slightly: maybe %5)
         * faster on the sp2 on 16 nodes than is the unrolled-by-2 version
         * below. on the Cray t3d, the reverse is TRUE, i.e., the
         * unrolled-by-two version is some 10% faster.
         * the unrolled-by-8 version below is significantly faster
         * on the Cray t3d - overall speed of code is 1.5 times faster.
         */
        for(j = 0; j < lastrow - firstrow + 1; j++){
            sum = 0.0;
            for(k = rowstr[j]; k < rowstr[j+1]; k++){
                sum = sum + a[k]*p[colidx[k]];
            }
            q[j] = sum;
        }
        /*
         * --------------------------------------------------------------------
         * obtain p.q
         * --------------------------------------------------------------------
         */
        d = 0.0;
        for (j = 0; j < lastcol - firstcol + 1; j++) {
            d = d + p[j]*q[j];
        }
        /*
         * --------------------------------------------------------------------
         * obtain alpha = rho / (p.q)
         * -------------------------------------------------------------------
         */
        alpha = rho / d;
        /*
         * --------------------------------------------------------------------
         * save a temporary of rho
         * --------------------------------------------------------------------
         */
        rho0 = rho;
        /*
         * ---------------------------------------------------------------------
         * obtain z = z + alpha*p
         * and r = r - alpha*q
         * ---------------------------------------------------------------------
         */
        rho = 0.0;
        for(j = 0; j < lastcol - firstcol + 1; j++){
            z[j] = z[j] + alpha*p[j];
            r[j] = r[j] - alpha*q[j];
        }
        /*
         * ---------------------------------------------------------------------
         * rho = r.r
         * now, obtain the norm of r: first, sum squares of r elements locally...
         * ---------------------------------------------------------------------
         */
        for(j = 0; j < lastcol - firstcol + 1; j++){
            rho = rho + r[j]*r[j];
        }
        /*
         * ---------------------------------------------------------------------
         * obtain beta
         * ---------------------------------------------------------------------
         */
        beta = rho / rho0;
        /*
         * ---------------------------------------------------------------------
         * p = r + beta*p
         * ---------------------------------------------------------------------
         */
        for(j = 0; j < lastcol - firstcol + 1; j++){
            p[j] = r[j] + beta*p[j];
        }
    } /* end of do cgit=1, cgitmax */
    /*
     * ---------------------------------------------------------------------
     * compute residual norm explicitly: ||r|| = ||x - A.z||
     * first, form A.z
     * the partition submatrix-vector multiply
     * ---------------------------------------------------------------------
     */
    sum = 0.0;
    for(j = 0; j < lastrow - firstrow + 1; j++){
        d = 0.0;
        for(k = rowstr[j]; k < rowstr[j+1]; k++){
            d = d + a[k]*z[colidx[k]];
        }
        r[j] = d;
    }
    /*
     * ---------------------------------------------------------------------
     * at this point, r contains A.z
     * ---------------------------------------------------------------------
     */
    for(j = 0; j < lastcol-firstcol+1; j++){
        d = x[j] - r[j];
        sum = sum + d*d;
    }
    *rnorm = sqrt(sum);
}
/*
 * GPU counterpart of conj_grad(): runs cgitmax CG iterations entirely on the
 * device (each gpu_kernel_* wrapper launches one step) and returns the
 * explicit residual norm ||x - A.z|| through rnorm.
 */
static void conj_grad_gpu(double* rnorm){
    double d, sum, rho, rho0, alpha, beta;
    const int cgitmax = 25;
    /* initialize the CG algorithm: q = z = 0, r = p = x */
    gpu_kernel_one();
    /* rho = r.r (block-wise partial sums reduced on the host) */
    gpu_kernel_two(&rho);
    /* the conj grad iteration loop */
    for(int cgit = 1; cgit <= cgitmax; cgit++){
        gpu_kernel_three();   /* q = A.p */
        gpu_kernel_four(&d);  /* d = p.q */
        alpha = rho / d;
        rho0 = rho;           /* save a temporary of rho */
        /*
         * Fused step: (z += alpha*p), (r -= alpha*q) and (rho = r.r) in one
         * launch. The separate gpu_kernel_five / gpu_kernel_six pair remains
         * available as an unfused alternative.
         */
        gpu_kernel_five_merged_kernel_six(alpha, &rho);
        beta = rho / rho0;
        gpu_kernel_seven(beta);  /* p = r + beta*p */
    }
    /* compute residual norm explicitly: r = A.z, then sum = ||x - r||^2 */
    gpu_kernel_eight();
    gpu_kernel_nine(&sum);
    *rnorm = sqrt(sum);
}
/* Launches the CG initialization kernel: q = z = 0 and r = p = x on device. */
static void gpu_kernel_one(){
    hipLaunchKernelGGL((gpu_kernel_one),
            dim3(kernel_one_blocks_per_grid),
            dim3(THREADS_PER_BLOCK_ON_KERNEL_ONE),
            0 /* no dynamic shared memory */,
            0 /* default stream */,
            p_device, q_device, r_device, x_device, z_device);
}
/* Device code for the CG initialization: each thread owns one vector slot,
 * zeroing q and z and copying x into both r and p. */
__global__ void gpu_kernel_one(double p[],
        double q[],
        double r[],
        double x[],
        double z[]){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < NA){
        q[i] = 0.0;
        z[i] = 0.0;
        double xi = x[i];
        r[i] = xi;
        p[i] = xi;
    }
}
/* Launches the rho = r.r reduction kernel, copies the per-block partial sums
 * back, and finishes the reduction on the host into *rho_host. */
static void gpu_kernel_two(double* rho_host){
    hipLaunchKernelGGL((gpu_kernel_two),
            dim3(kernel_two_blocks_per_grid),
            dim3(THREADS_PER_BLOCK_ON_KERNEL_TWO),
            amount_of_share_data_on_kernel_two,
            0,
            r_device, rho_device, global_data_device);
    global_data_reduce = 0.0;
    /* one partial sum per block lives in global_data_device */
    hipMemcpy(global_data, global_data_device, reduce_memory_on_kernel_two, hipMemcpyDeviceToHost);
    for(int block = 0; block < kernel_two_blocks_per_grid; block++){
        global_data_reduce += global_data[block];
    }
    *rho_host = global_data_reduce;
}
/*
 * Block-level reduction of r.r: each thread squares its r element, the block
 * reduces in dynamic shared memory, and thread 0 writes the per-block partial
 * sum to global_data[blockIdx.x]. The `rho` argument is unused here (the host
 * wrapper finishes the reduction); it is kept for signature compatibility.
 */
__global__ void gpu_kernel_two(double r[],
        double* rho,
        double global_data[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    /*
     * Bug fix: the original returned early for thread_id >= NA *before*
     * __syncthreads(), so the barrier below was not reached by every thread
     * of the last block (undefined behavior per the CUDA/HIP programming
     * guides). Out-of-range threads now contribute 0.0 and take part in the
     * reduction, matching gpu_kernel_five_merged_kernel_six.
     */
    if(thread_id < NA){
        double r_value = r[thread_id];
        share_data[local_id] = r_value * r_value;
    }else{
        share_data[local_id] = 0.0;
    }
    __syncthreads();
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){global_data[blockIdx.x]=share_data[0];}
}
/* Launches the sparse matrix-vector product q = A.p (one block per row). */
static void gpu_kernel_three(){
    hipLaunchKernelGGL((gpu_kernel_three),
            dim3(kernel_three_blocks_per_grid),
            dim3(THREADS_PER_BLOCK_ON_KERNEL_THREE),
            amount_of_share_data_on_kernel_three,
            0,
            colidx_device, rowstr_device, a_device, p_device, q_device);
}
/*
 * q = A.p: one block per matrix row. The block's threads stride across the
 * row's nonzeros, accumulate private partial sums, then combine them with a
 * shared-memory tree reduction; lane 0 stores the row result into q.
 */
__global__ void gpu_kernel_three(int colidx[],
        int rowstr[],
        double a[],
        double p[],
        double q[]){
    double* partial = (double*)extern_share_data;
    /* (blockIdx.x*blockDim.x+threadIdx.x)/blockDim.x always equals blockIdx.x */
    int row = blockIdx.x;
    int lane = threadIdx.x;
    int begin = rowstr[row];
    int end = rowstr[row+1];
    double sum = 0.0;
    for(int k = begin + lane; k < end; k += blockDim.x){
        sum += a[k] * p[colidx[k]];
    }
    partial[lane] = sum;
    __syncthreads();
    for(int stride = blockDim.x/2; stride > 0; stride >>= 1){
        if(lane < stride){ partial[lane] += partial[lane + stride]; }
        __syncthreads();
    }
    if(lane == 0){ q[row] = partial[0]; }
}
/* Launches the d = p.q dot-product kernel and finishes the per-block
 * reduction on the host into *d_host. */
static void gpu_kernel_four(double* d_host){
    hipLaunchKernelGGL((gpu_kernel_four),
            dim3(kernel_four_blocks_per_grid),
            dim3(THREADS_PER_BLOCK_ON_KERNEL_FOUR),
            amount_of_share_data_on_kernel_four,
            0,
            d_device, p_device, q_device, global_data_device);
    global_data_reduce = 0.0;
    hipMemcpy(global_data, global_data_device, reduce_memory_on_kernel_four, hipMemcpyDeviceToHost);
    for(int block = 0; block < kernel_four_blocks_per_grid; block++){
        global_data_reduce += global_data[block];
    }
    *d_host = global_data_reduce;
}
/*
 * Block-level reduction of the dot product p.q: each thread multiplies its
 * pair of elements, the block reduces in shared memory, and thread 0 writes
 * the partial sum to global_data[blockIdx.x]. The `d` argument is unused here
 * (the host wrapper finishes the reduction); kept for signature compatibility.
 */
__global__ void gpu_kernel_four(double* d,
        double* p,
        double* q,
        double global_data[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    /*
     * Bug fix: the original returned early for thread_id >= NA before
     * __syncthreads(), leaving the barrier unreached by part of the last
     * block (undefined behavior). Out-of-range threads now contribute 0.0.
     * Also uses local_id consistently instead of mixing it with threadIdx.x.
     */
    if(thread_id < NA){
        share_data[local_id] = p[thread_id] * q[thread_id];
    }else{
        share_data[local_id] = 0.0;
    }
    __syncthreads();
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){global_data[blockIdx.x]=share_data[0];}
}
/* Unfused alternative to the merged kernel: two launches computing
 * z += alpha*p followed by r -= alpha*q. */
static void gpu_kernel_five(double alpha_host){
    hipLaunchKernelGGL((gpu_kernel_five_1),
            dim3(kernel_five_blocks_per_grid),
            dim3(THREADS_PER_BLOCK_ON_KERNEL_FIVE),
            0,
            0,
            alpha_host, p_device, z_device);
    hipLaunchKernelGGL((gpu_kernel_five_2),
            dim3(kernel_five_blocks_per_grid),
            dim3(THREADS_PER_BLOCK_ON_KERNEL_FIVE),
            0,
            0,
            alpha_host, q_device, r_device);
}
/* z = z + alpha*p, one element per thread. */
__global__ void gpu_kernel_five_1(double alpha,
        double* p,
        double* z){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < NA){
        z[idx] = z[idx] + alpha * p[idx];
    }
}
/* r = r - alpha*q, one element per thread. */
__global__ void gpu_kernel_five_2(double alpha,
        double* q,
        double* r){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < NA){
        r[idx] = r[idx] - alpha * q[idx];
    }
}
/* Launches the fused update/reduction kernel ((z += alpha*p), (r -= alpha*q),
 * rho = r.r) and finishes the per-block reduction on the host. */
static void gpu_kernel_five_merged_kernel_six(double alpha_host,
        double* rho_host){
    hipLaunchKernelGGL((gpu_kernel_five_merged_kernel_six),
            dim3(kernel_five_blocks_per_grid),
            dim3(THREADS_PER_BLOCK_ON_KERNEL_FIVE),
            amount_of_share_data_on_kernel_five,
            0,
            alpha_host, p_device, q_device, r_device, z_device, global_data_device);
    global_data_reduce = 0.0;
    hipMemcpy(global_data, global_data_device, reduce_memory_on_kernel_five, hipMemcpyDeviceToHost);
    for(int block = 0; block < kernel_five_blocks_per_grid; block++){
        global_data_reduce += global_data[block];
    }
    *rho_host = global_data_reduce;
}
/*
 * Fused kernel: per element, z += alpha*p and r -= alpha*q (kernel five),
 * then a block-level reduction of the updated r.r (kernel six). Thread 0 of
 * each block writes the partial sum to global_data[blockIdx.x]; the host
 * wrapper finishes the reduction.
 */
__global__ void gpu_kernel_five_merged_kernel_six(double alpha,
        double* p,
        double* q,
        double* r,
        double* z,
        double global_data[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    /* kernel_five computation */
    /* Out-of-range threads contribute 0.0 instead of returning early, so the
     * __syncthreads() below is reached by every thread of the block. */
    if(thread_id < NA){
        double r_value;
        z[thread_id] = z[thread_id] + alpha*p[thread_id];
        r_value = r[thread_id] - alpha*q[thread_id];
        r[thread_id] = r_value;
        share_data[local_id] = r_value * r_value;
    }else{
        share_data[local_id] = 0.0;
    }
    /* kernel_six computation: shared-memory tree reduction */
    __syncthreads();
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){global_data[blockIdx.x]=share_data[0];}
}
/* Unfused alternative: launches the rho = r.r reduction kernel and finishes
 * the per-block reduction on the host into *rho_host. */
static void gpu_kernel_six(double* rho_host){
    hipLaunchKernelGGL((gpu_kernel_six),
            dim3(kernel_six_blocks_per_grid),
            dim3(THREADS_PER_BLOCK_ON_KERNEL_SIX),
            amount_of_share_data_on_kernel_six,
            0,
            r_device, global_data_device);
    global_data_reduce = 0.0;
    hipMemcpy(global_data, global_data_device, reduce_memory_on_kernel_six, hipMemcpyDeviceToHost);
    for(int block = 0; block < kernel_six_blocks_per_grid; block++){
        global_data_reduce += global_data[block];
    }
    *rho_host = global_data_reduce;
}
/*
 * Block-level reduction of r.r (unfused variant of the merged kernel).
 * Thread 0 of each block writes its partial sum to global_data[blockIdx.x];
 * the host wrapper finishes the reduction.
 */
__global__ void gpu_kernel_six(double r[],
        double global_data[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    /*
     * Bug fix: the original returned early for thread_id >= NA before
     * __syncthreads(), so the barrier was not reached by every thread of the
     * last block (undefined behavior). Out-of-range threads now contribute
     * 0.0 and participate in the reduction.
     */
    if(thread_id < NA){
        double r_value = r[thread_id];
        share_data[local_id] = r_value * r_value;
    }else{
        share_data[local_id] = 0.0;
    }
    __syncthreads();
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){global_data[blockIdx.x]=share_data[0];}
}
/*
 * host wrapper for kernel seven: p = r + beta*p on the device.
 * no shared memory and no reduction, so the launch carries 0 bytes of
 * dynamic shared memory; the call returns without synchronizing (the next
 * blocking HIP call on the null stream will wait for it).
 */
static void gpu_kernel_seven(double beta_host){
	hipLaunchKernelGGL(( gpu_kernel_seven), dim3(kernel_seven_blocks_per_grid),
			dim3( THREADS_PER_BLOCK_ON_KERNEL_SEVEN), 0, 0,
			beta_host,
			p_device,
			r_device);
}
/*
 * kernel seven: element-wise CG search-direction update p = r + beta*p.
 * one thread per element, guarded against the tail of the last block.
 */
__global__ void gpu_kernel_seven(double beta,
		double* p,
		double* r){
	const int row = blockIdx.x * blockDim.x + threadIdx.x;
	if(row < NA){
		p[row] = r[row] + beta*p[row];
	}
}
/*
 * host wrapper for kernel eight: sparse matrix-vector product r = A*z.
 * launches one block per matrix row (kernel_eight_blocks_per_grid is set
 * to NA in setup_gpu) with a shared-memory buffer for the per-row
 * reduction. asynchronous: no synchronization here.
 */
static void gpu_kernel_eight(){
	hipLaunchKernelGGL(( gpu_kernel_eight), dim3(kernel_eight_blocks_per_grid),
			dim3( THREADS_PER_BLOCK_ON_KERNEL_EIGHT),
			amount_of_share_data_on_kernel_eight, 0,
			colidx_device,
			rowstr_device,
			a_device,
			r_device,
			z_device);
}
/*
 * kernel eight: sparse matrix-vector product, one block per row (CSR).
 * the block's threads stride over the row's nonzeros, accumulate partial
 * dot products, then tree-reduce them in shared memory; lane 0 writes
 * r[row]. requires blockDim.x doubles of dynamic shared memory and a
 * power-of-two blockDim.x. every thread reaches every barrier (threads
 * past the row's end simply contribute 0.0).
 */
__global__ void gpu_kernel_eight(int colidx[],
		int rowstr[],
		double a[],
		double r[],
		double* z){
	double* partials = (double*)extern_share_data;
	/* (blockIdx.x*blockDim.x+threadIdx.x)/blockDim.x always equals
	 * blockIdx.x, since threadIdx.x < blockDim.x */
	const int row = blockIdx.x;
	const int lane = threadIdx.x;
	const int row_begin = rowstr[row];
	const int row_end = rowstr[row+1];
	double partial = 0.0;
	for(int k = row_begin + lane; k < row_end; k += blockDim.x){
		partial += a[k]*z[colidx[k]];
	}
	partials[lane] = partial;
	__syncthreads();
	for(int stride = blockDim.x >> 1; stride > 0; stride >>= 1){
		if(lane < stride){ partials[lane] += partials[lane + stride]; }
		__syncthreads();
	}
	if(lane == 0){ r[row] = partials[0]; }
}
/*
 * host wrapper for kernel nine: launches the per-block reduction of
 * (x-r).(x-r), copies the per-block partials back, and finishes the
 * reduction on the host into *sum_host.
 */
static void gpu_kernel_nine(double* sum_host){
	hipLaunchKernelGGL(( gpu_kernel_nine), dim3(kernel_nine_blocks_per_grid),
			dim3( THREADS_PER_BLOCK_ON_KERNEL_NINE),
			amount_of_share_data_on_kernel_nine, 0,
			r_device,
			x_device,
			sum_device,
			global_data_device);
	/* hipMemcpy blocks until the kernel above has finished */
	hipMemcpy(global_data, global_data_device, reduce_memory_on_kernel_nine, hipMemcpyDeviceToHost);
	double accumulated = 0.0;
	for(int block = 0; block < kernel_nine_blocks_per_grid; block++){
		accumulated += global_data[block];
	}
	global_data_reduce = accumulated;
	*sum_host = accumulated;
}
/*
 * kernel nine: block-level tree reduction of (x[j]-r[j])^2; each block
 * writes its partial sum into global_data[blockIdx.x] and the host
 * wrapper finishes the reduction. the `sum` parameter is passed but not
 * used here (the host accumulates the final value). requires blockDim.x
 * doubles of dynamic shared memory and a power-of-two blockDim.x.
 */
__global__ void gpu_kernel_nine(double r[], double x[], double* sum, double global_data[]){
	double* share_data = (double*)extern_share_data;
	int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
	int local_id = threadIdx.x;
	/*
	 * fix: guard with a branch instead of an early return. the original
	 * returned for thread_id >= NA, making the tail threads of the last
	 * block skip the __syncthreads() barriers below -- a barrier not
	 * reached by all threads of the block is undefined behavior.
	 */
	if(thread_id < NA){
		double diff = x[thread_id] - r[thread_id];
		share_data[local_id] = diff * diff;
	}else{
		share_data[local_id] = 0.0;
	}
	__syncthreads();
	for(int i=blockDim.x/2; i>0; i>>=1) {
		if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
		__syncthreads();
	}
	if(local_id==0){global_data[blockIdx.x]=share_data[0];}
}
/*
 * host wrapper for kernel ten: computes norm_temp1 = x.z and
 * norm_temp2 = z.z with two separate reduction kernels (the fused
 * two-array device kernel gpu_kernel_ten exists but is not used here),
 * then finishes both reductions on the host.
 */
static void gpu_kernel_ten(double* norm_temp1,
		double* norm_temp2){
	hipLaunchKernelGGL(( gpu_kernel_ten_1), dim3(kernel_ten_blocks_per_grid),dim3(THREADS_PER_BLOCK_ON_KERNEL_TEN),amount_of_share_data_on_kernel_ten, 0, global_data_device,x_device,z_device);
	hipLaunchKernelGGL(( gpu_kernel_ten_2), dim3(kernel_ten_blocks_per_grid),dim3(THREADS_PER_BLOCK_ON_KERNEL_TEN),amount_of_share_data_on_kernel_ten, 0, global_data_two_device,x_device,z_device);
	/* the copies block until both kernels above have finished */
	hipMemcpy(global_data, global_data_device, reduce_memory_on_kernel_ten, hipMemcpyDeviceToHost);
	hipMemcpy(global_data_two, global_data_two_device, reduce_memory_on_kernel_ten, hipMemcpyDeviceToHost);
	double first = 0.0;
	double second = 0.0;
	for(int block = 0; block < kernel_ten_blocks_per_grid; block++){
		first += global_data[block];
		second += global_data_two[block];
	}
	global_data_reduce = first;
	global_data_two_reduce = second;
	*norm_temp1 = first;
	*norm_temp2 = second;
}
/*
 * kernel ten (fused variant): per-block partial reductions of x.z and
 * z.z in one pass. despite the scalar-sounding names, norm_temp1 and
 * norm_temp2 are per-block output arrays indexed by blockIdx.x. requires
 * 2*THREADS_PER_BLOCK_ON_KERNEL_TEN doubles of dynamic shared memory and
 * a power-of-two blockDim.x. note: the host wrapper currently launches
 * gpu_kernel_ten_1/gpu_kernel_ten_2 instead of this kernel.
 */
__global__ void gpu_kernel_ten(double* norm_temp1,
		double* norm_temp2,
		double x[],
		double z[]){
	double* share_data_1 = (double*)(extern_share_data);
	double* share_data_2 = (double*)(&share_data_1[THREADS_PER_BLOCK_ON_KERNEL_TEN]);
	int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
	int local_id = threadIdx.x;
	/*
	 * fix: guard with a branch instead of an early return. the original
	 * returned for thread_id >= NA, making the tail threads of the last
	 * block skip the __syncthreads() barriers below -- a barrier not
	 * reached by all threads of the block is undefined behavior.
	 */
	if(thread_id < NA){
		share_data_1[local_id] = x[thread_id]*z[thread_id];
		share_data_2[local_id] = z[thread_id]*z[thread_id];
	}else{
		share_data_1[local_id] = 0.0;
		share_data_2[local_id] = 0.0;
	}
	__syncthreads();
	for(int i=blockDim.x/2; i>0; i>>=1){
		if(local_id<i){
			share_data_1[local_id]+=share_data_1[local_id+i];
			share_data_2[local_id]+=share_data_2[local_id+i];}
		__syncthreads();
	}
	if(local_id==0){
		norm_temp1[blockIdx.x]=share_data_1[0];
		norm_temp2[blockIdx.x]=share_data_2[0];}
}
/*
 * kernel ten (part 1): per-block partial reduction of x.z into
 * norm_temp[blockIdx.x]; the host wrapper finishes the reduction.
 * requires blockDim.x doubles of dynamic shared memory and a
 * power-of-two blockDim.x.
 */
__global__ void gpu_kernel_ten_1(double* norm_temp,
		double x[],
		double z[]){
	double* share_data = (double*)extern_share_data;
	int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
	int local_id = threadIdx.x;
	/*
	 * fix: guard with a branch instead of an early return. the original
	 * returned for thread_id >= NA, making the tail threads of the last
	 * block skip the __syncthreads() barriers below -- a barrier not
	 * reached by all threads of the block is undefined behavior.
	 */
	if(thread_id < NA){
		share_data[local_id] = x[thread_id]*z[thread_id];
	}else{
		share_data[local_id] = 0.0;
	}
	__syncthreads();
	for(int i=blockDim.x/2; i>0; i>>=1){
		if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
		__syncthreads();
	}
	if(local_id==0){norm_temp[blockIdx.x]=share_data[0];}
}
/*
 * kernel ten (part 2): per-block partial reduction of z.z into
 * norm_temp[blockIdx.x]; the host wrapper finishes the reduction.
 * requires blockDim.x doubles of dynamic shared memory and a
 * power-of-two blockDim.x. the x[] parameter is kept for signature
 * symmetry with gpu_kernel_ten_1 but is not read here.
 */
__global__ void gpu_kernel_ten_2(double* norm_temp,
		double x[],
		double z[]){
	double* share_data = (double*)extern_share_data;
	int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
	int local_id = threadIdx.x;
	/*
	 * fix: guard with a branch instead of an early return. the original
	 * returned for thread_id >= NA, making the tail threads of the last
	 * block skip the __syncthreads() barriers below -- a barrier not
	 * reached by all threads of the block is undefined behavior.
	 */
	if(thread_id < NA){
		share_data[local_id] = z[thread_id]*z[thread_id];
	}else{
		share_data[local_id] = 0.0;
	}
	__syncthreads();
	for(int i=blockDim.x/2; i>0; i>>=1){
		if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
		__syncthreads();
	}
	if(local_id==0){norm_temp[blockIdx.x]=share_data[0];}
}
/*
 * host wrapper for kernel eleven: x = norm_temp2 * z on the device
 * (normalization of the approximate eigenvector). no shared memory and
 * no reduction; asynchronous, no synchronization here.
 */
static void gpu_kernel_eleven(double norm_temp2){
	hipLaunchKernelGGL(( gpu_kernel_eleven), dim3(kernel_eleven_blocks_per_grid),
			dim3( THREADS_PER_BLOCK_ON_KERNEL_ELEVEN), 0, 0,
			norm_temp2,
			x_device,
			z_device);
}
/*
 * kernel eleven: element-wise scaling x = norm_temp2 * z.
 * one thread per element, guarded against the tail of the last block.
 */
__global__ void gpu_kernel_eleven(double norm_temp2, double x[], double z[]){
	const int row = blockIdx.x * blockDim.x + threadIdx.x;
	if(row < NA){
		x[row] = norm_temp2*z[row];
	}
}
/*
* ---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
* ---------------------------------------------------------------------
*/
/*
 * scale a double precision number x in (0,1) by a power of 2 (ipwr2)
 * and truncate toward zero, yielding an integer in [0, ipwr2).
 */
static int icnvrt(double x, int ipwr2){
	double scaled = ipwr2 * x;
	return (int)scaled;
}
/*
* ---------------------------------------------------------------------
* generate the test problem for benchmark 6
* makea generates a sparse matrix with a
* prescribed sparsity distribution
*
* parameter type usage
*
* input
*
* n i number of cols/rows of matrix
* nz i nonzeros as declared array size
* rcond r*8 condition number
* shift r*8 main diagonal shift
*
* output
*
* a r*8 array for nonzeros
* colidx i col indices
* rowstr i row pointers
*
* workspace
*
* iv, arow, acol i
* aelt r*8
* ---------------------------------------------------------------------
*/
/*
 * build the CG test matrix: for each of the n rows, draw a sparse random
 * vector (sprnvc), force a nonzero on the diagonal position (vecset), and
 * record the triples in arow/acol/aelt; then assemble the final CSR
 * structure (a, colidx, rowstr) with sparse(). the order of the
 * sprnvc/vecset calls must not change: both consume the global randlc
 * stream (tran/amult), and reordering would generate a different matrix.
 * firstcol/lastcol are part of the original NPB interface but unused here.
 */
static void makea(int n,
		int nz,
		double a[],
		int colidx[],
		int rowstr[],
		int firstrow,
		int lastrow,
		int firstcol,
		int lastcol,
		int arow[],
		int acol[][NONZER+1],
		double aelt[][NONZER+1],
		int iv[]){
	int iouter, ivelt, nzv, nn1;
	int ivc[NONZER+1];	/* column indices of the current random row */
	double vc[NONZER+1];	/* values of the current random row */
	/*
	 * --------------------------------------------------------------------
	 * nonzer is approximately (int(sqrt(nnza /n)));
	 * --------------------------------------------------------------------
	 * nn1 is the smallest power of two not less than n
	 * --------------------------------------------------------------------
	 */
	nn1 = 1;
	do{
		nn1 = 2 * nn1;
	}while(nn1 < n);
	/*
	 * -------------------------------------------------------------------
	 * generate nonzero positions and save for the use in sparse
	 * -------------------------------------------------------------------
	 */
	for(iouter = 0; iouter < n; iouter++){
		nzv = NONZER;
		/* random sparse row, then pin entry iouter+1 to 0.5 (diagonal) */
		sprnvc(n, nzv, nn1, vc, ivc);
		vecset(n, vc, ivc, &nzv, iouter+1, 0.5);
		arow[iouter] = nzv;
		for(ivelt = 0; ivelt < nzv; ivelt++){
			/* ivc holds 1-based indices; store them 0-based */
			acol[iouter][ivelt] = ivc[ivelt] - 1;
			aelt[iouter][ivelt] = vc[ivelt];
		}
	}
	/*
	 * ---------------------------------------------------------------------
	 * ... make the sparse matrix from list of elements with duplicates
	 * (iv is used as workspace)
	 * ---------------------------------------------------------------------
	 */
	sparse(a,
			colidx,
			rowstr,
			n,
			nz,
			NONZER,
			arow,
			acol,
			aelt,
			firstrow,
			lastrow,
			iv,
			RCOND,
			SHIFT);
}
static void release_gpu(){
hipFree(colidx_device);
hipFree(rowstr_device);
hipFree(a_device);
hipFree(p_device);
hipFree(q_device);
hipFree(r_device);
hipFree(x_device);
hipFree(z_device);
hipFree(rho_device);
hipFree(d_device);
hipFree(alpha_device);
hipFree(beta_device);
hipFree(sum_device);
hipFree(norm_temp1_device);
hipFree(norm_temp2_device);
hipFree(global_data_device);
hipFree(global_data_two_device);
}
/*
 * one-time GPU setup: computes buffer sizes, allocates host reduction
 * buffers and all device buffers, copies the initial problem data to the
 * device, and derives every kernel's grid size, dynamic shared-memory
 * size, and host-side reduction-buffer size. note: HIP return codes are
 * not checked here -- an allocation failure would surface later as a
 * wrong result rather than an error (NOTE(review): consider adding
 * error checks).
 */
static void setup_gpu(){
	/* global_data holds one partial sum per block; sized for the smallest
	 * block size used by any reduction kernel so it fits all of them */
	global_data_elements=ceil(double(NA)/double(MINIMUM_THREADS_PER_BLOCK));
	size_global_data=sizeof(double)*(global_data_elements);
	/* byte sizes of each buffer; vectors are NA+2 doubles as on the host */
	size_colidx_device=sizeof(int)*(NZ);
	size_rowstr_device=sizeof(int)*(NA+1);
	size_iv_device=sizeof(int)*(NA);
	size_arow_device=sizeof(int)*(NA);
	size_acol_device=sizeof(int)*(NAZ);
	size_aelt_device=sizeof(double)*(NAZ);
	size_a_device=sizeof(double)*(NZ);
	size_x_device=sizeof(double)*(NA+2);
	size_z_device=sizeof(double)*(NA+2);
	size_p_device=sizeof(double)*(NA+2);
	size_q_device=sizeof(double)*(NA+2);
	size_r_device=sizeof(double)*(NA+2);
	size_rho_device=sizeof(double);
	size_d_device=sizeof(double);
	size_alpha_device=sizeof(double);
	size_beta_device=sizeof(double);
	size_sum_device=sizeof(double);
	size_norm_temp1_device=sizeof(double);
	size_norm_temp2_device=sizeof(double);
	/* host-side buffers that receive per-block partial sums (freed in
	 * release_gpu) */
	global_data=(double*)malloc(size_global_data);
	global_data_two=(double*)malloc(size_global_data);
	/* device allocations */
	hipMalloc(&colidx_device, size_colidx_device);
	hipMalloc(&rowstr_device, size_rowstr_device);
	hipMalloc(&a_device, size_a_device);
	hipMalloc(&p_device, size_p_device);
	hipMalloc(&q_device, size_q_device);
	hipMalloc(&r_device, size_r_device);
	hipMalloc(&x_device, size_x_device);
	hipMalloc(&z_device, size_z_device);
	hipMalloc(&rho_device, size_rho_device);
	hipMalloc(&d_device, size_d_device);
	hipMalloc(&alpha_device, size_alpha_device);
	hipMalloc(&beta_device, size_beta_device);
	hipMalloc(&sum_device, size_sum_device);
	hipMalloc(&norm_temp1_device, size_norm_temp1_device);
	hipMalloc(&norm_temp2_device, size_norm_temp2_device);
	hipMalloc(&global_data_device, size_global_data);
	hipMalloc(&global_data_two_device, size_global_data);
	/* upload the assembled matrix and the initial vectors */
	hipMemcpy(colidx_device, colidx, size_colidx_device, hipMemcpyHostToDevice);
	hipMemcpy(rowstr_device, rowstr, size_rowstr_device, hipMemcpyHostToDevice);
	hipMemcpy(a_device, a, size_a_device, hipMemcpyHostToDevice);
	hipMemcpy(p_device, p, size_p_device, hipMemcpyHostToDevice);
	hipMemcpy(q_device, q, size_q_device, hipMemcpyHostToDevice);
	hipMemcpy(r_device, r, size_r_device, hipMemcpyHostToDevice);
	hipMemcpy(x_device, x, size_x_device, hipMemcpyHostToDevice);
	hipMemcpy(z_device, z, size_z_device, hipMemcpyHostToDevice);
	/* grid sizes: element-wise kernels cover NA elements; kernels three
	 * and eight use one block per matrix row, hence NA blocks */
	kernel_one_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_ONE));
	kernel_two_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_TWO));
	kernel_three_blocks_per_grid=NA;
	kernel_four_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_FOUR));
	kernel_five_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_FIVE));
	kernel_six_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_SIX));
	kernel_seven_blocks_per_grid=(ceil((double)NA/THREADS_PER_BLOCK_ON_KERNEL_SEVEN));
	kernel_eight_blocks_per_grid=NA;
	kernel_nine_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_NINE));
	kernel_ten_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_TEN));
	kernel_eleven_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_ELEVEN));
	/* dynamic shared memory per block: one double per thread (kernel ten's
	 * fused variant would need twice this, but the wrapper launches the
	 * single-array variants) */
	amount_of_share_data_on_kernel_one=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_ONE;
	amount_of_share_data_on_kernel_two=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_TWO;
	amount_of_share_data_on_kernel_three=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_THREE;
	amount_of_share_data_on_kernel_four=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_FOUR;
	amount_of_share_data_on_kernel_five=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_FIVE;
	amount_of_share_data_on_kernel_six=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_SIX;
	amount_of_share_data_on_kernel_seven=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_SEVEN;
	amount_of_share_data_on_kernel_eight=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_EIGHT;
	amount_of_share_data_on_kernel_nine=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_NINE;
	amount_of_share_data_on_kernel_ten=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_TEN;
	amount_of_share_data_on_kernel_eleven=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_ELEVEN;
	/* bytes copied back per reduction: one double per block */
	reduce_memory_on_kernel_one=kernel_one_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_two=kernel_two_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_three=kernel_three_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_four=kernel_four_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_five=kernel_five_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_six=kernel_six_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_seven=kernel_seven_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_eight=kernel_eight_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_nine=kernel_nine_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_ten=kernel_ten_blocks_per_grid*sizeof(double);
	reduce_memory_on_kernel_eleven=kernel_eleven_blocks_per_grid*sizeof(double);
}
/*
* ---------------------------------------------------------------------
* rows range from firstrow to lastrow
* the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
* ---------------------------------------------------------------------
*/
/*
 * ---------------------------------------------------------------------
 * rows range from firstrow to lastrow
 * the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
 * ---------------------------------------------------------------------
 * assemble the CSR matrix (a, colidx, rowstr) from the per-row triples
 * in arow/acol/aelt, summing duplicate entries, adding the scaled
 * identity (rcond - shift) on the diagonal, and compacting out the
 * slots that were reserved for duplicates. translated from Fortran:
 * goto_40 emulates the original "goto 40" control flow. nozer is part
 * of the original interface but unused here.
 */
static void sparse(double a[],
		int colidx[],
		int rowstr[],
		int n,
		int nz,
		int nozer,
		int arow[],
		int acol[][NONZER+1],
		double aelt[][NONZER+1],
		int firstrow,
		int lastrow,
		int nzloc[],
		double rcond,
		double shift){
	int nrows;
	/*
	 * ---------------------------------------------------
	 * generate a sparse matrix from a list of
	 * [col, row, element] tri
	 * ---------------------------------------------------
	 */
	int i, j, j1, j2, nza, k, kk, nzrow, jcol;
	double size, scale, ratio, va;
	boolean goto_40;
	/*
	 * --------------------------------------------------------------------
	 * how many rows of result
	 * --------------------------------------------------------------------
	 */
	nrows = lastrow - firstrow + 1;
	/*
	 * --------------------------------------------------------------------
	 * ...count the number of triples in each row
	 * --------------------------------------------------------------------
	 */
	for(j = 0; j < nrows+1; j++){
		rowstr[j] = 0;
	}
	for(i = 0; i < n; i++){
		for(nza = 0; nza < arow[i]; nza++){
			j = acol[i][nza] + 1;
			/* reserve arow[i] slots per triple: the outer product of row i
			 * with itself contributes arow[i] entries to this row */
			rowstr[j] = rowstr[j] + arow[i];
		}
	}
	/* prefix sum turns the counts into row start offsets */
	rowstr[0] = 0;
	for(j = 1; j < nrows+1; j++){
		rowstr[j] = rowstr[j] + rowstr[j-1];
	}
	nza = rowstr[nrows] - 1;
	/*
	 * ---------------------------------------------------------------------
	 * ... rowstr(j) now is the location of the first nonzero
	 * of row j of a
	 * ---------------------------------------------------------------------
	 */
	if(nza > nz){
		printf("Space for matrix elements exceeded in sparse\n");
		printf("nza, nzmax = %d, %d\n", nza, nz);
		exit(EXIT_FAILURE);
	}
	/*
	 * ---------------------------------------------------------------------
	 * ... preload data pages
	 * ---------------------------------------------------------------------
	 * colidx == -1 marks an empty slot; nzloc[j] will count duplicates
	 */
	for(j = 0; j < nrows; j++){
		for(k = rowstr[j]; k < rowstr[j+1]; k++){
			a[k] = 0.0;
			colidx[k] = -1;
		}
		nzloc[j] = 0;
	}
	/*
	 * ---------------------------------------------------------------------
	 * ... generate actual values by summing duplicates
	 * ---------------------------------------------------------------------
	 * each row i contributes the outer product aelt[i] x aelt[i], scaled
	 * by a geometric factor `size` that decays by `ratio` per row so the
	 * matrix has condition number ~1/rcond
	 */
	size = 1.0;
	ratio = pow(rcond, (1.0 / (double)(n)));
	for(i = 0; i < n; i++){
		for(nza = 0; nza < arow[i]; nza++){
			j = acol[i][nza];
			scale = size * aelt[i][nza];
			for(nzrow = 0; nzrow < arow[i]; nzrow++){
				jcol = acol[i][nzrow];
				va = aelt[i][nzrow] * scale;
				/*
				 * --------------------------------------------------------------------
				 * ... add the identity * rcond to the generated matrix to bound
				 * the smallest eigenvalue from below by rcond
				 * --------------------------------------------------------------------
				 */
				if(jcol == j && j == i){
					va = va + rcond - shift;
				}
				/* find (or create) the slot for column jcol in row j; on exit
				 * of the search loop, k indexes that slot (goto_40 emulates
				 * the Fortran "goto 40") */
				goto_40 = FALSE;
				for(k = rowstr[j]; k < rowstr[j+1]; k++){
					if(colidx[k] > jcol){
						/*
						 * ----------------------------------------------------------------
						 * ... insert colidx here orderly
						 * ----------------------------------------------------------------
						 */
						for(kk = rowstr[j+1]-2; kk >= k; kk--){
							if(colidx[kk] > -1){
								a[kk+1] = a[kk];
								colidx[kk+1] = colidx[kk];
							}
						}
						colidx[k] = jcol;
						a[k] = 0.0;
						goto_40 = TRUE;
						break;
					}else if(colidx[k] == -1){
						colidx[k] = jcol;
						goto_40 = TRUE;
						break;
					}else if(colidx[k] == jcol){
						/*
						 * --------------------------------------------------------------
						 * ... mark the duplicated entry
						 * -------------------------------------------------------------
						 */
						nzloc[j] = nzloc[j] + 1;
						goto_40 = TRUE;
						break;
					}
				}
				if(goto_40 == FALSE){
					printf("internal error in sparse: i=%d\n", i);
					exit(EXIT_FAILURE);
				}
				/* k still points at the slot chosen in the loop above */
				a[k] = a[k] + va;
			}
		}
		size = size * ratio;
	}
	/*
	 * ---------------------------------------------------------------------
	 * ... remove empty entries and generate final results
	 * ---------------------------------------------------------------------
	 * nzloc becomes a running total of duplicates seen in rows <= j, i.e.
	 * how far row j's entries must shift left during compaction
	 */
	for(j = 1; j < nrows; j++){
		nzloc[j] = nzloc[j] + nzloc[j-1];
	}
	for(j = 0; j < nrows; j++){
		if(j > 0){
			j1 = rowstr[j] - nzloc[j-1];
		}else{
			j1 = 0;
		}
		j2 = rowstr[j+1] - nzloc[j];
		nza = rowstr[j];
		for(k = j1; k < j2; k++){
			a[k] = a[nza];
			colidx[k] = colidx[nza];
			nza = nza + 1;
		}
	}
	for(j = 1; j < nrows+1; j++){
		rowstr[j] = rowstr[j] - nzloc[j-1];
	}
	/* final nonzero count; the value is not used after this point */
	nza = rowstr[nrows] - 1;
}
/*
* ---------------------------------------------------------------------
* generate a sparse n-vector (v, iv)
* having nzv nonzeros
*
* mark(i) is set to 1 if position i is nonzero.
* mark is all zero on entry and is reset to all zero before exit
* this corrects a performance bug found by John G. Lewis, caused by
* reinitialization of mark on every one of the n calls to sprnvc
* ---------------------------------------------------------------------
*/
static void sprnvc(int n, int nz, int nn1, double v[], int iv[]){
int nzv, ii, i;
double vecelt, vecloc;
nzv = 0;
while(nzv < nz){
vecelt = randlc(&tran, amult);
/*
* --------------------------------------------------------------------
* generate an integer between 1 and n in a portable manner
* --------------------------------------------------------------------
*/
vecloc = randlc(&tran, amult);
i = icnvrt(vecloc, nn1) + 1;
if(i>n){continue;}
/*
* --------------------------------------------------------------------
* was this integer generated already?
* --------------------------------------------------------------------
*/
boolean was_gen = FALSE;
for(ii = 0; ii < nzv; ii++){
if(iv[ii] == i){
was_gen = TRUE;
break;
}
}
if(was_gen){continue;}
v[nzv] = vecelt;
iv[nzv] = i;
nzv = nzv + 1;
}
}
/*
* --------------------------------------------------------------------
* set ith element of sparse vector (v, iv) with
* nzv nonzeros to val
* --------------------------------------------------------------------
*/
/*
 * set the element at (1-based) index i of the sparse vector (v, iv) with
 * *nzv nonzeros to val: overwrite it if index i is already present,
 * otherwise append it and grow *nzv. the parameter n is unused (kept for
 * interface compatibility with the original NPB code).
 */
static void vecset(int n, double v[], int iv[], int* nzv, int i, double val){
	int found = 0;
	/* overwrite every slot whose index equals i, if any */
	for(int k = 0; k < *nzv; k++){
		if(iv[k] == i){
			v[k] = val;
			found = 1;
		}
	}
	/* index i not present: append a new nonzero */
	if(!found){
		v[*nzv] = val;
		iv[*nzv] = i;
		*nzv = *nzv + 1;
	}
}
| e7a2895269644885302fb74cbf20d93369a65c6f.cu | /**
* NASA Advanced Supercomputing Parallel Benchmarks C++
*
* based on NPB 3.3.1
*
* original version and technical report:
* http://www.nas.nasa.gov/Software/NPB/
*
* Authors:
* M. Yarrow
* C. Kuszmaul
*
* C++ version:
* Dalvan Griebler <[email protected]>
* Júnior Löff <[email protected]>
* Gabriell Araujo <[email protected]>
*
* CUDA version:
* Gabriell Araujo <[email protected]>
*/
#include <cuda.h>
#include "../common/npb-CPP.hpp"
#include "npbparams.hpp"
/*
* ---------------------------------------------------------------------
* note: please observe that in the routine conj_grad three
* implementations of the sparse matrix-vector multiply have
* been supplied. the default matrix-vector multiply is not
* loop unrolled. the alternate implementations are unrolled
* to a depth of 2 and unrolled to a depth of 8. please
* experiment with these to find the fastest for your particular
* architecture. if reporting timing results, any of these three may
* be used without penalty.
* ---------------------------------------------------------------------
* class specific parameters:
* it appears here for reference only.
* these are their values, however, this info is imported in the npbparams.h
* include file, which is written by the sys/setparams.c program.
* ---------------------------------------------------------------------
*/
#define NZ (NA*(NONZER+1)*(NONZER+1))
#define NAZ (NA*(NONZER+1))
#define T_INIT (0)
#define T_BENCH (1)
#define T_CONJ_GRAD (2)
#define T_LAST (3)
#define PROFILING_KERNEL_ONE (21)
#define PROFILING_KERNEL_TWO (22)
#define PROFILING_KERNEL_THREE (23)
#define PROFILING_KERNEL_FOUR (24)
#define PROFILING_KERNEL_FIVE (25)
#define PROFILING_KERNEL_SIX (26)
#define PROFILING_KERNEL_SEVEN (27)
#define PROFILING_KERNEL_EIGHT (28)
#define PROFILING_KERNEL_NINE (29)
#define PROFILING_KERNEL_TEN (30)
#define PROFILING_KERNEL_ELEVEN (31)
#define PROFILING_KERNEL_FIVE_MERGED_KERNEL_SIX (32)
#define MINIMUM_THREADS_PER_BLOCK (64)
#define THREADS_PER_BLOCK_ON_KERNEL_ONE (128)
#define THREADS_PER_BLOCK_ON_KERNEL_TWO (64)
#define THREADS_PER_BLOCK_ON_KERNEL_THREE (32)
#define THREADS_PER_BLOCK_ON_KERNEL_FOUR (64)
#define THREADS_PER_BLOCK_ON_KERNEL_FIVE (64)
#define THREADS_PER_BLOCK_ON_KERNEL_SIX (64)
#define THREADS_PER_BLOCK_ON_KERNEL_SEVEN (128)
#define THREADS_PER_BLOCK_ON_KERNEL_EIGHT (32)
#define THREADS_PER_BLOCK_ON_KERNEL_NINE (64)
#define THREADS_PER_BLOCK_ON_KERNEL_TEN (64)
#define THREADS_PER_BLOCK_ON_KERNEL_ELEVEN (128)
/* global variables */
#if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION)
static int colidx[NZ];
static int rowstr[NA+1];
static int iv[NA];
static int arow[NA];
static int acol[NAZ];
static double aelt[NAZ];
static double a[NZ];
static double x[NA+2];
static double z[NA+2];
static double p[NA+2];
static double q[NA+2];
static double r[NA+2];
#else
static int (*colidx)=(int*)malloc(sizeof(int)*(NZ));
static int (*rowstr)=(int*)malloc(sizeof(int)*(NA+1));
static int (*iv)=(int*)malloc(sizeof(int)*(NA));
static int (*arow)=(int*)malloc(sizeof(int)*(NA));
static int (*acol)=(int*)malloc(sizeof(int)*(NAZ));
static double (*aelt)=(double*)malloc(sizeof(double)*(NAZ));
static double (*a)=(double*)malloc(sizeof(double)*(NZ));
static double (*x)=(double*)malloc(sizeof(double)*(NA+2));
static double (*z)=(double*)malloc(sizeof(double)*(NA+2));
static double (*p)=(double*)malloc(sizeof(double)*(NA+2));
static double (*q)=(double*)malloc(sizeof(double)*(NA+2));
static double (*r)=(double*)malloc(sizeof(double)*(NA+2));
#endif
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
static double amult;
static double tran;
static boolean timeron;
/* gpu variables */
int* colidx_device;
int* rowstr_device;
double* a_device;
double* p_device;
double* q_device;
double* r_device;
double* x_device;
double* z_device;
double* rho_device;
double* d_device;
double* alpha_device;
double* beta_device;
double* sum_device;
double* norm_temp1_device;
double* norm_temp2_device;
double* global_data;
double* global_data_two;
double* global_data_device;
double* global_data_two_device;
double global_data_reduce;
double global_data_two_reduce;
size_t global_data_elements;
size_t size_global_data;
size_t size_colidx_device;
size_t size_rowstr_device;
size_t size_iv_device;
size_t size_arow_device;
size_t size_acol_device;
size_t size_aelt_device;
size_t size_a_device;
size_t size_x_device;
size_t size_z_device;
size_t size_p_device;
size_t size_q_device;
size_t size_r_device;
size_t size_rho_device;
size_t size_d_device;
size_t size_alpha_device;
size_t size_beta_device;
size_t size_sum_device;
size_t size_norm_temp1_device;
size_t size_norm_temp2_device;
size_t kernel_one_blocks_per_grid;
size_t kernel_two_blocks_per_grid;
size_t kernel_three_blocks_per_grid;
size_t kernel_four_blocks_per_grid;
size_t kernel_five_blocks_per_grid;
size_t kernel_six_blocks_per_grid;
size_t kernel_seven_blocks_per_grid;
size_t kernel_eight_blocks_per_grid;
size_t kernel_nine_blocks_per_grid;
size_t kernel_ten_blocks_per_grid;
size_t kernel_eleven_blocks_per_grid;
size_t amount_of_share_data_on_kernel_one;
size_t amount_of_share_data_on_kernel_two;
size_t amount_of_share_data_on_kernel_three;
size_t amount_of_share_data_on_kernel_four;
size_t amount_of_share_data_on_kernel_five;
size_t amount_of_share_data_on_kernel_six;
size_t amount_of_share_data_on_kernel_seven;
size_t amount_of_share_data_on_kernel_eight;
size_t amount_of_share_data_on_kernel_nine;
size_t amount_of_share_data_on_kernel_ten;
size_t amount_of_share_data_on_kernel_eleven;
size_t reduce_memory_on_kernel_one;
size_t reduce_memory_on_kernel_two;
size_t reduce_memory_on_kernel_three;
size_t reduce_memory_on_kernel_four;
size_t reduce_memory_on_kernel_five;
size_t reduce_memory_on_kernel_six;
size_t reduce_memory_on_kernel_seven;
size_t reduce_memory_on_kernel_eight;
size_t reduce_memory_on_kernel_nine;
size_t reduce_memory_on_kernel_ten;
size_t reduce_memory_on_kernel_eleven;
extern __shared__ double extern_share_data[];
/* function prototypes */
static void conj_grad(int colidx[],
int rowstr[],
double x[],
double z[],
double a[],
double p[],
double q[],
double r[],
double* rnorm);
static void conj_grad_gpu(double* rnorm);
static void gpu_kernel_one();
__global__ void gpu_kernel_one(double p[],
double q[],
double r[],
double x[],
double z[]);
static void gpu_kernel_two(double* rho_host);
__global__ void gpu_kernel_two(double r[],
double* rho,
double global_data[]);
static void gpu_kernel_three();
__global__ void gpu_kernel_three(int colidx[],
int rowstr[],
double a[],
double p[],
double q[]);
static void gpu_kernel_four(double* d_host);
__global__ void gpu_kernel_four(double* d,
double* p,
double* q,
double global_data[]);
static void gpu_kernel_five(double alpha_host);
__global__ void gpu_kernel_five_1(double alpha,
double* p,
double* z);
__global__ void gpu_kernel_five_2(double alpha,
double* q,
double* r);
static void gpu_kernel_five_merged_kernel_six(double alpha_host,
double* rho_host);
__global__ void gpu_kernel_five_merged_kernel_six(double alpha,
double* p,
double* q,
double* r,
double* z,
double global_data[]);
static void gpu_kernel_six(double* rho_host);
__global__ void gpu_kernel_six(double r[],
double global_data[]);
static void gpu_kernel_seven(double beta_host);
__global__ void gpu_kernel_seven(double beta,
double* p,
double* r);
static void gpu_kernel_eight();
__global__ void gpu_kernel_eight(int colidx[],
int rowstr[],
double a[],
double r[],
double* z);
static void gpu_kernel_nine(double* sum_host);
__global__ void gpu_kernel_nine(double r[],
double x[],
double* sum,
double global_data[]);
static void gpu_kernel_ten(double* norm_temp1,
double* norm_temp2);
__global__ void gpu_kernel_ten(double* norm_temp1,
double* norm_temp2,
double x[],
double z[]);
__global__ void gpu_kernel_ten_1(double* norm_temp,
double x[],
double z[]);
__global__ void gpu_kernel_ten_2(double* norm_temp,
double x[],
double z[]);
static void gpu_kernel_eleven(double norm_temp2);
__global__ void gpu_kernel_eleven(double norm_temp2,
double x[],
double z[]);
static int icnvrt(double x,
int ipwr2);
static void makea(int n,
int nz,
double a[],
int colidx[],
int rowstr[],
int firstrow,
int lastrow,
int firstcol,
int lastcol,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int iv[]);
static void release_gpu();
static void setup_gpu();
static void sparse(double a[],
int colidx[],
int rowstr[],
int n,
int nz,
int nozer,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int firstrow,
int lastrow,
int nzloc[],
double rcond,
double shift);
static void sprnvc(int n,
int nz,
int nn1,
double v[],
int iv[]);
static void vecset(int n,
double v[],
int iv[],
int* nzv,
int i,
double val);
/* cg */
/*
 * CG benchmark driver.
 * Builds the sparse test matrix, runs one untimed CPU iteration of the
 * conjugate-gradient solver to touch all code/data pages, then times
 * NITER iterations of the inverse power method on the GPU, verifying the
 * final eigenvalue estimate (zeta) against the class reference value.
 */
int main(int argc, char** argv){
#if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION)
printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n");
#endif
int i, j, k, it;
double zeta;
double rnorm;
double norm_temp1, norm_temp2;
double t, mflops, tmax;
char class_npb;
boolean verified;
double zeta_verify_value, epsilon, err;
char *t_names[T_LAST];
/* reset the section timers and the per-kernel profiling timers */
for(i=0; i<T_LAST; i++){
timer_clear(i);
}
timer_clear(PROFILING_KERNEL_ONE);
timer_clear(PROFILING_KERNEL_TWO);
timer_clear(PROFILING_KERNEL_THREE);
timer_clear(PROFILING_KERNEL_FOUR);
timer_clear(PROFILING_KERNEL_FIVE);
timer_clear(PROFILING_KERNEL_SIX);
timer_clear(PROFILING_KERNEL_SEVEN);
timer_clear(PROFILING_KERNEL_EIGHT);
timer_clear(PROFILING_KERNEL_NINE);
timer_clear(PROFILING_KERNEL_TEN);
timer_clear(PROFILING_KERNEL_ELEVEN);
timer_clear(PROFILING_KERNEL_FIVE_MERGED_KERNEL_SIX);
/* detailed timing is enabled by the mere presence of a timer.flag file */
FILE* fp;
if((fp = fopen("timer.flag", "r")) != NULL){
timeron = TRUE;
t_names[T_INIT] = (char*)"init";
t_names[T_BENCH] = (char*)"benchmk";
t_names[T_CONJ_GRAD] = (char*)"conjgd";
fclose(fp);
}else{
timeron = FALSE;
}
timer_start(T_INIT);
firstrow = 0;
lastrow  = NA-1;
firstcol = 0;
lastcol  = NA-1;
/* map the compile-time problem parameters to a benchmark class and its
 * published reference value for zeta */
if(NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0){
class_npb = 'S';
zeta_verify_value = 8.5971775078648;
}else if(NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0){
class_npb = 'W';
zeta_verify_value = 10.362595087124;
}else if(NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0){
class_npb = 'A';
zeta_verify_value = 17.130235054029;
}else if(NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0){
class_npb = 'B';
zeta_verify_value = 22.712745482631;
}else if(NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0){
class_npb = 'C';
zeta_verify_value = 28.973605592845;
}else if(NA == 1500000 && NONZER == 21 && NITER == 100 && SHIFT == 500.0){
class_npb = 'D';
zeta_verify_value = 52.514532105794;
}else if(NA == 9000000 && NONZER == 26 && NITER == 100 && SHIFT == 1500.0){
class_npb = 'E';
zeta_verify_value = 77.522164599383;
}else{
class_npb = 'U';
}
printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - CG Benchmark\n\n");
printf(" Size: %11d\n", NA);
printf(" Iterations: %5d\n", NITER);
naa = NA;
nzz = NZ;
/* initialize random number generator */
tran    = 314159265.0;
amult   = 1220703125.0;
zeta    = randlc( &tran, amult );
/* build the sparse matrix (a, colidx, rowstr) with prescribed sparsity */
makea(naa,
nzz,
a,
colidx,
rowstr,
firstrow,
lastrow,
firstcol,
lastcol,
arow,
(int(*)[NONZER+1])(void*)acol,
(double(*)[NONZER+1])(void*)aelt,
iv);
/*
 * ---------------------------------------------------------------------
 * note: as a result of the above call to makea:
 * values of j used in indexing rowstr go from 0 --> lastrow-firstrow
 * values of colidx which are col indexes go from firstcol --> lastcol
 * so:
 * shift the col index vals from actual (firstcol --> lastcol)
 * to local, i.e., (0 --> lastcol-firstcol)
 * ---------------------------------------------------------------------
 */
for(j = 0; j < lastrow - firstrow + 1; j++){
for(k = rowstr[j]; k < rowstr[j+1]; k++){
colidx[k] = colidx[k] - firstcol;
}
}
/* set starting vector to (1, 1, .... 1) */
for(i = 0; i < NA+1; i++){
x[i] = 1.0;
}
for(j = 0; j<lastcol-firstcol+1; j++){
q[j] = 0.0;
z[j] = 0.0;
r[j] = 0.0;
p[j] = 0.0;
}
zeta = 0.0;
/*
 * -------------------------------------------------------------------
 * ---->
 * do one iteration untimed to init all code and data page tables
 * ----> (then reinit, start timing, to niter its)
 * -------------------------------------------------------------------*/
for(it = 1; it <= 1; it++){
/* the call to the conjugate gradient routine */
conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
/*
 * --------------------------------------------------------------------
 * zeta = shift + 1/(x.z)
 * so, first: (x.z)
 * also, find norm of z
 * so, first: (z.z)
 * --------------------------------------------------------------------
 */
norm_temp1 = 0.0;
norm_temp2 = 0.0;
for(j = 0; j < lastcol - firstcol + 1; j++){
norm_temp1 = norm_temp1 + x[j] * z[j];
norm_temp2 = norm_temp2 + z[j] * z[j];
}
norm_temp2 = 1.0 / sqrt(norm_temp2);
/* normalize z to obtain x */
for(j = 0; j < lastcol - firstcol + 1; j++){
x[j] = norm_temp2 * z[j];
}
} /* end of do one iteration untimed */
/* set starting vector to (1, 1, .... 1) */
for(i = 0; i < NA+1; i++){
x[i] = 1.0;
}
zeta = 0.0;
timer_stop(T_INIT);
printf(" Initialization time = %15.3f seconds\n", timer_read(T_INIT));
/* allocate device buffers, upload the matrix/vectors, and size the launches */
setup_gpu();
timer_start(T_BENCH);
/*
 * --------------------------------------------------------------------
 * ---->
 * main iteration for inverse power method
 * ---->
 * --------------------------------------------------------------------
 */
for(it = 1; it <= NITER; it++){
/* the call to the conjugate gradient routine */
if(timeron){timer_start(T_CONJ_GRAD);}
conj_grad_gpu(&rnorm);
if(timeron){timer_stop(T_CONJ_GRAD);}
/*
 * --------------------------------------------------------------------
 * zeta = shift + 1/(x.z)
 * so, first: (x.z)
 * also, find norm of z
 * so, first: (z.z)
 * --------------------------------------------------------------------
 */
gpu_kernel_ten(&norm_temp1, &norm_temp2);
norm_temp2 = 1.0 / sqrt(norm_temp2);
zeta = SHIFT + 1.0 / norm_temp1;
if(it==1){printf("\n   iteration           ||r||                 zeta\n");}
printf("    %5d       %20.14e%20.13e\n", it, rnorm, zeta);
/* normalize z to obtain x */
gpu_kernel_eleven(norm_temp2);
} /* end of main iter inv pow meth */
timer_stop(T_BENCH);
/*
 * --------------------------------------------------------------------
 * end of timed section
 * --------------------------------------------------------------------
 */
t = timer_read(T_BENCH);
printf(" Benchmark completed\n");
/* verify: relative error of zeta against the class reference value */
epsilon = 1.0e-10;
if(class_npb != 'U'){
err = fabs(zeta - zeta_verify_value) / zeta_verify_value;
if(err <= epsilon){
verified = TRUE;
printf(" VERIFICATION SUCCESSFUL\n");
printf(" Zeta is    %20.13e\n", zeta);
printf(" Error is   %20.13e\n", err);
}else{
verified = FALSE;
printf(" VERIFICATION FAILED\n");
printf(" Zeta                %20.13e\n", zeta);
printf(" The correct zeta is %20.13e\n", zeta_verify_value);
}
}else{
verified = FALSE;
printf(" Problem size unknown\n");
printf(" NO VERIFICATION PERFORMED\n");
}
/* NPB's published operation-count formula for CG */
if(t != 0.0){
mflops = (double)(2.0*NITER*NA)
* (3.0+(double)(NONZER*(NONZER+1))
+ 25.0
* (5.0+(double)(NONZER*(NONZER+1)))+3.0)
/ t / 1000000.0;
}else{
mflops = 0.0;
}
c_print_results((char*)"CG",
class_npb,
NA,
0,
0,
NITER,
t,
mflops,
(char*)" floating point",
verified,
(char*)NPBVERSION,
(char*)COMPILETIME,
(char*)CS1,
(char*)CS2,
(char*)CS3,
(char*)CS4,
(char*)CS5,
(char*)CS6,
(char*)CS7);
/*
 * ---------------------------------------------------------------------
 * more timers
 * ---------------------------------------------------------------------
 */
if(timeron){
tmax = timer_read(T_BENCH);
if(tmax == 0.0){tmax = 1.0;}
printf("  SECTION   Time (secs)\n");
for(i = 0; i < T_LAST; i++){
t = timer_read(i);
if(i == T_INIT){
printf("  %8s:%9.3f\n", t_names[i], t);
}else{
printf("  %8s:%9.3f  (%6.2f%%)\n", t_names[i], t, t*100.0/tmax);
if(i == T_CONJ_GRAD){
t = tmax - t;
printf("    --> %8s:%9.3f  (%6.2f%%)\n", "rest", t, t*100.0/tmax);
}
}
}
}
release_gpu();
return 0;
}
/*
* ---------------------------------------------------------------------
* floating point arrays here are named as in NPB1 spec discussion of
* CG algorithm
* ---------------------------------------------------------------------
*/
/*
 * Serial (CPU) conjugate-gradient solver, used for the untimed warm-up
 * iteration. Approximately solves A.z = x over 25 CG iterations and
 * returns the explicitly recomputed residual norm ||x - A.z|| via rnorm.
 * Reads the file-scope partition globals naa, firstrow/lastrow,
 * firstcol/lastcol.
 */
static void conj_grad(int colidx[],
int rowstr[],
double x[],
double z[],
double a[],
double p[],
double q[],
double r[],
double* rnorm){
    int row, nz_idx;
    int iter;
    const int max_iters = 25;
    double pq, partial, rho_new, rho_old, alpha, beta;
    rho_new = 0.0;
    /* initialize the CG state: q = z = 0, r = p = x */
    for(row = 0; row < naa+1; row++){
        q[row] = 0.0;
        z[row] = 0.0;
        r[row] = x[row];
        p[row] = r[row];
    }
    /* rho = r.r (sum of squares of the local residual elements) */
    for(row = 0; row < lastcol - firstcol + 1; row++){
        rho_new = rho_new + r[row]*r[row];
    }
    /* the CG iteration loop */
    for(iter = 1; iter <= max_iters; iter++){
        /* q = A.p: partition submatrix-vector multiply */
        for(row = 0; row < lastrow - firstrow + 1; row++){
            partial = 0.0;
            for(nz_idx = rowstr[row]; nz_idx < rowstr[row+1]; nz_idx++){
                partial = partial + a[nz_idx]*p[colidx[nz_idx]];
            }
            q[row] = partial;
        }
        /* pq = p.q */
        pq = 0.0;
        for(row = 0; row < lastcol - firstcol + 1; row++){
            pq = pq + p[row]*q[row];
        }
        /* step length alpha = rho / (p.q); remember the previous rho */
        alpha = rho_new / pq;
        rho_old = rho_new;
        /* z = z + alpha*p and r = r - alpha*q */
        rho_new = 0.0;
        for(row = 0; row < lastcol - firstcol + 1; row++){
            z[row] = z[row] + alpha*p[row];
            r[row] = r[row] - alpha*q[row];
        }
        /* rho = r.r for the updated residual */
        for(row = 0; row < lastcol - firstcol + 1; row++){
            rho_new = rho_new + r[row]*r[row];
        }
        /* beta = rho_new / rho_old, then p = r + beta*p */
        beta = rho_new / rho_old;
        for(row = 0; row < lastcol - firstcol + 1; row++){
            p[row] = r[row] + beta*p[row];
        }
    } /* end of CG iterations */
    /*
     * Compute the residual norm explicitly: ||r|| = ||x - A.z||.
     * First form A.z into r, then accumulate the squared difference.
     */
    partial = 0.0;
    for(row = 0; row < lastrow - firstrow + 1; row++){
        pq = 0.0;
        for(nz_idx = rowstr[row]; nz_idx < rowstr[row+1]; nz_idx++){
            pq = pq + a[nz_idx]*z[colidx[nz_idx]];
        }
        r[row] = pq;
    }
    /* at this point r holds A.z */
    for(row = 0; row < lastcol-firstcol+1; row++){
        pq = x[row] - r[row];
        partial = partial + pq*pq;
    }
    *rnorm = sqrt(partial);
}
/*
 * GPU-side conjugate-gradient driver: mirrors conj_grad() step for
 * step, but each algebraic operation is delegated to a CUDA kernel
 * wrapper operating on the persistent device vectors.
 */
static void conj_grad_gpu(double* rnorm){
    double denom, residual, rho_new, rho_old, alpha, beta;
    int iter;
    const int max_iters = 25;
    /* initialization: q = 0, z = 0, r = p = x */
    gpu_kernel_one();
    /* rho = r.r */
    gpu_kernel_two(&rho_new);
    /* the CG iteration loop */
    for(iter = 1; iter <= max_iters; iter++){
        gpu_kernel_three();             /* q = A.p */
        gpu_kernel_four(&denom);        /* denom = p.q */
        alpha = rho_new / denom;
        rho_old = rho_new;
        /* fused update: z += alpha*p, r -= alpha*q, rho = r.r
         * (single launch replacing the separate kernels five and six) */
        gpu_kernel_five_merged_kernel_six(alpha, &rho_new);
        beta = rho_new / rho_old;
        gpu_kernel_seven(beta);         /* p = r + beta*p */
    } /* end of CG iterations */
    /* residual norm explicitly: ||r|| = ||x - A.z|| */
    gpu_kernel_eight();                 /* r = A.z */
    gpu_kernel_nine(&residual);         /* residual = sum (x - r)^2 */
    *rnorm = sqrt(residual);
}
/* Launch kernel one: zero q and z, copy x into r and p (device vectors). */
static void gpu_kernel_one(){
    gpu_kernel_one<<<kernel_one_blocks_per_grid,
                     THREADS_PER_BLOCK_ON_KERNEL_ONE>>>(
        p_device, q_device, r_device, x_device, z_device);
}
/* Kernel one: one thread per vector element; q = 0, z = 0, r = p = x. */
__global__ void gpu_kernel_one(double p[],
double q[],
double r[],
double x[],
double z[]){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < NA){
        double seed = x[idx];
        q[idx] = 0.0;
        z[idx] = 0.0;
        r[idx] = seed;
        p[idx] = seed;
    }
}
/*
 * Launch kernel two (per-block partial sums of r.r), copy the partials
 * back and finish the reduction on the host into *rho_host.
 */
static void gpu_kernel_two(double* rho_host){
    gpu_kernel_two<<<kernel_two_blocks_per_grid,
                     THREADS_PER_BLOCK_ON_KERNEL_TWO,
                     amount_of_share_data_on_kernel_two>>>(
        r_device, rho_device, global_data_device);
    cudaMemcpy(global_data, global_data_device, reduce_memory_on_kernel_two, cudaMemcpyDeviceToHost);
    double acc = 0.0;
    for(int block = 0; block < kernel_two_blocks_per_grid; block++){
        acc += global_data[block];
    }
    global_data_reduce = acc;
    *rho_host = acc;
}
/*
 * Kernel two: per-block partial reduction of r.r.
 * Each thread squares one element of r into dynamic shared memory, a
 * tree reduction leaves the block's partial sum in share_data[0], and
 * thread 0 stores it to global_data[blockIdx.x]; the host wrapper sums
 * the per-block partials. The rho parameter is unused (kept so the
 * launch signature stays unchanged).
 *
 * Fix: the original returned early for thread_id >= NA, so tail-block
 * threads exited before the __syncthreads() barriers executed by the
 * remaining threads. A barrier not reached by all non-exited threads of
 * the block is undefined behavior per the CUDA programming guide; now
 * every thread reaches every barrier, with out-of-range lanes
 * contributing 0.0.
 */
__global__ void gpu_kernel_two(double r[],
double* rho,
double global_data[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    if(thread_id < NA){
        double r_value = r[thread_id];
        share_data[local_id] = r_value * r_value;
    }else{
        share_data[local_id] = 0.0; /* zero-pad the tail block */
    }
    __syncthreads();
    /* tree reduction; assumes blockDim.x is a power of two */
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){global_data[blockIdx.x]=share_data[0];}
}
/* Launch kernel three: q = A.p, one thread block per matrix row. */
static void gpu_kernel_three(){
    gpu_kernel_three<<<kernel_three_blocks_per_grid,
                       THREADS_PER_BLOCK_ON_KERNEL_THREE,
                       amount_of_share_data_on_kernel_three>>>(
        colidx_device, rowstr_device, a_device, p_device, q_device);
}
/*
 * Kernel three: q = A.p with one thread block per matrix row.
 * The block's threads stride across the row's nonzeros accumulating
 * private partial dot products, then tree-reduce them in shared
 * memory; thread 0 writes the row result into q[row].
 *
 * Fix: the row index was computed as
 * (blockIdx.x*blockDim.x+threadIdx.x)/blockDim.x, which is
 * algebraically just blockIdx.x but performs a multiply that can wrap
 * for very large grids (the grid has NA blocks — up to 9,000,000 for
 * class E) before the division; use blockIdx.x directly.
 */
__global__ void gpu_kernel_three(int colidx[],
int rowstr[],
double a[],
double p[],
double q[]){
    double* share_data = (double*)extern_share_data;
    int j = blockIdx.x; /* one block per row */
    int local_id = threadIdx.x;
    int begin = rowstr[j];
    int end = rowstr[j+1];
    double sum = 0.0;
    /* block-strided walk over this row's nonzeros */
    for(int k=begin+local_id; k<end; k+=blockDim.x){
        sum = sum + a[k]*p[colidx[k]];
    }
    share_data[local_id] = sum;
    __syncthreads();
    /* tree reduction; assumes blockDim.x is a power of two */
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){q[j]=share_data[0];}
}
/*
 * Launch kernel four (per-block partial sums of p.q), copy the partials
 * back and finish the reduction on the host into *d_host.
 */
static void gpu_kernel_four(double* d_host){
    gpu_kernel_four<<<kernel_four_blocks_per_grid,
                      THREADS_PER_BLOCK_ON_KERNEL_FOUR,
                      amount_of_share_data_on_kernel_four>>>(
        d_device, p_device, q_device, global_data_device);
    cudaMemcpy(global_data, global_data_device, reduce_memory_on_kernel_four, cudaMemcpyDeviceToHost);
    double acc = 0.0;
    for(int block = 0; block < kernel_four_blocks_per_grid; block++){
        acc += global_data[block];
    }
    global_data_reduce = acc;
    *d_host = acc;
}
/*
 * Kernel four: per-block partial reduction of the dot product p.q.
 * Thread 0 of each block stores the block partial into
 * global_data[blockIdx.x]; the host wrapper sums the partials. The d
 * parameter is unused (kept so the launch signature stays unchanged).
 *
 * Fix: the original returned early for thread_id >= NA before the
 * __syncthreads() barriers reached by the remaining threads of the
 * block — undefined behavior per the CUDA programming guide. All
 * threads now reach every barrier; out-of-range lanes contribute 0.0.
 * Also uses local_id consistently instead of mixing it with
 * threadIdx.x.
 */
__global__ void gpu_kernel_four(double* d,
double* p,
double* q,
double global_data[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    if(thread_id < NA){
        share_data[local_id] = p[thread_id] * q[thread_id];
    }else{
        share_data[local_id] = 0.0; /* zero-pad the tail block */
    }
    __syncthreads();
    /* tree reduction; assumes blockDim.x is a power of two */
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){global_data[blockIdx.x]=share_data[0];}
}
/*
 * Unfused variant of the CG vector update: z += alpha*p and
 * r -= alpha*q as two separate launches. Superseded by
 * gpu_kernel_five_merged_kernel_six in the main loop, kept for
 * comparison.
 */
static void gpu_kernel_five(double alpha_host){
    gpu_kernel_five_1<<<kernel_five_blocks_per_grid,
                        THREADS_PER_BLOCK_ON_KERNEL_FIVE>>>(
        alpha_host, p_device, z_device);
    gpu_kernel_five_2<<<kernel_five_blocks_per_grid,
                        THREADS_PER_BLOCK_ON_KERNEL_FIVE>>>(
        alpha_host, q_device, r_device);
}
/* Kernel five (part 1): z = z + alpha*p, one thread per element. */
__global__ void gpu_kernel_five_1(double alpha,
double* p,
double* z){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < NA){
        z[idx] += alpha * p[idx];
    }
}
/* Kernel five (part 2): r = r - alpha*q, one thread per element. */
__global__ void gpu_kernel_five_2(double alpha,
double* q,
double* r){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < NA){
        r[idx] -= alpha * q[idx];
    }
}
/*
 * Launch the fused kernel (z += alpha*p, r -= alpha*q, partial sums of
 * the new r.r per block) and finish the rho reduction on the host.
 */
static void gpu_kernel_five_merged_kernel_six(double alpha_host,
double* rho_host){
    gpu_kernel_five_merged_kernel_six<<<kernel_five_blocks_per_grid,
                                        THREADS_PER_BLOCK_ON_KERNEL_FIVE,
                                        amount_of_share_data_on_kernel_five>>>(
        alpha_host, p_device, q_device, r_device, z_device, global_data_device);
    cudaMemcpy(global_data, global_data_device, reduce_memory_on_kernel_five, cudaMemcpyDeviceToHost);
    double acc = 0.0;
    for(int block = 0; block < kernel_five_blocks_per_grid; block++){
        acc += global_data[block];
    }
    global_data_reduce = acc;
    *rho_host = acc;
}
/*
 * Fused kernels five and six: per element, z = z + alpha*p and
 * r = r - alpha*q, then a per-block tree reduction of the updated r.r
 * whose partial sum thread 0 writes to global_data[blockIdx.x]. Every
 * thread reaches every __syncthreads(); out-of-range lanes contribute
 * 0.0 to the reduction.
 */
__global__ void gpu_kernel_five_merged_kernel_six(double alpha,
double* p,
double* q,
double* r,
double* z,
double global_data[]){
    double* partials = (double*)extern_share_data;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    int lid = threadIdx.x;
    double contribution = 0.0;
    /* vector updates (former kernel five) */
    if(gid < NA){
        z[gid] = z[gid] + alpha*p[gid];
        double updated_r = r[gid] - alpha*q[gid];
        r[gid] = updated_r;
        contribution = updated_r * updated_r;
    }
    partials[lid] = contribution;
    /* block reduction of r.r (former kernel six) */
    __syncthreads();
    for(int stride=blockDim.x/2; stride>0; stride>>=1){
        if(lid<stride){partials[lid]+=partials[lid+stride];}
        __syncthreads();
    }
    if(lid==0){global_data[blockIdx.x]=partials[0];}
}
/*
 * Launch kernel six (per-block partial sums of r.r) and finish the
 * reduction on the host. Unfused variant; the main loop uses
 * gpu_kernel_five_merged_kernel_six instead.
 */
static void gpu_kernel_six(double* rho_host){
    gpu_kernel_six<<<kernel_six_blocks_per_grid,
                     THREADS_PER_BLOCK_ON_KERNEL_SIX,
                     amount_of_share_data_on_kernel_six>>>(
        r_device, global_data_device);
    cudaMemcpy(global_data, global_data_device, reduce_memory_on_kernel_six, cudaMemcpyDeviceToHost);
    double acc = 0.0;
    for(int block = 0; block < kernel_six_blocks_per_grid; block++){
        acc += global_data[block];
    }
    global_data_reduce = acc;
    *rho_host = acc;
}
/*
 * Kernel six: per-block partial reduction of r.r; thread 0 stores each
 * block's partial sum in global_data[blockIdx.x] for the host to sum.
 *
 * Fix: the original returned early for thread_id >= NA before the
 * __syncthreads() barriers reached by the remaining threads of the
 * block — undefined behavior per the CUDA programming guide. All
 * threads now reach every barrier; out-of-range lanes contribute 0.0.
 */
__global__ void gpu_kernel_six(double r[],
double global_data[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    if(thread_id < NA){
        double r_value = r[thread_id];
        share_data[local_id] = r_value * r_value;
    }else{
        share_data[local_id] = 0.0; /* zero-pad the tail block */
    }
    __syncthreads();
    /* tree reduction; assumes blockDim.x is a power of two */
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){global_data[blockIdx.x]=share_data[0];}
}
/* Launch kernel seven: p = r + beta*p (device vectors). */
static void gpu_kernel_seven(double beta_host){
    gpu_kernel_seven<<<kernel_seven_blocks_per_grid,
                       THREADS_PER_BLOCK_ON_KERNEL_SEVEN>>>(
        beta_host, p_device, r_device);
}
/* Kernel seven: p = r + beta*p, one thread per element. */
__global__ void gpu_kernel_seven(double beta,
double* p,
double* r){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < NA){
        p[idx] = r[idx] + beta*p[idx];
    }
}
/* Launch kernel eight: r = A.z, one thread block per matrix row. */
static void gpu_kernel_eight(){
    gpu_kernel_eight<<<kernel_eight_blocks_per_grid,
                       THREADS_PER_BLOCK_ON_KERNEL_EIGHT,
                       amount_of_share_data_on_kernel_eight>>>(
        colidx_device, rowstr_device, a_device, r_device, z_device);
}
/*
 * Kernel eight: r = A.z with one thread block per matrix row (same
 * shape as kernel three but reading z and overwriting r). The block's
 * threads stride across the row's nonzeros, then tree-reduce their
 * partials in shared memory; thread 0 stores the row result into r.
 *
 * Fix: the row index was computed as
 * (blockIdx.x*blockDim.x+threadIdx.x)/blockDim.x, which is just
 * blockIdx.x but performs a multiply that can wrap for very large
 * grids (the grid has NA blocks — up to 9,000,000 for class E) before
 * the division; use blockIdx.x directly.
 */
__global__ void gpu_kernel_eight(int colidx[],
int rowstr[],
double a[],
double r[],
double* z){
    double* share_data = (double*)extern_share_data;
    int j = blockIdx.x; /* one block per row */
    int local_id = threadIdx.x;
    int begin = rowstr[j];
    int end = rowstr[j+1];
    double sum = 0.0;
    /* block-strided walk over this row's nonzeros */
    for(int k=begin+local_id; k<end; k+=blockDim.x){
        sum = sum + a[k]*z[colidx[k]];
    }
    share_data[local_id] = sum;
    __syncthreads();
    /* tree reduction; assumes blockDim.x is a power of two */
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){r[j]=share_data[0];}
}
/*
 * Launch kernel nine (per-block partial sums of (x - r)^2) and finish
 * the reduction on the host into *sum_host.
 */
static void gpu_kernel_nine(double* sum_host){
    gpu_kernel_nine<<<kernel_nine_blocks_per_grid,
                      THREADS_PER_BLOCK_ON_KERNEL_NINE,
                      amount_of_share_data_on_kernel_nine>>>(
        r_device, x_device, sum_device, global_data_device);
    cudaMemcpy(global_data, global_data_device, reduce_memory_on_kernel_nine, cudaMemcpyDeviceToHost);
    double acc = 0.0;
    for(int block = 0; block < kernel_nine_blocks_per_grid; block++){
        acc += global_data[block];
    }
    global_data_reduce = acc;
    *sum_host = acc;
}
/*
 * Kernel nine: per-block partial reduction of sum((x - r)^2), i.e. the
 * squared residual ||x - A.z||^2 given that r already holds A.z.
 * Thread 0 stores each block's partial into global_data[blockIdx.x].
 * The sum parameter is unused (kept so the launch signature stays
 * unchanged).
 *
 * Fix: the original returned early for thread_id >= NA before the
 * __syncthreads() barriers reached by the remaining threads of the
 * block — undefined behavior per the CUDA programming guide. All
 * threads now reach every barrier; out-of-range lanes contribute 0.0.
 */
__global__ void gpu_kernel_nine(double r[], double x[], double* sum, double global_data[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    if(thread_id < NA){
        double diff = x[thread_id] - r[thread_id];
        share_data[local_id] = diff * diff;
    }else{
        share_data[local_id] = 0.0; /* zero-pad the tail block */
    }
    __syncthreads();
    /* tree reduction; assumes blockDim.x is a power of two */
    for(int i=blockDim.x/2; i>0; i>>=1) {
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){global_data[blockIdx.x]=share_data[0];}
}
/*
 * Compute norm_temp1 = x.z and norm_temp2 = z.z on the device as two
 * single-buffer launches (the fused two-buffer gpu_kernel_ten variant
 * is available but disabled), then finish both reductions on the host.
 */
static void gpu_kernel_ten(double* norm_temp1,
double* norm_temp2){
    gpu_kernel_ten_1<<<kernel_ten_blocks_per_grid,
                       THREADS_PER_BLOCK_ON_KERNEL_TEN,
                       amount_of_share_data_on_kernel_ten>>>(
        global_data_device, x_device, z_device);
    gpu_kernel_ten_2<<<kernel_ten_blocks_per_grid,
                       THREADS_PER_BLOCK_ON_KERNEL_TEN,
                       amount_of_share_data_on_kernel_ten>>>(
        global_data_two_device, x_device, z_device);
    cudaMemcpy(global_data, global_data_device, reduce_memory_on_kernel_ten, cudaMemcpyDeviceToHost);
    cudaMemcpy(global_data_two, global_data_two_device, reduce_memory_on_kernel_ten, cudaMemcpyDeviceToHost);
    double dot_xz = 0.0;
    double dot_zz = 0.0;
    for(int block = 0; block < kernel_ten_blocks_per_grid; block++){
        dot_xz += global_data[block];
        dot_zz += global_data_two[block];
    }
    global_data_reduce = dot_xz;
    global_data_two_reduce = dot_zz;
    *norm_temp1 = dot_xz;
    *norm_temp2 = dot_zz;
}
/*
 * Fused kernel ten: per-block partial reductions of both x.z and z.z
 * using two halves of the dynamic shared-memory buffer (requires
 * 2*THREADS_PER_BLOCK_ON_KERNEL_TEN doubles of shared memory per
 * block). Thread 0 stores the two block partials into
 * norm_temp1[blockIdx.x] and norm_temp2[blockIdx.x]. Currently unused
 * (the wrapper launches gpu_kernel_ten_1/_2 instead).
 *
 * Fix: the original returned early for thread_id >= NA before the
 * __syncthreads() barriers reached by the remaining threads of the
 * block — undefined behavior per the CUDA programming guide. All
 * threads now reach every barrier; out-of-range lanes contribute 0.0.
 */
__global__ void gpu_kernel_ten(double* norm_temp1,
double* norm_temp2,
double x[],
double z[]){
    double* share_data_1 = (double*)(extern_share_data);
    double* share_data_2 = (double*)(&share_data_1[THREADS_PER_BLOCK_ON_KERNEL_TEN]);
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    if(thread_id < NA){
        share_data_1[local_id] = x[thread_id]*z[thread_id];
        share_data_2[local_id] = z[thread_id]*z[thread_id];
    }else{
        share_data_1[local_id] = 0.0; /* zero-pad the tail block */
        share_data_2[local_id] = 0.0;
    }
    __syncthreads();
    /* simultaneous tree reductions; assumes blockDim.x is a power of two */
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){
            share_data_1[local_id]+=share_data_1[local_id+i];
            share_data_2[local_id]+=share_data_2[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){
        norm_temp1[blockIdx.x]=share_data_1[0];
        norm_temp2[blockIdx.x]=share_data_2[0];}
}
/*
 * Kernel ten (part 1): per-block partial reduction of the dot product
 * x.z; thread 0 stores each block's partial into norm_temp[blockIdx.x].
 *
 * Fix: the original returned early for thread_id >= NA before the
 * __syncthreads() barriers reached by the remaining threads of the
 * block — undefined behavior per the CUDA programming guide. All
 * threads now reach every barrier; out-of-range lanes contribute 0.0.
 */
__global__ void gpu_kernel_ten_1(double* norm_temp,
double x[],
double z[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    if(thread_id < NA){
        share_data[local_id] = x[thread_id]*z[thread_id];
    }else{
        share_data[local_id] = 0.0; /* zero-pad the tail block */
    }
    __syncthreads();
    /* tree reduction; assumes blockDim.x is a power of two */
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){norm_temp[blockIdx.x]=share_data[0];}
}
/*
 * Kernel ten (part 2): per-block partial reduction of the dot product
 * z.z; thread 0 stores each block's partial into norm_temp[blockIdx.x].
 * (The x parameter is unused but kept so both part-1 and part-2 share
 * the same launch signature.)
 *
 * Fix: the original returned early for thread_id >= NA before the
 * __syncthreads() barriers reached by the remaining threads of the
 * block — undefined behavior per the CUDA programming guide. All
 * threads now reach every barrier; out-of-range lanes contribute 0.0.
 */
__global__ void gpu_kernel_ten_2(double* norm_temp,
double x[],
double z[]){
    double* share_data = (double*)extern_share_data;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int local_id = threadIdx.x;
    if(thread_id < NA){
        share_data[local_id] = z[thread_id]*z[thread_id];
    }else{
        share_data[local_id] = 0.0; /* zero-pad the tail block */
    }
    __syncthreads();
    /* tree reduction; assumes blockDim.x is a power of two */
    for(int i=blockDim.x/2; i>0; i>>=1){
        if(local_id<i){share_data[local_id]+=share_data[local_id+i];}
        __syncthreads();
    }
    if(local_id==0){norm_temp[blockIdx.x]=share_data[0];}
}
/* Launch kernel eleven: x = norm_temp2 * z (normalize z into x). */
static void gpu_kernel_eleven(double norm_temp2){
    gpu_kernel_eleven<<<kernel_eleven_blocks_per_grid,
                        THREADS_PER_BLOCK_ON_KERNEL_ELEVEN>>>(
        norm_temp2, x_device, z_device);
}
/* Kernel eleven: x = norm_temp2 * z, one thread per element. */
__global__ void gpu_kernel_eleven(double norm_temp2, double x[], double z[]){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < NA){
        x[idx] = norm_temp2 * z[idx];
    }
}
/*
* ---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
* ---------------------------------------------------------------------
*/
/* Scale a double x in (0,1) by the power of two ipwr2 and truncate
 * the product toward zero. */
static int icnvrt(double x, int ipwr2){
    double scaled = ipwr2 * x;
    return (int)scaled;
}
/*
* ---------------------------------------------------------------------
* generate the test problem for benchmark 6
* makea generates a sparse matrix with a
* prescribed sparsity distribution
*
* parameter type usage
*
* input
*
* n i number of cols/rows of matrix
* nz i nonzeros as declared array size
* rcond r*8 condition number
* shift r*8 main diagonal shift
*
* output
*
* a r*8 array for nonzeros
* colidx i col indices
* rowstr i row pointers
*
* workspace
*
* iv, arow, acol i
* aelt r*8
* ---------------------------------------------------------------------
*/
/*
 * Generate the CG test problem: a sparse symmetric matrix with a
 * prescribed sparsity distribution and condition number RCOND,
 * diagonal-shifted by SHIFT.
 * Outputs: a (nonzeros), colidx (column indices), rowstr (row
 * pointers). Workspace: iv, arow, acol, aelt.
 */
static void makea(int n,
int nz,
double a[],
int colidx[],
int rowstr[],
int firstrow,
int lastrow,
int firstcol,
int lastcol,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int iv[]){
    int row, elt, nnz_row, pow2;
    int col_buf[NONZER+1];
    double val_buf[NONZER+1];
    /*
     * nonzer is approximately (int(sqrt(nnza/n)));
     * pow2: the smallest power of two not less than n
     */
    pow2 = 1;
    do{
        pow2 = 2 * pow2;
    }while(pow2 < n);
    /*
     * Generate one sparse random vector per outer row and record its
     * nonzero positions/values for use in sparse().
     */
    for(row = 0; row < n; row++){
        nnz_row = NONZER;
        sprnvc(n, nnz_row, pow2, val_buf, col_buf);
        vecset(n, val_buf, col_buf, &nnz_row, row+1, 0.5);
        arow[row] = nnz_row;
        for(elt = 0; elt < nnz_row; elt++){
            acol[row][elt] = col_buf[elt] - 1;
            aelt[row][elt] = val_buf[elt];
        }
    }
    /*
     * Assemble the sparse matrix from the element lists, summing
     * duplicate entries (iv is used as workspace).
     */
    sparse(a,
           colidx,
           rowstr,
           n,
           nz,
           NONZER,
           arow,
           acol,
           aelt,
           firstrow,
           lastrow,
           iv,
           RCOND,
           SHIFT);
}
/*
 * Release every device allocation made in setup_gpu().
 * Fix: also frees the host-side reduction buffers (global_data and
 * global_data_two) that setup_gpu() malloc'd — the original version
 * leaked them.
 */
static void release_gpu(){
    cudaFree(colidx_device);
    cudaFree(rowstr_device);
    cudaFree(a_device);
    cudaFree(p_device);
    cudaFree(q_device);
    cudaFree(r_device);
    cudaFree(x_device);
    cudaFree(z_device);
    cudaFree(rho_device);
    cudaFree(d_device);
    cudaFree(alpha_device);
    cudaFree(beta_device);
    cudaFree(sum_device);
    cudaFree(norm_temp1_device);
    cudaFree(norm_temp2_device);
    cudaFree(global_data_device);
    cudaFree(global_data_two_device);
    /* host-side reduction buffers allocated in setup_gpu() */
    free(global_data);
    free(global_data_two);
}
/*
 * One-time GPU setup: compute buffer sizes, allocate host reduction
 * buffers and all device arrays, upload the problem data, and
 * precompute the per-kernel launch configuration (grid sizes,
 * dynamic shared-memory sizes and reduction-buffer byte counts).
 * Note: no CUDA API return codes are checked here — assumes the
 * allocations and copies succeed.
 */
static void setup_gpu(){
/* one reduction slot per smallest-size block covering NA elements */
global_data_elements=ceil(double(NA)/double(MINIMUM_THREADS_PER_BLOCK));
size_global_data=sizeof(double)*(global_data_elements);
/* byte sizes of the device arrays (vectors padded to NA+2) */
size_colidx_device=sizeof(int)*(NZ);
size_rowstr_device=sizeof(int)*(NA+1);
size_iv_device=sizeof(int)*(NA);
size_arow_device=sizeof(int)*(NA);
size_acol_device=sizeof(int)*(NAZ);
size_aelt_device=sizeof(double)*(NAZ);
size_a_device=sizeof(double)*(NZ);
size_x_device=sizeof(double)*(NA+2);
size_z_device=sizeof(double)*(NA+2);
size_p_device=sizeof(double)*(NA+2);
size_q_device=sizeof(double)*(NA+2);
size_r_device=sizeof(double)*(NA+2);
size_rho_device=sizeof(double);
size_d_device=sizeof(double);
size_alpha_device=sizeof(double);
size_beta_device=sizeof(double);
size_sum_device=sizeof(double);
size_norm_temp1_device=sizeof(double);
size_norm_temp2_device=sizeof(double);
/* host-side staging buffers for the per-block reduction partials */
global_data=(double*)malloc(size_global_data);
global_data_two=(double*)malloc(size_global_data);
/* device allocations */
cudaMalloc(&colidx_device, size_colidx_device);
cudaMalloc(&rowstr_device, size_rowstr_device);
cudaMalloc(&a_device, size_a_device);
cudaMalloc(&p_device, size_p_device);
cudaMalloc(&q_device, size_q_device);
cudaMalloc(&r_device, size_r_device);
cudaMalloc(&x_device, size_x_device);
cudaMalloc(&z_device, size_z_device);
cudaMalloc(&rho_device, size_rho_device);
cudaMalloc(&d_device, size_d_device);
cudaMalloc(&alpha_device, size_alpha_device);
cudaMalloc(&beta_device, size_beta_device);
cudaMalloc(&sum_device, size_sum_device);
cudaMalloc(&norm_temp1_device, size_norm_temp1_device);
cudaMalloc(&norm_temp2_device, size_norm_temp2_device);
cudaMalloc(&global_data_device, size_global_data);
cudaMalloc(&global_data_two_device, size_global_data);
/* upload the assembled matrix and the initial vectors */
cudaMemcpy(colidx_device, colidx, size_colidx_device, cudaMemcpyHostToDevice);
cudaMemcpy(rowstr_device, rowstr, size_rowstr_device, cudaMemcpyHostToDevice);
cudaMemcpy(a_device, a, size_a_device, cudaMemcpyHostToDevice);
cudaMemcpy(p_device, p, size_p_device, cudaMemcpyHostToDevice);
cudaMemcpy(q_device, q, size_q_device, cudaMemcpyHostToDevice);
cudaMemcpy(r_device, r, size_r_device, cudaMemcpyHostToDevice);
cudaMemcpy(x_device, x, size_x_device, cudaMemcpyHostToDevice);
cudaMemcpy(z_device, z, size_z_device, cudaMemcpyHostToDevice);
/* grid sizes: element-wise kernels use ceil(NA/threads);
 * kernels three and eight launch one block per matrix row (NA blocks) */
kernel_one_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_ONE));
kernel_two_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_TWO));
kernel_three_blocks_per_grid=NA;
kernel_four_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_FOUR));
kernel_five_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_FIVE));
kernel_six_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_SIX));
kernel_seven_blocks_per_grid=(ceil((double)NA/THREADS_PER_BLOCK_ON_KERNEL_SEVEN));
kernel_eight_blocks_per_grid=NA;
kernel_nine_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_NINE));
kernel_ten_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_TEN));
kernel_eleven_blocks_per_grid=(ceil((double)NA/(double)THREADS_PER_BLOCK_ON_KERNEL_ELEVEN));
/* dynamic shared memory: one double per thread for each reduction */
amount_of_share_data_on_kernel_one=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_ONE;
amount_of_share_data_on_kernel_two=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_TWO;
amount_of_share_data_on_kernel_three=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_THREE;
amount_of_share_data_on_kernel_four=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_FOUR;
amount_of_share_data_on_kernel_five=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_FIVE;
amount_of_share_data_on_kernel_six=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_SIX;
amount_of_share_data_on_kernel_seven=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_SEVEN;
amount_of_share_data_on_kernel_eight=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_EIGHT;
amount_of_share_data_on_kernel_nine=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_NINE;
amount_of_share_data_on_kernel_ten=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_TEN;
amount_of_share_data_on_kernel_eleven=sizeof(double)*THREADS_PER_BLOCK_ON_KERNEL_ELEVEN;
/* bytes copied back per kernel: one double per block of the grid */
reduce_memory_on_kernel_one=kernel_one_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_two=kernel_two_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_three=kernel_three_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_four=kernel_four_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_five=kernel_five_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_six=kernel_six_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_seven=kernel_seven_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_eight=kernel_eight_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_nine=kernel_nine_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_ten=kernel_ten_blocks_per_grid*sizeof(double);
reduce_memory_on_kernel_eleven=kernel_eleven_blocks_per_grid*sizeof(double);
}
/*
* ---------------------------------------------------------------------
* rows range from firstrow to lastrow
* the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
* ---------------------------------------------------------------------
*/
/*
 * Assemble a CSR matrix (a, colidx, rowstr) from the generated triples:
 * arow[i] holds the triple count for generator row i, acol[i][*] the column
 * indices and aelt[i][*] the values.  Duplicate (row, col) pairs are summed,
 * rcond - shift is added on the diagonal, and the slack slots reserved by the
 * over-counting pass are compacted away at the end.  Row j of the result
 * occupies a[rowstr[j] .. rowstr[j+1]-1]; rowstr has nrows+1 entries.
 */
static void sparse(double a[],
int colidx[],
int rowstr[],
int n,
int nz,
int nozer,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int firstrow,
int lastrow,
int nzloc[],
double rcond,
double shift){
int nrows;
/*
* ---------------------------------------------------
* generate a sparse matrix from a list of
* [col, row, element] tri
* ---------------------------------------------------
*/
int i, j, j1, j2, nza, k, kk, nzrow, jcol;
double size, scale, ratio, va;
boolean goto_40;
/*
* --------------------------------------------------------------------
* how many rows of result
* --------------------------------------------------------------------
*/
nrows = lastrow - firstrow + 1;
/*
* --------------------------------------------------------------------
* ...count the number of triples in each row
* --------------------------------------------------------------------
*/
for(j = 0; j < nrows+1; j++){
rowstr[j] = 0;
}
/* each triple of row j reserves arow[i] slots: an upper bound on the
* fill-in produced by the outer-product expansion below */
for(i = 0; i < n; i++){
for(nza = 0; nza < arow[i]; nza++){
j = acol[i][nza] + 1;
rowstr[j] = rowstr[j] + arow[i];
}
}
/* exclusive prefix sum: rowstr[j] becomes the first slot of row j */
rowstr[0] = 0;
for(j = 1; j < nrows+1; j++){
rowstr[j] = rowstr[j] + rowstr[j-1];
}
nza = rowstr[nrows] - 1;
/*
* ---------------------------------------------------------------------
* ... rowstr(j) now is the location of the first nonzero
* of row j of a
* ---------------------------------------------------------------------
*/
if(nza > nz){
printf("Space for matrix elements exceeded in sparse\n");
printf("nza, nzmax = %d, %d\n", nza, nz);
exit(EXIT_FAILURE);
}
/*
* ---------------------------------------------------------------------
* ... preload data pages
* ---------------------------------------------------------------------
*/
for(j = 0; j < nrows; j++){
for(k = rowstr[j]; k < rowstr[j+1]; k++){
a[k] = 0.0;
colidx[k] = -1;
}
nzloc[j] = 0;
}
/*
* ---------------------------------------------------------------------
* ... generate actual values by summing duplicates
* ---------------------------------------------------------------------
*/
size = 1.0;
/* geometric decay so the row scaling spans [rcond, 1] over n rows */
ratio = pow(rcond, (1.0 / (double)(n)));
for(i = 0; i < n; i++){
for(nza = 0; nza < arow[i]; nza++){
j = acol[i][nza];
scale = size * aelt[i][nza];
for(nzrow = 0; nzrow < arow[i]; nzrow++){
jcol = acol[i][nzrow];
va = aelt[i][nzrow] * scale;
/*
* --------------------------------------------------------------------
* ... add the identity * rcond to the generated matrix to bound
* the smallest eigenvalue from below by rcond
* --------------------------------------------------------------------
*/
if(jcol == j && j == i){
va = va + rcond - shift;
}
/* goto_40 mirrors the original Fortran "goto 40": it records that a
* slot for (j, jcol) was found (inserted, appended, or duplicate) */
goto_40 = FALSE;
for(k = rowstr[j]; k < rowstr[j+1]; k++){
if(colidx[k] > jcol){
/*
* ----------------------------------------------------------------
* ... insert colidx here orderly
* ----------------------------------------------------------------
*/
for(kk = rowstr[j+1]-2; kk >= k; kk--){
if(colidx[kk] > -1){
a[kk+1] = a[kk];
colidx[kk+1] = colidx[kk];
}
}
colidx[k] = jcol;
a[k] = 0.0;
goto_40 = TRUE;
break;
}else if(colidx[k] == -1){
colidx[k] = jcol;
goto_40 = TRUE;
break;
}else if(colidx[k] == jcol){
/*
* --------------------------------------------------------------
* ... mark the duplicated entry
* -------------------------------------------------------------
*/
nzloc[j] = nzloc[j] + 1;
goto_40 = TRUE;
break;
}
}
if(goto_40 == FALSE){
printf("internal error in sparse: i=%d\n", i);
exit(EXIT_FAILURE);
}
/* k is the slot located by the loop above */
a[k] = a[k] + va;
}
}
size = size * ratio;
}
/*
* ---------------------------------------------------------------------
* ... remove empty entries and generate final results
* ---------------------------------------------------------------------
*/
/* nzloc[j] becomes the cumulative number of duplicate slots up to row j */
for(j = 1; j < nrows; j++){
nzloc[j] = nzloc[j] + nzloc[j-1];
}
for(j = 0; j < nrows; j++){
if(j > 0){
j1 = rowstr[j] - nzloc[j-1];
}else{
j1 = 0;
}
j2 = rowstr[j+1] - nzloc[j];
nza = rowstr[j];
for(k = j1; k < j2; k++){
a[k] = a[nza];
colidx[k] = colidx[nza];
nza = nza + 1;
}
}
for(j = 1; j < nrows+1; j++){
rowstr[j] = rowstr[j] - nzloc[j-1];
}
nza = rowstr[nrows] - 1;
}
/*
* ---------------------------------------------------------------------
* generate a sparse n-vector (v, iv)
* having nzv nonzeros
*
* mark(i) is set to 1 if position i is nonzero.
* mark is all zero on entry and is reset to all zero before exit
* this corrects a performance bug found by John G. Lewis, caused by
* reinitialization of mark on every one of the n calls to sprnvc
* ---------------------------------------------------------------------
*/
/*
 * Generate a sparse n-vector (v, iv) with exactly nz nonzeros at distinct
 * random positions in [1, n].  Candidate indices are drawn via randlc/icnvrt
 * (nn1 is the power-of-two scaling exponent); draws that fall beyond n or
 * repeat an index already used are discarded and retried.
 */
static void sprnvc(int n, int nz, int nn1, double v[], int iv[]){
int nzv, ii, i;
double vecelt, vecloc;
int already_used;
nzv = 0;
while(nzv < nz){
vecelt = randlc(&tran, amult);
/* portable integer in [1, 2^nn1]; reject candidates beyond n */
vecloc = randlc(&tran, amult);
i = icnvrt(vecloc, nn1) + 1;
if(i > n){
continue;
}
/* reject indices generated earlier */
already_used = 0;
for(ii = 0; ii < nzv; ii++){
if(iv[ii] == i){
already_used = 1;
break;
}
}
if(already_used){
continue;
}
v[nzv] = vecelt;
iv[nzv] = i;
nzv++;
}
}
/*
* --------------------------------------------------------------------
* set ith element of sparse vector (v, iv) with
* nzv nonzeros to val
* --------------------------------------------------------------------
*/
/*
 * Set element i of the sparse vector (v, iv) holding *nzv nonzeros to val.
 * If position i already appears in iv, its value is overwritten in place;
 * otherwise (v, iv) grows by one entry and *nzv is incremented.
 * n is unused (kept for signature compatibility with the Fortran original).
 */
static void vecset(int n, double v[], int iv[], int* nzv, int i, double val){
int k;
int found = 0;
for(k = 0; k < *nzv; k++){
if(iv[k] == i){
v[k] = val;
found = 1;
}
}
if(!found){
v[*nzv] = val;
iv[*nzv] = i;
*nzv = *nzv + 1;
}
}
|
cb6acf209a1c03d17d092605a1e7f8e5b5e6955b.hip | // !!! This is a file automatically generated by hipify!!!
#include <config.h>
#include <rocblas.h>
#include <hip/hip_runtime_api.h>
#include <hiprand/hiprand.h>
#include <iostream>
#include <random>
#include <primitiv/cuda_device.h>
#include <primitiv/cuda_utils.h>
#include <primitiv/error.h>
using std::cerr;
using std::endl;
namespace {
/*
 * CUDA kernels
 */
// Flat global thread indices for 1-D (IDX) and 2-D (IDX x IDY) launches.
#define IDX (threadIdx.x + blockIdx.x * blockDim.x)
#define IDY (threadIdx.y + blockIdx.y * blockDim.y)
// Fills py[0..size) with the constant k.
// One thread per element; launch a 1-D grid covering at least `size` threads.
__global__ void set_const_dev(float k, unsigned size, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  py[idx] = k;
}
// Writes an identity pattern into py: 1 at every multiple of `skip`
// (the caller passes skip = leading dimension + 1), 0 elsewhere.
__global__ void set_identity_dev(unsigned size, unsigned skip, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  py[idx] = (idx % skip == 0) ? 1.0f : 0.0f;
}
// Thresholds pre-generated uniform samples in py into {0, 1} Bernoulli draws:
// py[i] becomes 1.0f iff its previous value is <= p.  The caller fills py
// with curandGenerateUniform before launching (see random_bernoulli_impl).
// Fix: `size` was declared `float`, so the bound check `i < size` compared
// through a float that cannot represent counts >= 2^24 exactly; every other
// kernel in this file takes `unsigned size`, and the call site passes an
// unsigned element count.
__global__ void rand_bernoulli_dev(float p, unsigned size, float *py) {
  const unsigned i = IDX;
  if (i < size) py[i] = (float)(py[i] <= p);
}
// In-place affine transform of pre-generated random values:
// py[i] <- py[i] * scale + shift (used to map uniform(0,1] to [lower, upper)).
__global__ void rand_affine_dev(
    float shift, float scale, unsigned size, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  py[idx] = py[idx] * scale + shift;
}
// Gather along one dimension, one batch per blockIdx.y (see pick_fw_impl for
// the launch): pi[blockIdx.y * si] selects the slice; wy is the volume below
// the picked dimension, wx the stride across it, sx/si the batch strides of
// x/ids (0 when that side is broadcast), sy the volume of one output batch.
__global__ void pick_fw_dev(
const float *px, const unsigned *pi,
unsigned wx, unsigned wy, unsigned sx, unsigned si, unsigned sy,
float *py) {
const unsigned t = IDX;
const unsigned ox = blockIdx.y * sx + pi[blockIdx.y * si] * wy;
const unsigned oy = blockIdx.y * sy;
if (t < sy) py[oy + t] = px[ox + (t / wy) * wx + (t % wy)];
}
// Copies a strided window out of px: output element i maps to input position
// (i / span) * skip + (i % span).  The caller pre-offsets px to the window
// start (see slice_fw_impl).
__global__ void slice_fw_dev(
    const float *px, unsigned span, unsigned skip, unsigned size, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  const unsigned chunk = idx / span;
  const unsigned offset = idx % span;
  py[idx] = px[chunk * skip + offset];
}
// Writes one concat input into its strided region of py: chunk layout is
// (i / span) * skip + (i % span); i % x_size replays the input when the
// output has more batches than this input (batch broadcasting).  The caller
// pre-offsets py to this input's region (see concat_fw_impl).
__global__ void concat_fw_dev(
const float *px, unsigned span, unsigned skip, unsigned x_size,
unsigned y_size, float *py) {
const unsigned i = IDX;
if (i < y_size) py[(i / span) * skip + (i % span)] = px[i % x_size];
}
// Backward of pick: scatter-adds each output gradient back into the slice of
// pgx that pick_fw_dev read from.  Same stride parameters as pick_fw_dev;
// atomicAdd is required because several batches may pick the same slice.
__global__ void pick_bw_dev(
const float *pgy, const unsigned *pi,
unsigned wx, unsigned wy, unsigned sx, unsigned si, unsigned sy,
float *pgx) {
const unsigned t = IDX;
const unsigned ox = blockIdx.y * sx + pi[blockIdx.y * si] * wy;
const unsigned oy = blockIdx.y * sy;
if (t < sy) ::atomicAdd(pgx + ox + (t / wy) * wx + (t % wy), pgy[oy + t]);
}
// Backward of slice: accumulates gy (row width wy, nx/ny row counts including
// batches) into a window of gx (row width wx).  The two modulo operations
// wrap whichever operand lacks a batch dimension, so broadcasting works in
// both directions; atomicAdd handles the resulting write collisions.
__global__ void slice_bw_dev(
const float *pgy, unsigned wx, unsigned wy, unsigned nx, unsigned ny,
float *pgx) {
const unsigned i = IDX;
if (i < wy * ::max(nx, ny)) {
::atomicAdd(
pgx + ((i / wy) * wx + (i % wy)) % (wx * nx), pgy[i % (wy * ny)]);
}
}
// Unary elementwise forward kernel generator: y[i] = op(x[i]).
#define CUDADEV_KERNEL_FW_X(name, op) \
__global__ void name##_fw_dev(const float *px, unsigned size, float *py) { \
const unsigned i = IDX; \
if (i < size) py[i] = (op); \
}
// Unary elementwise backward kernel generator: gx[i] += op, where op may
// read the input px, the output py, and the output gradient pgy.
#define CUDADEV_KERNEL_BW_X(name, op) \
__global__ void name##_bw_dev( \
const float *px, const float *py, const float *pgy, unsigned size, \
float *pgx) { \
static_cast<void>(px); \
static_cast<void>(py); \
const unsigned i = IDX; \
if (i < size) pgx[i] += (op); \
}
// Unary-with-constant forward kernel generator: y[i] = op(x[i], k).
#define CUDADEV_KERNEL_FW_X_CONST(name, op) \
__global__ void name##_fw_dev( \
const float *px, float k, unsigned size, float *py) { \
const unsigned i = IDX; \
if (i < size) py[i] = (op); \
}
// Unary-with-constant backward kernel generator.
#define CUDADEV_KERNEL_BW_X_CONST(name, op) \
__global__ void name##_bw_dev( \
const float *px, const float *py, const float *pgy, float k, \
unsigned size, float *pgx) { \
static_cast<void>(px); \
static_cast<void>(py); \
const unsigned i = IDX; \
if (i < size) pgx[i] += (op); \
}
// Tensor (op) scalar forward kernels; blockIdx.y indexes the batch, and
// mbx/mbk are 0/1 multipliers that disable the batch offset on a broadcast
// operand.  _R places the tensor on the left of op, _L on the right.
#define CUDADEV_KERNEL_FW_X_SCALAR_R(name, op) \
__global__ void name##_fw_dev( \
const float *px, const float *pk, unsigned size, unsigned mbx, \
unsigned mbk, float *py) { \
const unsigned i = IDX; \
const unsigned shift = blockIdx.y * size; \
if (i < size) py[i + shift] = op(px[i + mbx * shift], pk[mbk * blockIdx.y]); \
}
#define CUDADEV_KERNEL_FW_X_SCALAR_L(name, op) \
__global__ void name##_fw_dev( \
const float *px, const float *pk, unsigned size, unsigned mbx, \
unsigned mbk, float *py) { \
const unsigned i = IDX; \
const unsigned shift = blockIdx.y * size; \
if (i < size) py[i + shift] = op(pk[mbk * blockIdx.y], px[i + mbx * shift]); \
}
// Tensor (op) tensor forward kernel generator with the same batch-broadcast
// convention as the scalar variants above.
#define CUDADEV_KERNEL_FW_AB(name, op) \
__global__ void name##_fw_dev( \
const float *pa, const float *pb, unsigned size, unsigned mba, \
unsigned mbb, float *py) { \
const unsigned i = IDX; \
const unsigned shift = blockIdx.y * size; \
if (i < size) py[i + shift] = op(pa[i + mba * shift], pb[i + mbb * shift]); \
}
// Unary forward kernels.
CUDADEV_KERNEL_FW_X(negate, -px[i]);
CUDADEV_KERNEL_FW_X(sqrt, ::__fsqrt_rn(px[i]));
CUDADEV_KERNEL_FW_X(exp, ::expf(px[i]));
CUDADEV_KERNEL_FW_X(log, ::logf(px[i]));
CUDADEV_KERNEL_FW_X(tanh, ::tanhf(px[i]));
// sigmoid via tanh for numerical stability on large |x|.
CUDADEV_KERNEL_FW_X(sigmoid, .5f + .5f * ::tanhf(.5f * px[i]));
CUDADEV_KERNEL_FW_X(
softplus, ::fmaxf(px[i], .0f) + ::logf(1.f + ::expf(-::fabs(px[i]))));
CUDADEV_KERNEL_FW_X(sin, ::sinf(px[i]));
CUDADEV_KERNEL_FW_X(cos, ::cosf(px[i]));
CUDADEV_KERNEL_FW_X(tan, ::tanf(px[i]));
// Unary backward kernels (ops written in terms of px, py, pgy).
CUDADEV_KERNEL_BW_X(sqrt, .5f * pgy[i] / py[i]);
CUDADEV_KERNEL_BW_X(exp, py[i] * pgy[i]);
CUDADEV_KERNEL_BW_X(log, pgy[i] / px[i]);
CUDADEV_KERNEL_BW_X(tanh, (1.f - py[i] * py[i]) * pgy[i]);
CUDADEV_KERNEL_BW_X(sigmoid, py[i] * (1.f - py[i]) * pgy[i]);
CUDADEV_KERNEL_BW_X(softplus, (.5f + .5f * ::tanhf(.5f * px[i])) * pgy[i]);
CUDADEV_KERNEL_BW_X(sin, ::cosf(px[i]) * pgy[i]);
CUDADEV_KERNEL_BW_X(cos, -::sinf(px[i]) * pgy[i]);
CUDADEV_KERNEL_BW_X(tan, (1.f + py[i] * py[i]) * pgy[i]);
// Tensor-with-constant forward kernels.
CUDADEV_KERNEL_FW_X_CONST(add_const, px[i] + k);
CUDADEV_KERNEL_FW_X_CONST(subtract_const_r, px[i] - k);
CUDADEV_KERNEL_FW_X_CONST(subtract_const_l, k - px[i]);
CUDADEV_KERNEL_FW_X_CONST(multiply_const, px[i] * k);
CUDADEV_KERNEL_FW_X_CONST(divide_const_r, px[i] / k);
CUDADEV_KERNEL_FW_X_CONST(divide_const_l, k / px[i]);
CUDADEV_KERNEL_FW_X_CONST(prelu, ::fmaxf(px[i], .0f) + k * ::fminf(px[i], .0f));
CUDADEV_KERNEL_FW_X_CONST(
elu, ::fmaxf(px[i], .0f) + k * (::expf(::fminf(px[i], .0f)) - 1.0f));
// Tensor-with-constant backward kernels.
CUDADEV_KERNEL_BW_X_CONST(add_const, pgy[i]);
CUDADEV_KERNEL_BW_X_CONST(subtract_const_r, pgy[i]);
CUDADEV_KERNEL_BW_X_CONST(subtract_const_l, -pgy[i]);
CUDADEV_KERNEL_BW_X_CONST(multiply_const, k * pgy[i]);
CUDADEV_KERNEL_BW_X_CONST(divide_const_r, pgy[i] / k);
CUDADEV_KERNEL_BW_X_CONST(divide_const_l, -py[i] * pgy[i] / px[i]);
CUDADEV_KERNEL_BW_X_CONST(prelu, pgy[i] * ((px[i] > .0f) + k * (px[i] <= .0f)));
CUDADEV_KERNEL_BW_X_CONST(
elu, pgy[i] * ((px[i] > .0f) + (py[i] + k) * (px[i] <= .0f)));
// Tensor (op) scalar-tensor forward kernels using round-to-nearest intrinsics.
CUDADEV_KERNEL_FW_X_SCALAR_R(add_scalar, ::__fadd_rn);
CUDADEV_KERNEL_FW_X_SCALAR_R(subtract_scalar_r, ::__fsub_rn);
CUDADEV_KERNEL_FW_X_SCALAR_L(subtract_scalar_l, ::__fsub_rn);
CUDADEV_KERNEL_FW_X_SCALAR_R(multiply_scalar, ::__fmul_rn);
CUDADEV_KERNEL_FW_X_SCALAR_R(divide_scalar_r, ::__fdiv_rn);
CUDADEV_KERNEL_FW_X_SCALAR_L(divide_scalar_l, ::__fdiv_rn);
// Tensor (op) tensor forward kernels.
CUDADEV_KERNEL_FW_AB(add, ::__fadd_rn);
CUDADEV_KERNEL_FW_AB(subtract, ::__fsub_rn);
CUDADEV_KERNEL_FW_AB(multiply, ::__fmul_rn);
CUDADEV_KERNEL_FW_AB(divide, ::__fdiv_rn);
#undef CUDADEV_KERNEL_FW_X
#undef CUDADEV_KERNEL_BW_X
#undef CUDADEV_KERNEL_FW_X_CONST
#undef CUDADEV_KERNEL_BW_X_CONST
#undef CUDADEV_KERNEL_FW_X_SCALAR_R
#undef CUDADEV_KERNEL_FW_X_SCALAR_L
#undef CUDADEV_KERNEL_FW_AB
// Backward of elementwise add: both operand gradients receive gy unchanged.
// blockIdx.y is the batch index; mba/mbb are 0/1 multipliers disabling the
// batch offset on a broadcast operand.  atomicAdd is needed because a
// broadcast operand collects contributions from every batch.
__global__ void add_bw_dev(
    const float *, const float *, const float *, const float *pgy,
    unsigned size, unsigned mba, unsigned mbb, float *pga, float *pgb) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  const unsigned ofs = blockIdx.y * size;
  const float g = pgy[idx + ofs];
  ::atomicAdd(pga + idx + mba * ofs, g);
  ::atomicAdd(pgb + idx + mbb * ofs, g);
}
// Backward of elementwise subtract: ga += gy, gb += -gy.
// Same batch-broadcast convention (blockIdx.y, mba/mbb) as add_bw_dev.
__global__ void subtract_bw_dev(
    const float *, const float *, const float *, const float *pgy,
    unsigned size, unsigned mba, unsigned mbb, float *pga, float *pgb) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  const unsigned ofs = blockIdx.y * size;
  const float g = pgy[idx + ofs];
  ::atomicAdd(pga + idx + mba * ofs, g);
  ::atomicAdd(pgb + idx + mbb * ofs, -g);
}
// Backward of elementwise multiply: ga += gy * b, gb += gy * a.
// Same batch-broadcast convention (blockIdx.y, mba/mbb) as add_bw_dev.
__global__ void multiply_bw_dev(
    const float *pa, const float *pb, const float *, const float *pgy,
    unsigned size, unsigned mba, unsigned mbb, float *pga, float *pgb) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  const unsigned ofs = blockIdx.y * size;
  const unsigned ia = idx + mba * ofs;
  const unsigned ib = idx + mbb * ofs;
  const float g = pgy[idx + ofs];
  ::atomicAdd(pga + ia, g * pb[ib]);
  ::atomicAdd(pgb + ib, g * pa[ia]);
}
// Backward of elementwise divide y = a / b:
//   ga += gy / b
//   gb += -(gy / b) * y        (equals -gy * a / b^2, reusing y = a/b)
// Same batch-broadcast convention (blockIdx.y, mba/mbb) as add_bw_dev.
__global__ void divide_bw_dev(
const float *, const float *pb, const float *py, const float *pgy,
unsigned size, unsigned mba, unsigned mbb, float *pga, float *pgb) {
const unsigned i = IDX;
const unsigned shift = blockIdx.y * size;
if (i < size) {
const unsigned b_ofs = i + mbb * shift;
const unsigned y_ofs = i + shift;
const float k = pgy[y_ofs] / pb[b_ofs];
::atomicAdd(pga + i + mba * shift, k);
::atomicAdd(pgb + b_ofs, -k * py[y_ofs]);
}
}
// Transposes each rows x cols matrix in a batch (blockIdx.z selects the
// batch): py[j, i] = px[i, j] in column-major storage.
__global__ void transpose_fw_dev(
    const float *px, unsigned rows, unsigned cols, float *py) {
  const unsigned r = IDX;
  const unsigned c = IDY;
  if (r >= rows || c >= cols) return;
  const unsigned base = blockIdx.z * rows * cols;
  py[base + c + r * cols] = px[base + r + c * rows];
}
// Backward of transpose: accumulates the transposed output gradient py back
// into the input gradient px, one rows x cols matrix per blockIdx.z.
__global__ void transpose_bw_dev(
    const float *py, unsigned rows, unsigned cols, float *px) {
  const unsigned r = IDX;
  const unsigned c = IDY;
  if (r >= rows || c >= cols) return;
  const unsigned base = blockIdx.z * rows * cols;
  px[base + r + c * rows] += py[base + c + r * cols];
}
// Sums n elements of px with stride `skip` into py[bid], one thread block per
// output element: block bid reduces px[bid%skip + (bid/skip)*skip*n + i*skip]
// for i in [0, n).  BLOCK_SIZE must equal blockDim.x and be a power of two
// (<= 1024) for the reduction ladder below to cover all strides.
template<unsigned BLOCK_SIZE>
__global__ void sum_fw_dev(
const float *px, unsigned skip, unsigned n, float *py) {
__shared__ float temp[BLOCK_SIZE];
const unsigned bid = blockIdx.x;
const unsigned tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
// Block-strided pass: each thread accumulates its share into shared memory.
temp[tid] = 0;
for (unsigned i = tid; i < n; i += BLOCK_SIZE) temp[tid] += px[i * skip];
__syncthreads();
// Shared-memory tree reduction.  The guard is uniform at compile time, so
// every thread of the block reaches the __syncthreads() inside.
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] += temp[tid + k]; \
__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0];
}
// Numerically stable log(exp(a) + exp(b)) in single precision: factoring out
// the larger operand keeps the exponent argument non-positive.
// Fix: the original called the double-precision ::log/::exp on float
// operands (silent promotion to double on every element); log1pf(expf(d))
// also preserves precision when exp(d) is tiny, unlike logf(1.f + expf(d)).
__device__ float logsumexp2_fw_dev(float a, float b) {
  return a > b
    ? a + ::log1pf(::expf(b - a))
    : b + ::log1pf(::expf(a - b));
}
// log-sum-exp over n strided elements per output, combining pairs with the
// stable logsumexp2_fw_dev.  Same indexing scheme and BLOCK_SIZE contract
// (power of two, equal to blockDim.x) as sum_fw_dev.
template<unsigned BLOCK_SIZE>
__global__ void logsumexp_fw_dev(
const float *px, unsigned skip, unsigned n, float *py) {
__shared__ float temp[BLOCK_SIZE];
const unsigned bid = blockIdx.x;
const unsigned tid = threadIdx.x;
px += bid % skip + (bid / skip) * skip * n;
temp[tid] = -1e38; // NOTE(odashi): Near the minimum of the float.
for (unsigned i = tid; i < n; i += BLOCK_SIZE) {
temp[tid] = ::logsumexp2_fw_dev(temp[tid], px[i * skip]);
}
__syncthreads();
// Tree reduction with the log-sum-exp combiner; the guard is uniform at
// compile time, so the __syncthreads() is reached by the whole block.
#define REDUCE(k) \
if (BLOCK_SIZE >= k << 1) { \
if (tid < k) temp[tid] = ::logsumexp2_fw_dev(temp[tid], temp[tid + k]); \
__syncthreads(); \
}
REDUCE(512)
REDUCE(256)
REDUCE(128)
REDUCE(64)
REDUCE(32)
REDUCE(16)
REDUCE(8)
REDUCE(4)
REDUCE(2)
REDUCE(1)
#undef REDUCE
if (tid == 0) py[bid] = temp[0];
}
// Broadcasts px along one dimension: output element i reads input position
// i % skip1 + (i / skip2) * skip1, repeating each skip1-sized block
// skip2 / skip1 times.
__global__ void broadcast_fw_dev(
    const float *px, unsigned skip1, unsigned skip2, unsigned size, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  py[idx] = px[idx % skip1 + (idx / skip2) * skip1];
}
// Sums a batched tensor over its batch dimension: py[i] = sum_j px[i + j*size]
// for j in [0, batch).  One thread per output element; accumulation order is
// ascending batch index.
__global__ void batch_sum_fw_dev(
    const float *px, unsigned size, unsigned batch, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  float acc = .0f;
  for (unsigned b = 0; b < batch; ++b) {
    acc += px[idx + b * size];
  }
  py[idx] = acc;
}
// Scales px in place by the constant k, one thread per element.
__global__ void inplace_multiply_const_dev(
    float k, unsigned size, float *px) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  px[idx] = px[idx] * k;
}
// py += px with batch broadcasting: blockIdx.y is the batch index and
// mbx/mby are 0/1 multipliers disabling the batch offset on a broadcast
// side.  atomicAdd handles collisions when py lacks a batch dimension.
__global__ void inplace_add_dev(
    const float *px, unsigned size, unsigned mbx, unsigned mby, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  const unsigned ofs = blockIdx.y * size;
  ::atomicAdd(py + idx + mby * ofs, px[idx + mbx * ofs]);
}
// py -= px with the same batch-broadcast convention as inplace_add_dev;
// implemented as an atomic add of the negated value.
__global__ void inplace_subtract_dev(
    const float *px, unsigned size, unsigned mbx, unsigned mby, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  const unsigned ofs = blockIdx.y * size;
  ::atomicAdd(py + idx + mby * ofs, -px[idx + mbx * ofs]);
}
#undef IDX
#undef IDY
// Minimum requirements of the compute capability.
// Devices below CC 3.0 are rejected by CUDA::initialize().
static const int MIN_CC_MAJOR = 3;
static const int MIN_CC_MINOR = 0;
/*
 * CUBLAS initializer/finalizer.
 */
// RAII owner of one cuBLAS handle bound to a specific device.  Copy and move
// are deleted so the handle is created and destroyed exactly once.
class CUBLASHandle {
private:
CUBLASHandle(const CUBLASHandle &) = delete;
CUBLASHandle(CUBLASHandle &&) = delete;
CUBLASHandle &operator=(const CUBLASHandle &) = delete;
CUBLASHandle &operator=(CUBLASHandle &&) = delete;
public:
// Selects `dev_id` as the current device, then creates the handle there.
explicit CUBLASHandle(unsigned dev_id) {
CUDA_CALL(::hipSetDevice(dev_id));
CUBLAS_CALL(::hipblasCreate(&handle_));
//cerr << "CUBLAS initialized at device " << dev_id << '.' << endl;
}
// NOTE(review): CUBLAS_CALL is defined elsewhere; if it can throw, doing so
// from a destructor is risky — confirm its failure behavior.
~CUBLASHandle() {
CUBLAS_CALL(::hipblasDestroy(handle_));
//cerr << "CUBLAS finalized." << endl;
}
// Raw handle accessor for cuBLAS API calls.
::hipblasHandle_t get() const { return handle_; }
private:
::hipblasHandle_t handle_;
};
/*
 * CURAND initializer/finalizer.
 */
// RAII owner of one cuRAND pseudo-random generator bound to a specific
// device and seeded deterministically.  Copy and move are deleted.
class CURANDHandle {
private:
CURANDHandle(const CURANDHandle &) = delete;
CURANDHandle(CURANDHandle &&) = delete;
CURANDHandle &operator=(const CURANDHandle &) = delete;
CURANDHandle &operator=(CURANDHandle &&) = delete;
public:
// Creates a default pseudo-RNG on device `dev_id` seeded with `rng_seed`.
CURANDHandle(unsigned dev_id, unsigned rng_seed) {
CUDA_CALL(::hipSetDevice(dev_id));
CURAND_CALL(::hiprandCreateGenerator(&handle_, HIPRAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(::hiprandSetPseudoRandomGeneratorSeed(handle_, rng_seed));
//cerr << "CURAND initialized at device " << dev_id << '.' << endl;
}
// NOTE(review): CURAND_CALL is defined elsewhere; confirm it cannot throw
// from this destructor.
~CURANDHandle() {
CURAND_CALL(::hiprandDestroyGenerator(handle_));
//cerr << "CURAND finalized." << endl;
}
// Raw generator accessor for cuRAND API calls.
::hiprandGenerator_t get() const { return handle_; }
private:
::hiprandGenerator_t handle_;
};
} // namespace
namespace primitiv {
namespace devices {
/*
 * Hidden objects of CUDA device.
 */
// Per-device library state: cuBLAS and cuRAND handles plus the cached device
// properties (`prop` is filled in by CUDA::initialize()).
struct CUDAInternalState {
CUDAInternalState(unsigned dev_id, unsigned rng_seed)
: cublas(dev_id) , hiprand(dev_id, rng_seed) {}
::CUBLASHandle cublas;
::CURANDHandle hiprand;
::hipDeviceProp_t prop;
};
// Returns the number of CUDA-capable devices visible to the runtime.
unsigned CUDA::num_devices() {
  int count;
  CUDA_CALL(::hipGetDeviceCount(&count));
  return count;
}
// One-time device setup shared by both constructors: validates the compute
// capability, derives kernel launch dimensions from the device limits,
// creates the cuBLAS/cuRAND handles, and reserves the device-side id buffer
// used by pick_fw/bw.
void CUDA::initialize() {
// Retrieves device properties.
::hipDeviceProp_t prop;
CUDA_CALL(::hipGetDeviceProperties(&prop, dev_id_));
// Check compute capability requirements.
if (prop.major < ::MIN_CC_MAJOR ||
(prop.major == ::MIN_CC_MAJOR && prop.minor < ::MIN_CC_MINOR)) {
THROW_ERROR(
"CUDA Device " << dev_id_ << " does not satisfy the "
"minimum requirement of the compute capability: "
<< prop.major << '.' << prop.minor << " < "
<< ::MIN_CC_MAJOR << '.' << ::MIN_CC_MINOR);
}
// Calculates size of dims to be used in CUDA kernels.
// dim1_x_: largest power of two <= min(1024, maxThreadsPerBlock).
dim1_x_ = 1;
while (dim1_x_ < 1024 &&
dim1_x_ < static_cast<unsigned>(prop.maxThreadsPerBlock)) {
dim1_x_ <<= 1;
}
// Split the 1-D thread budget into a dim2_x_ x dim2_y_ block with
// dim2_x_ >= dim2_y_ and dim2_x_ * dim2_y_ == dim1_x_.
dim2_y_ = dim1_x_;
dim2_x_ = 1;
while (dim2_x_ < dim2_y_) {
dim2_x_ <<= 1;
dim2_y_ >>= 1;
}
// Batches are mapped to gridDim.y, so the y-grid limit caps the batch size.
max_batch_ = prop.maxGridSize[1];
// Initializes additional libraries
state_.reset(new CUDAInternalState(dev_id_, rng_seed_));
state_->prop = prop;
// Initializes the device pointer for integer IDs.
ids_ptr_ = pool_.allocate(sizeof(unsigned) * max_batch_);
}
// Constructs a device object for `device_id`, seeding the RNG
// nondeterministically from std::random_device.
CUDA::CUDA(unsigned device_id)
: dev_id_(device_id)
, rng_seed_(std::random_device()())
, pool_(device_id) {
initialize();
}
// Constructs a device object for `device_id` with a caller-supplied RNG seed
// (reproducible random ops).
CUDA::CUDA(unsigned device_id, unsigned rng_seed)
: dev_id_(device_id)
, rng_seed_(rng_seed)
, pool_(device_id) {
initialize();
}
// Members (state_, pool_) release their resources via their own destructors.
CUDA::~CUDA() {
// Nothing to do for now.
}
// Prints a human-readable summary of this device (cached properties from
// initialize()) to stderr.  Diagnostic only; no device state is touched.
void CUDA::dump_description() const {
cerr << "Device " << this << ':' << endl;
cerr << " Type: CUDA" << endl;
const ::hipDeviceProp_t &prop = state_->prop;
cerr << " Physical Device: " << dev_id_ << ':' << endl;
cerr << " Name ................. " << prop.name << endl;
cerr << " Global Memory ........ " << prop.totalGlobalMem << endl;
cerr << " Shared Memory ........ " << prop.sharedMemPerBlock << endl;
cerr << " Threads/block ........ " << prop.maxThreadsPerBlock << endl;
cerr << " Threads dim .......... " << prop.maxThreadsDim[0] << ','
<< prop.maxThreadsDim[1] << ','
<< prop.maxThreadsDim[2] << endl;
cerr << " Grid size ............ " << prop.maxGridSize[0] << ','
<< prop.maxGridSize[1] << ','
<< prop.maxGridSize[2] << endl;
cerr << " Compute Capability ... " << prop.major << '.'
<< prop.minor << endl;
/*
cerr << " Configurations:" << endl;
cerr << " 1 dim ........... " << dim1_x_ << " threads" << endl;
cerr << " 2 dims .......... " << dim2_x_ << "x"
<< dim2_y_ << " threads" << endl;
cerr << " Maximum batch ... " << max_batch_ <<endl;
*/
}
// Allocates device memory for one float tensor of `shape` from this
// device's memory pool.
std::shared_ptr<void> CUDA::new_handle(const Shape &shape) {
return pool_.allocate(sizeof(float) * shape.size());
}
// Ceiling division used to size 1-D kernel grids.
#define GRID_SIZE(x, threads) (((x) + (threads) - 1) / (threads))
// Mutable / const views of a Tensor's device buffer.
#define DATA(x) static_cast<float *>((x).data())
#define CDATA(x) static_cast<const float *>((x).data())
// Copies the full contents of device tensor `x` into a host-side vector
// (blocking device-to-host transfer).
std::vector<float> CUDA::tensor_to_vector_impl(const Tensor &x) {
  const unsigned num_elements = x.shape().size();
  std::vector<float> result(num_elements);
  CUDA_CALL(::hipSetDevice(dev_id_));
  CUDA_CALL(::hipMemcpy(
      result.data(), x.data(), sizeof(float) * num_elements,
      hipMemcpyDeviceToHost));
  return result;
}
// Fills every element of `x` with the constant k via set_const_dev.
void CUDA::reset_tensor_impl(float k, Tensor &x) {
const unsigned size = x.shape().size();
const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::set_const_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, k, size, DATA(x));
}
// Overwrites `x` with `values`, a host array assumed to hold at least
// x.shape().size() floats (blocking host-to-device copy).
void CUDA::reset_tensor_by_array_impl(const float values[], Tensor &x) {
const unsigned size = x.shape().size();
CUDA_CALL(::hipSetDevice(dev_id_));
CUDA_CALL(::hipMemcpy(
x.data(), values, sizeof(float) * size, hipMemcpyHostToDevice));
}
// Copies tensor `x` (which may live on another device type) into `y` on this
// device, choosing the fastest available path per source device type.
void CUDA::copy_tensor_impl(const Tensor &x, Tensor &y) {
switch (x.device().type()) {
case Device::DEVICE_TYPE_CPU:
// Host memory: plain host-to-device copy.
reset_tensor_by_array(CDATA(x), y);
break;
case Device::DEVICE_TYPE_CUDA:
CUDA_CALL(::hipSetDevice(dev_id_));
// NOTE(odashi):
// If source/destination devices use the unified memory space on the 64
// bits machine, we can perform ::hipMemcpy to copy data beyond devices.
// Async copy on the default (0) stream.
CUDA_CALL(::hipMemcpyAsync(
DATA(y), CDATA(x),
sizeof(float) * x.shape().size(),
hipMemcpyDeviceToDevice, 0));
break;
default:
// Fallback: round-trip through a host vector.
reset_tensor_by_vector(x.to_vector(), y);
}
}
// Writes an identity matrix into `y`: skip = leading dimension + 1 places a
// one at every diagonal position of the flattened buffer.
void CUDA::identity_impl(Tensor &y) {
const unsigned size = y.shape().size();
const unsigned skip = y.shape()[0] + 1;
const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::set_identity_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, size, skip, DATA(y));
}
// Fills `y` with Bernoulli(p) samples: generate uniform values in place with
// cuRAND, then threshold them against p on the device.
void CUDA::random_bernoulli_impl(float p, Tensor &y) {
const unsigned size = y.shape().size();
const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
CURAND_CALL(::hiprandGenerateUniform(state_->hiprand.get(), DATA(y), size));
hipLaunchKernelGGL(( ::rand_bernoulli_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, p, size, DATA(y));
}
// Fills `y` with uniform samples in [lower, upper): cuRAND uniforms are
// rescaled in place by the affine kernel.
void CUDA::random_uniform_impl(float lower, float upper, Tensor &y) {
const unsigned size = y.shape().size();
const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
const float scale = upper - lower;
CUDA_CALL(::hipSetDevice(dev_id_));
CURAND_CALL(::hiprandGenerateUniform(state_->hiprand.get(), DATA(y), size));
hipLaunchKernelGGL(( ::rand_affine_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, lower, scale, size, DATA(y));
}
// Fills `y` with N(mean, sd^2) samples directly via cuRAND.
void CUDA::random_normal_impl(float mean, float sd, Tensor &y) {
CUDA_CALL(::hipSetDevice(dev_id_));
CURAND_CALL(::hiprandGenerateNormal(
state_->hiprand.get(), DATA(y), y.shape().size(), mean, sd));
}
// Fills `y` with log-normal samples (exp of N(mean, sd^2)) via cuRAND.
void CUDA::random_log_normal_impl(float mean, float sd, Tensor &y) {
CUDA_CALL(::hipSetDevice(dev_id_));
CURAND_CALL(::hiprandGenerateLogNormal(
state_->hiprand.get(), DATA(y), y.shape().size(), mean, sd));
}
// Forward of pick: gathers slices of `x` along `dim` selected per batch by
// `ids`.  The id list is staged into the preallocated device buffer ids_ptr_
// (sized max_batch_ in initialize(); assumes ids.size() <= max_batch_).
// Launch: gridDim.y = output batch, gridDim.x covers one batch's volume.
void CUDA::pick_fw_impl(
const Tensor &x, const std::vector<unsigned> &ids, unsigned dim,
Tensor &y) {
const unsigned wy = y.shape().lower_volume(dim);
const unsigned sy = y.shape().volume();
const unsigned g1 = GRID_SIZE(sy, dim1_x_);
const unsigned bs = y.shape().batch();
CUDA_CALL(::hipSetDevice(dev_id_));
CUDA_CALL(::hipMemcpy(
ids_ptr_.get(), ids.data(), sizeof(unsigned) * ids.size(),
hipMemcpyHostToDevice));
// si = (ids.size() > 1): a single id is broadcast across batches.
hipLaunchKernelGGL(( ::pick_fw_dev), dim3(dim3(g1, bs)), dim3(dim1_x_), 0, 0,
CDATA(x), static_cast<const unsigned *>(ids_ptr_.get()),
wy * x.shape()[dim], wy,
x.shape().has_batch() * x.shape().volume(), ids.size() > 1, sy,
DATA(y));
}
// Forward of slice: copies the window [offset, offset + y-extent) of `x`
// along `dim` into `y`.  The input pointer is pre-offset to the window start;
// span/skip describe one output/input row of the sliced dimension.
void CUDA::slice_fw_impl(
const Tensor &x, unsigned dim, unsigned offset, Tensor &y) {
const unsigned base = y.shape().lower_volume(dim);
const unsigned span = base * y.shape()[dim];
const unsigned skip = base * x.shape()[dim];
const unsigned size = y.shape().size();
const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::slice_fw_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0,
CDATA(x) + base * offset, span, skip, size, DATA(y));
}
// Forward of concat: writes each input tensor into its strided region of `y`
// along `dim`, one kernel launch per input.  `offset` advances by each
// input's span so regions are laid out consecutively; inputs with fewer
// batches than `y` are replayed by the kernel (batch broadcasting).
void CUDA::concat_fw_impl(
const std::vector<const Tensor *> &xs, unsigned dim, Tensor &y) {
const unsigned new_bs = y.shape().batch();
const unsigned base = y.shape().lower_volume(dim);
const unsigned skip = base * y.shape()[dim];
unsigned repeat = y.shape().volume() / skip;
CUDA_CALL(::hipSetDevice(dev_id_));
unsigned offset = 0;
for (const Tensor *x : xs) {
const unsigned span = base * x->shape()[dim];
const unsigned x_size = span * repeat * x->shape().batch();
const unsigned y_size = span * repeat * new_bs;
const unsigned num_blocks = GRID_SIZE(y_size, dim1_x_);
hipLaunchKernelGGL(( ::concat_fw_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0,
CDATA(*x), span, skip, x_size, y_size, DATA(y) + offset);
offset += span;
}
}
// Backward of pick: scatter-adds `gy` into the slices of `gx` that the
// forward pass read.  Mirrors pick_fw_impl's launch geometry and also stages
// `ids` into ids_ptr_ (assumes ids.size() <= max_batch_).
void CUDA::pick_bw_impl(
const Tensor &gy, const std::vector<unsigned>& ids, unsigned dim,
Tensor &gx) {
const unsigned wy = gy.shape().lower_volume(dim);
const unsigned sy = gy.shape().volume();
const unsigned g1 = GRID_SIZE(sy, dim1_x_);
const unsigned bs = gy.shape().batch();
CUDA_CALL(::hipSetDevice(dev_id_));
CUDA_CALL(::hipMemcpy(
ids_ptr_.get(), ids.data(), sizeof(unsigned) * ids.size(),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( ::pick_bw_dev), dim3(dim3(g1, bs)), dim3(dim1_x_), 0, 0,
CDATA(gy), static_cast<const unsigned *>(ids_ptr_.get()),
wy *gx.shape()[dim], wy,
gx.shape().has_batch() * gx.shape().volume(), ids.size() > 1, sy,
DATA(gx));
}
// Backward of slice: accumulates `gy` into the window of `gx` starting at
// `offset` along `dim`.  nx/ny count rows including batches; the kernel's
// modulo wrapping handles batch broadcasting on either side.
void CUDA::slice_bw_impl(
const Tensor &gy, unsigned dim, unsigned offset, Tensor &gx) {
const Shape &sx = gx.shape();
const Shape &sy = gy.shape();
const unsigned base = sx.lower_volume(dim);
const unsigned ox = base * offset;
const unsigned wx = base * sx[dim];
const unsigned wy = base * sy[dim];
const unsigned repeat = sx.volume() / wx;
const unsigned nx = repeat * sx.batch();
const unsigned ny = repeat * sy.batch();
const unsigned g1 = GRID_SIZE(wy * ::max(nx, ny), dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::slice_bw_dev), dim3(g1), dim3(dim1_x_), 0, 0, CDATA(gy), wx, wy, nx, ny, DATA(gx) + ox);
}
// Host wrapper generator for unary forward ops: set device, size a 1-D grid
// over all elements, launch name##_fw_dev.
#define CUDADEV_FW_X(name) \
void CUDA::name##_fw_impl(const Tensor &x, Tensor &y) { \
const unsigned size = x.shape().size(); \
const unsigned num_blocks = GRID_SIZE(size, dim1_x_); \
CUDA_CALL(::hipSetDevice(dev_id_)); \
hipLaunchKernelGGL(( ::name##_fw_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, CDATA(x), size, DATA(y)); \
}
// Host wrapper generator for unary backward ops.
#define CUDADEV_BW_X(name) \
void CUDA::name##_bw_impl( \
const Tensor &x, const Tensor &y, const Tensor &gy, Tensor &gx) { \
const unsigned size = x.shape().size(); \
const unsigned num_blocks = GRID_SIZE(size, dim1_x_); \
CUDA_CALL(::hipSetDevice(dev_id_)); \
hipLaunchKernelGGL(( ::name##_bw_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, \
CDATA(x), CDATA(y), CDATA(gy), size, DATA(gx)); \
}
// Host wrapper generator for tensor-with-constant forward ops.
#define CUDADEV_FW_X_CONST(name) \
void CUDA::name##_fw_impl(const Tensor &x, float k, Tensor &y) { \
const unsigned size = x.shape().size(); \
const unsigned num_blocks = GRID_SIZE(size,dim1_x_); \
CUDA_CALL(::hipSetDevice(dev_id_)); \
hipLaunchKernelGGL(( ::name##_fw_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, CDATA(x), k, size, DATA(y)); \
}
// Host wrapper generator for tensor-with-constant backward ops.
#define CUDADEV_BW_X_CONST(name) \
void CUDA::name##_bw_impl( \
const Tensor &x, const Tensor &y, const Tensor &gy, float k, Tensor &gx) { \
const unsigned size = x.shape().size(); \
const unsigned num_blocks = GRID_SIZE(size, dim1_x_); \
CUDA_CALL(::hipSetDevice(dev_id_)); \
hipLaunchKernelGGL(( ::name##_bw_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0, \
CDATA(x), CDATA(y), CDATA(gy), k, size, DATA(gx)); \
}
// Host wrapper generator for tensor (op) scalar-tensor forward ops;
// gridDim.y spans the output batch and has_batch() flags select broadcasting.
#define CUDADEV_FW_X_SCALAR(name) \
void CUDA::name##_fw_impl(const Tensor &x, const Tensor &k, Tensor &y) { \
const unsigned size = y.shape().volume(); \
const unsigned g1 = GRID_SIZE(size, dim1_x_); \
const unsigned g2 = y.shape().batch(); \
CUDA_CALL(::hipSetDevice(dev_id_)); \
hipLaunchKernelGGL(( ::name##_fw_dev), dim3(dim3(g1, g2, 1)), dim3(dim1_x_), 0, 0, \
CDATA(x), CDATA(k), size, \
x.shape().has_batch(), k.shape().has_batch(), DATA(y)); \
}
// Host wrapper generator for tensor (op) tensor forward ops.
#define CUDADEV_FW_AB(name) \
void CUDA::name##_fw_impl(const Tensor &a, const Tensor &b, Tensor &y) { \
const unsigned size = y.shape().volume(); \
const unsigned g1 = GRID_SIZE(size, dim1_x_); \
const unsigned g2 = y.shape().batch(); \
CUDA_CALL(::hipSetDevice(dev_id_)); \
hipLaunchKernelGGL(( ::name##_fw_dev), dim3(dim3(g1, g2, 1)), dim3(dim1_x_), 0, 0, \
CDATA(a), CDATA(b), size, \
a.shape().has_batch(), b.shape().has_batch(), DATA(y)); \
}
// Host wrapper generator for tensor (op) tensor backward ops.
#define CUDADEV_BW_AB(name) \
void CUDA::name##_bw_impl( \
const Tensor &a, const Tensor &b, const Tensor &y, const Tensor &gy, \
Tensor &ga, Tensor &gb) { \
const unsigned size = y.shape().volume(); \
const unsigned g1 = GRID_SIZE(size, dim1_x_); \
const unsigned g2 = y.shape().batch(); \
CUDA_CALL(::hipSetDevice(dev_id_)); \
hipLaunchKernelGGL(( ::name##_bw_dev), dim3(dim3(g1, g2, 1)), dim3(dim1_x_), 0, 0, \
CDATA(a), CDATA(b), CDATA(y), CDATA(gy), size, \
a.shape().has_batch(), b.shape().has_batch(), DATA(ga), DATA(gb)); \
}
// Unary forward wrappers.
CUDADEV_FW_X(negate);
CUDADEV_FW_X(sqrt);
CUDADEV_FW_X(exp);
CUDADEV_FW_X(log);
CUDADEV_FW_X(tanh);
CUDADEV_FW_X(sigmoid);
CUDADEV_FW_X(softplus);
CUDADEV_FW_X(sin);
CUDADEV_FW_X(cos);
CUDADEV_FW_X(tan);
// Unary backward wrappers.
CUDADEV_BW_X(sqrt);
CUDADEV_BW_X(exp);
CUDADEV_BW_X(log);
CUDADEV_BW_X(tanh);
CUDADEV_BW_X(sigmoid);
CUDADEV_BW_X(softplus);
CUDADEV_BW_X(sin);
CUDADEV_BW_X(cos);
CUDADEV_BW_X(tan);
// Tensor-with-constant forward wrappers.
CUDADEV_FW_X_CONST(add_const);
CUDADEV_FW_X_CONST(subtract_const_r);
CUDADEV_FW_X_CONST(subtract_const_l);
CUDADEV_FW_X_CONST(multiply_const);
CUDADEV_FW_X_CONST(divide_const_r);
CUDADEV_FW_X_CONST(divide_const_l);
CUDADEV_FW_X_CONST(prelu);
CUDADEV_FW_X_CONST(elu);
// Tensor-with-constant backward wrappers.
CUDADEV_BW_X_CONST(add_const);
CUDADEV_BW_X_CONST(subtract_const_r);
CUDADEV_BW_X_CONST(subtract_const_l);
CUDADEV_BW_X_CONST(multiply_const);
CUDADEV_BW_X_CONST(divide_const_r);
CUDADEV_BW_X_CONST(divide_const_l);
CUDADEV_BW_X_CONST(prelu);
CUDADEV_BW_X_CONST(elu);
// Tensor (op) scalar-tensor forward wrappers.
CUDADEV_FW_X_SCALAR(add_scalar);
CUDADEV_FW_X_SCALAR(subtract_scalar_r);
CUDADEV_FW_X_SCALAR(subtract_scalar_l);
CUDADEV_FW_X_SCALAR(multiply_scalar);
CUDADEV_FW_X_SCALAR(divide_scalar_r);
CUDADEV_FW_X_SCALAR(divide_scalar_l);
// Tensor (op) tensor forward/backward wrappers.
CUDADEV_FW_AB(add);
CUDADEV_FW_AB(subtract);
CUDADEV_FW_AB(multiply);
CUDADEV_FW_AB(divide);
CUDADEV_BW_AB(add);
CUDADEV_BW_AB(subtract);
CUDADEV_BW_AB(multiply);
CUDADEV_BW_AB(divide);
#undef CUDADEV_FW_X
#undef CUDADEV_BW_X
#undef CUDADEV_FW_X_CONST
#undef CUDADEV_BW_X_CONST
#undef CUDADEV_FW_X_SCALAR
#undef CUDADEV_FW_AB
#undef CUDADEV_BW_AB
// Forward 2-D transpose: writes x^T into y, independently for each batch
// slice. Uses a 2-D thread block (dim2_x_ x dim2_y_) over (rows, cols) with
// one grid z-layer per batch element.
void CUDA::transpose_fw_impl(const Tensor &x, Tensor &y) {
  const unsigned rows = x.shape()[0];
  const unsigned cols = x.shape()[1];
  const unsigned bs = x.shape().batch();
  const unsigned g1 = GRID_SIZE(rows, dim2_x_);
  const unsigned g2 = GRID_SIZE(cols, dim2_y_);
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(( ::transpose_fw_dev), dim3(dim3(g1, g2, bs)), dim3(dim3(dim2_x_, dim2_y_, 1)), 0, 0,
      CDATA(x), rows, cols, DATA(y));
}
// Forward matrix product y = a . b via SGEMM (hipBLAS uses column-major
// layout, matching the [0]=rows, [1]=cols shape convention here).
// a is (di x dj), b is (dj x dk), y is (di x dk).
void CUDA::matmul_fw_impl(const Tensor &a, const Tensor &b, Tensor &y) {
  const unsigned di = a.shape()[0];
  const unsigned dj = a.shape()[1];
  const unsigned dk = b.shape()[1];
  float alpha = 1.;
  float beta = 0.;  // overwrite y
  CUDA_CALL(::hipSetDevice(dev_id_));
  if (a.shape().has_batch()) {
    // Do gemm multiple times.
    const unsigned a_skip = di * dj;
    // b_skip == 0 broadcasts a single (unbatched) b across all batches.
    const unsigned b_skip = b.shape().has_batch() * dj * dk;
    const unsigned y_skip = di * dk;
    const unsigned bs = a.shape().batch();
    for (unsigned n = 0; n < bs; ++n) {
      CUBLAS_CALL(::hipblasSgemm(
          state_->cublas.get(), ::HIPBLAS_OP_N, ::HIPBLAS_OP_N,
          di, dk, dj,
          &alpha, CDATA(a) + n * a_skip, di, CDATA(b) + n * b_skip, dj,
          &beta, DATA(y) + n * y_skip, di));
    }
  } else {
    // Do gemm only once to calculate the product with a combined matrix.
    // b's batches are laid out contiguously, so they act as extra columns.
    CUBLAS_CALL(::hipblasSgemm(
        state_->cublas.get(), ::HIPBLAS_OP_N, ::HIPBLAS_OP_N,
        di, dk * b.shape().batch(), dj,
        &alpha, CDATA(a), di, CDATA(b), dj,
        &beta, DATA(y), di));
  }
}
// Backward of transpose: accumulates gy^T into gx (the kernel uses +=),
// per batch slice. Launch geometry mirrors transpose_fw_impl.
void CUDA::transpose_bw_impl(
    const Tensor &, const Tensor &, const Tensor &gy, Tensor &gx) {
  const unsigned rows = gx.shape()[0];
  const unsigned cols = gx.shape()[1];
  const unsigned bs = gx.shape().batch();
  const unsigned g1 = GRID_SIZE(rows, dim2_x_);
  const unsigned g2 = GRID_SIZE(cols, dim2_y_);
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(( ::transpose_bw_dev), dim3(dim3(g1, g2, bs)), dim3(dim3(dim2_x_, dim2_y_, 1)), 0, 0,
      CDATA(gy), rows, cols, DATA(gx));
}
// Backward of the matrix product. beta == 1 makes SGEMM accumulate into the
// existing gradient buffers rather than overwrite them.
void CUDA::matmul_bw_impl(
    const Tensor &a, const Tensor &b, const Tensor &, const Tensor &gy,
    Tensor &ga, Tensor &gb) {
  // ga += gy . b^T
  // gb += a^T . gy
  const unsigned di = a.shape()[0];
  const unsigned dj = a.shape()[1];
  const unsigned dk = b.shape()[1];
  float alpha = 1.;
  float beta = 1.;  // accumulate
  CUDA_CALL(::hipSetDevice(dev_id_));
  if (a.shape().has_batch()) {
    // Do gemm multiple times.
    const unsigned a_skip = di * dj;
    // b_skip == 0: unbatched b is broadcast, so gb accumulates over batches.
    const unsigned b_skip = b.shape().has_batch() * dj * dk;
    const unsigned y_skip = di * dk;
    const unsigned bs = a.shape().batch();
    for (unsigned n = 0; n < bs; ++n) {
      CUBLAS_CALL(::hipblasSgemm(
          state_->cublas.get(), ::HIPBLAS_OP_N, ::HIPBLAS_OP_T,
          di, dj, dk,
          &alpha, CDATA(gy) + n * y_skip, di, CDATA(b) + n * b_skip, dj,
          &beta, DATA(ga) + n * a_skip, di));
      CUBLAS_CALL(::hipblasSgemm(
          state_->cublas.get(), ::HIPBLAS_OP_T, ::HIPBLAS_OP_N,
          dj, dk, di,
          &alpha, CDATA(a) + n * a_skip, di, CDATA(gy) + n * y_skip, di,
          &beta, DATA(gb) + n * b_skip, dj));
    }
  } else {
    // Do gemm only once to calculate the product with a combined matrix.
    CUBLAS_CALL(::hipblasSgemm(
        state_->cublas.get(), ::HIPBLAS_OP_N, ::HIPBLAS_OP_T,
        di, dj, dk * b.shape().batch(),
        &alpha, CDATA(gy), di, CDATA(b), dj,
        &beta, DATA(ga), di));
    CUBLAS_CALL(::hipblasSgemm(
        state_->cublas.get(), ::HIPBLAS_OP_T, ::HIPBLAS_OP_N,
        dj, dk * b.shape().batch(), di,
        &alpha, CDATA(a), di, CDATA(gy), di,
        &beta, DATA(gb), dj));
  }
}
// Sums x along dimension `dim` into y. One thread block reduces one output
// element; the block size is shrunk to the smallest power of two that still
// covers n (capped at dim1_x_), and the matching sum_fw_dev<k> template
// instantiation is dispatched.
void CUDA::sum_fw_impl(const Tensor &x, unsigned dim, Tensor &y) {
  const unsigned n = x.shape()[dim];       // reduction length
  const unsigned r = y.shape().size();     // number of output elements
  const unsigned s = y.shape().lower_volume(dim);  // stride between summands
  unsigned block_size = dim1_x_;
  while (block_size >> 1 >= n) block_size >>= 1;
  CUDA_CALL(::hipSetDevice(dev_id_));
  switch (block_size) {
#define CASE(k) \
  case k:hipLaunchKernelGGL(( ::sum_fw_dev<k>), dim3(r), dim3(k), 0, 0, CDATA(x), s, n, DATA(y)); break
    CASE(1024);
    CASE(512);
    CASE(256);
    CASE(128);
    CASE(64);
    CASE(32);
    CASE(16);
    CASE(8);
    CASE(4);
    CASE(2);
    CASE(1);
#undef CASE
  }
}
// log-sum-exp of x along dimension `dim` into y. Same dispatch scheme as
// sum_fw_impl: one block per output element, block size is the smallest
// power of two covering n (capped at dim1_x_).
void CUDA::logsumexp_fw_impl(const Tensor &x, unsigned dim, Tensor &y) {
  const unsigned n = x.shape()[dim];
  const unsigned r = y.shape().size();
  const unsigned s = y.shape().lower_volume(dim);
  unsigned block_size = dim1_x_;
  while (block_size >> 1 >= n) block_size >>= 1;
  CUDA_CALL(::hipSetDevice(dev_id_));
  switch (block_size) {
#define CASE(k) \
  case k:hipLaunchKernelGGL(( ::logsumexp_fw_dev<k>), dim3(r), dim3(k), 0, 0, CDATA(x), s, n, DATA(y)); break
    CASE(1024);
    CASE(512);
    CASE(256);
    CASE(128);
    CASE(64);
    CASE(32);
    CASE(16);
    CASE(8);
    CASE(4);
    CASE(2);
    CASE(1);
#undef CASE
  }
}
// Broadcasts x along dimension `dim` (replicating it `size` times) into y.
// skip1 is the contiguous run below `dim`; skip2 is that run times the new
// extent, i.e. the period of the replication in y.
void CUDA::broadcast_fw_impl(
    const Tensor &x, unsigned dim, unsigned size, Tensor &y) {
  const unsigned skip1 = y.shape().lower_volume(dim);
  const unsigned skip2 = skip1 * size;
  const unsigned total = y.shape().size();
  const unsigned g1 = GRID_SIZE(total, dim1_x_);
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(( ::broadcast_fw_dev), dim3(g1), dim3(dim1_x_), 0, 0, CDATA(x), skip1, skip2, total, DATA(y));
}
// Sums x over its batch dimension into the single-batch tensor y.
void CUDA::batch_sum_fw_impl(const Tensor &x, Tensor &y) {
  const unsigned size = y.shape().size();
  const unsigned g1 = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(( ::batch_sum_fw_dev), dim3(g1), dim3(dim1_x_), 0, 0,
      CDATA(x), size, x.shape().batch(), DATA(y));
}
// Multiplies every element of x by the constant k, in place.
void CUDA::inplace_multiply_const_impl(float k, Tensor &x) {
  const unsigned size = x.shape().size();
  const unsigned g1 = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(( ::inplace_multiply_const_dev), dim3(g1), dim3(dim1_x_), 0, 0, k, size, DATA(x));
}
// y += x, elementwise and in place. Either operand may lack a batch
// dimension; the grid's y-extent covers the larger batch and the kernel
// broadcasts/accumulates via atomicAdd.
void CUDA::inplace_add_impl(const Tensor &x, Tensor &y) {
  const unsigned size = y.shape().volume();
  const unsigned g1 = GRID_SIZE(size, dim1_x_);
  const unsigned bs = ::max(x.shape().batch(), y.shape().batch());
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(( ::inplace_add_dev), dim3(dim3(g1, bs, 1)), dim3(dim1_x_), 0, 0,
      CDATA(x), size, x.shape().has_batch(), y.shape().has_batch(), DATA(y));
}
// y -= x, elementwise and in place; same broadcasting rules as
// inplace_add_impl.
void CUDA::inplace_subtract_impl(const Tensor &x, Tensor &y) {
  const unsigned size = y.shape().volume();
  const unsigned g1 = GRID_SIZE(size, dim1_x_);
  const unsigned bs = ::max(x.shape().batch(), y.shape().batch());
  CUDA_CALL(::hipSetDevice(dev_id_));
  hipLaunchKernelGGL(( ::inplace_subtract_dev), dim3(dim3(g1, bs, 1)), dim3(dim1_x_), 0, 0,
      CDATA(x), size, x.shape().has_batch(), y.shape().has_batch(), DATA(y));
}
} // namespace devices
} // namespace primitiv
| cb6acf209a1c03d17d092605a1e7f8e5b5e6955b.cu | #include <config.h>
#include <cublas_v2.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include <iostream>
#include <random>
#include <primitiv/cuda_device.h>
#include <primitiv/cuda_utils.h>
#include <primitiv/error.h>
using std::cerr;
using std::endl;
namespace {
/*
* CUDA kernels
*/
#define IDX (threadIdx.x + blockIdx.x * blockDim.x)
#define IDY (threadIdx.y + blockIdx.y * blockDim.y)
// Fills the first `size` elements of py with the constant k.
__global__ void set_const_dev(float k, unsigned size, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  py[idx] = k;
}
// Writes 1 at every index that is a multiple of `skip` and 0 elsewhere
// (the main diagonal when the caller passes skip == rows + 1).
__global__ void set_identity_dev(unsigned size, unsigned skip, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  py[idx] = (idx % skip == 0) ? 1.0f : 0.0f;
}
// Converts uniform samples already stored in py into Bernoulli(p) {0, 1}
// values in place (py[i] <= p maps to 1).
// Fix: `size` was declared `float`, unlike every other kernel in this file,
// which forced a float comparison against the integer thread index; the
// only caller passes an unsigned, so taking `unsigned` is fully compatible.
__global__ void rand_bernoulli_dev(float p, unsigned size, float *py) {
  const unsigned i = IDX;
  if (i < size) py[i] = (float)(py[i] <= p);
}
// Applies the affine map v -> v * scale + shift to each element in place
// (used to stretch uniform samples onto an arbitrary interval).
__global__ void rand_affine_dev(
    float shift, float scale, unsigned size, float *py) {
  const unsigned idx = IDX;
  if (idx >= size) return;
  py[idx] = py[idx] * scale + shift;
}
// Gathers slices of px selected by the index array pi into py.
// blockIdx.y enumerates batches; wx/wy are the strides around the picked
// dimension, sx/sy the per-batch volumes, si selects whether pi is
// per-batch (1) or shared (0).
__global__ void pick_fw_dev(
    const float *px, const unsigned *pi,
    unsigned wx, unsigned wy, unsigned sx, unsigned si, unsigned sy,
    float *py) {
  const unsigned t = IDX;
  const unsigned ox = blockIdx.y * sx + pi[blockIdx.y * si] * wy;
  const unsigned oy = blockIdx.y * sy;
  if (t < sy) py[oy + t] = px[ox + (t / wy) * wx + (t % wy)];
}
// Copies a strided window of px into the dense tensor py:
// every `skip` source elements contribute a contiguous run of `span`.
__global__ void slice_fw_dev(
    const float *px, unsigned span, unsigned skip, unsigned size, float *py) {
  const unsigned i = IDX;
  if (i < size) py[i] = px[(i / span) * skip + (i % span)];
}
// Scatter counterpart of slice_fw_dev: writes the dense px into a strided
// window of py; `i % x_size` repeats px when y has more batches than x.
__global__ void concat_fw_dev(
    const float *px, unsigned span, unsigned skip, unsigned x_size,
    unsigned y_size, float *py) {
  const unsigned i = IDX;
  if (i < y_size) py[(i / span) * skip + (i % span)] = px[i % x_size];
}
// Backward of pick_fw_dev: scatters gy back into gx at the picked offsets.
// atomicAdd is required because several picks may address the same slice.
__global__ void pick_bw_dev(
    const float *pgy, const unsigned *pi,
    unsigned wx, unsigned wy, unsigned sx, unsigned si, unsigned sy,
    float *pgx) {
  const unsigned t = IDX;
  const unsigned ox = blockIdx.y * sx + pi[blockIdx.y * si] * wy;
  const unsigned oy = blockIdx.y * sy;
  if (t < sy) ::atomicAdd(pgx + ox + (t / wy) * wx + (t % wy), pgy[oy + t]);
}
// Backward of slice: accumulates gy into the matching strided window of
// gx. The modulo terms handle batch broadcasting in either direction
// (nx/ny are the repeat counts of gx/gy respectively).
__global__ void slice_bw_dev(
    const float *pgy, unsigned wx, unsigned wy, unsigned nx, unsigned ny,
    float *pgx) {
  const unsigned i = IDX;
  if (i < wy * ::max(nx, ny)) {
    ::atomicAdd(
        pgx + ((i / wy) * wx + (i % wy)) % (wx * nx), pgy[i % (wy * ny)]);
  }
}
// Kernel generators for elementwise operations. Each macro expands to one
// __global__ kernel named <name>_fw_dev / <name>_bw_dev whose body applies
// the expression `op` per element. In the batched variants, blockIdx.y is
// the batch index and mb* flags (0 or 1) zero out the batch offset of an
// unbatched operand, implementing broadcast.
// Unary forward: py[i] = op(px[i]).
#define CUDADEV_KERNEL_FW_X(name, op) \
__global__ void name##_fw_dev(const float *px, unsigned size, float *py) { \
  const unsigned i = IDX; \
  if (i < size) py[i] = (op); \
}
// Unary backward: pgx[i] += op; op may read px, py and pgy.
#define CUDADEV_KERNEL_BW_X(name, op) \
__global__ void name##_bw_dev( \
    const float *px, const float *py, const float *pgy, unsigned size, \
    float *pgx) { \
  static_cast<void>(px); \
  static_cast<void>(py); \
  const unsigned i = IDX; \
  if (i < size) pgx[i] += (op); \
}
// Forward with an immediate constant k.
#define CUDADEV_KERNEL_FW_X_CONST(name, op) \
__global__ void name##_fw_dev( \
    const float *px, float k, unsigned size, float *py) { \
  const unsigned i = IDX; \
  if (i < size) py[i] = (op); \
}
// Backward with an immediate constant k.
#define CUDADEV_KERNEL_BW_X_CONST(name, op) \
__global__ void name##_bw_dev( \
    const float *px, const float *py, const float *pgy, float k, \
    unsigned size, float *pgx) { \
  static_cast<void>(px); \
  static_cast<void>(py); \
  const unsigned i = IDX; \
  if (i < size) pgx[i] += (op); \
}
// Forward against a one-element-per-batch scalar tensor pk (tensor op k).
#define CUDADEV_KERNEL_FW_X_SCALAR_R(name, op) \
__global__ void name##_fw_dev( \
    const float *px, const float *pk, unsigned size, unsigned mbx, \
    unsigned mbk, float *py) { \
  const unsigned i = IDX; \
  const unsigned shift = blockIdx.y * size; \
  if (i < size) py[i + shift] = op(px[i + mbx * shift], pk[mbk * blockIdx.y]); \
}
// Same with the operands swapped (k op tensor).
#define CUDADEV_KERNEL_FW_X_SCALAR_L(name, op) \
__global__ void name##_fw_dev( \
    const float *px, const float *pk, unsigned size, unsigned mbx, \
    unsigned mbk, float *py) { \
  const unsigned i = IDX; \
  const unsigned shift = blockIdx.y * size; \
  if (i < size) py[i + shift] = op(pk[mbk * blockIdx.y], px[i + mbx * shift]); \
}
// Tensor-vs-tensor forward with batch broadcasting on either operand.
#define CUDADEV_KERNEL_FW_AB(name, op) \
__global__ void name##_fw_dev( \
    const float *pa, const float *pb, unsigned size, unsigned mba, \
    unsigned mbb, float *py) { \
  const unsigned i = IDX; \
  const unsigned shift = blockIdx.y * size; \
  if (i < size) py[i + shift] = op(pa[i + mba * shift], pb[i + mbb * shift]); \
}
// Concrete elementwise kernels generated from the macros above.
// Unary forward. Note sigmoid is computed via tanh for numerical stability,
// and softplus uses the max/log1p-style rearrangement to avoid overflow.
CUDADEV_KERNEL_FW_X(negate, -px[i]);
CUDADEV_KERNEL_FW_X(sqrt, ::__fsqrt_rn(px[i]));
CUDADEV_KERNEL_FW_X(exp, ::expf(px[i]));
CUDADEV_KERNEL_FW_X(log, ::logf(px[i]));
CUDADEV_KERNEL_FW_X(tanh, ::tanhf(px[i]));
CUDADEV_KERNEL_FW_X(sigmoid, .5f + .5f * ::tanhf(.5f * px[i]));
CUDADEV_KERNEL_FW_X(
    softplus, ::fmaxf(px[i], .0f) + ::logf(1.f + ::expf(-::fabs(px[i]))));
CUDADEV_KERNEL_FW_X(sin, ::sinf(px[i]));
CUDADEV_KERNEL_FW_X(cos, ::cosf(px[i]));
CUDADEV_KERNEL_FW_X(tan, ::tanf(px[i]));
// Unary backward: derivative expressed via px (input) and/or py (output).
CUDADEV_KERNEL_BW_X(sqrt, .5f * pgy[i] / py[i]);
CUDADEV_KERNEL_BW_X(exp, py[i] * pgy[i]);
CUDADEV_KERNEL_BW_X(log, pgy[i] / px[i]);
CUDADEV_KERNEL_BW_X(tanh, (1.f - py[i] * py[i]) * pgy[i]);
CUDADEV_KERNEL_BW_X(sigmoid, py[i] * (1.f - py[i]) * pgy[i]);
CUDADEV_KERNEL_BW_X(softplus, (.5f + .5f * ::tanhf(.5f * px[i])) * pgy[i]);
CUDADEV_KERNEL_BW_X(sin, ::cosf(px[i]) * pgy[i]);
CUDADEV_KERNEL_BW_X(cos, -::sinf(px[i]) * pgy[i]);
CUDADEV_KERNEL_BW_X(tan, (1.f + py[i] * py[i]) * pgy[i]);
// Tensor-vs-constant forward.
CUDADEV_KERNEL_FW_X_CONST(add_const, px[i] + k);
CUDADEV_KERNEL_FW_X_CONST(subtract_const_r, px[i] - k);
CUDADEV_KERNEL_FW_X_CONST(subtract_const_l, k - px[i]);
CUDADEV_KERNEL_FW_X_CONST(multiply_const, px[i] * k);
CUDADEV_KERNEL_FW_X_CONST(divide_const_r, px[i] / k);
CUDADEV_KERNEL_FW_X_CONST(divide_const_l, k / px[i]);
CUDADEV_KERNEL_FW_X_CONST(prelu, ::fmaxf(px[i], .0f) + k * ::fminf(px[i], .0f));
CUDADEV_KERNEL_FW_X_CONST(
    elu, ::fmaxf(px[i], .0f) + k * (::expf(::fminf(px[i], .0f)) - 1.0f));
// Tensor-vs-constant backward.
CUDADEV_KERNEL_BW_X_CONST(add_const, pgy[i]);
CUDADEV_KERNEL_BW_X_CONST(subtract_const_r, pgy[i]);
CUDADEV_KERNEL_BW_X_CONST(subtract_const_l, -pgy[i]);
CUDADEV_KERNEL_BW_X_CONST(multiply_const, k * pgy[i]);
CUDADEV_KERNEL_BW_X_CONST(divide_const_r, pgy[i] / k);
CUDADEV_KERNEL_BW_X_CONST(divide_const_l, -py[i] * pgy[i] / px[i]);
CUDADEV_KERNEL_BW_X_CONST(prelu, pgy[i] * ((px[i] > .0f) + k * (px[i] <= .0f)));
CUDADEV_KERNEL_BW_X_CONST(
    elu, pgy[i] * ((px[i] > .0f) + (py[i] + k) * (px[i] <= .0f)));
// Tensor-vs-scalar-tensor forward (round-to-nearest float intrinsics).
CUDADEV_KERNEL_FW_X_SCALAR_R(add_scalar, ::__fadd_rn);
CUDADEV_KERNEL_FW_X_SCALAR_R(subtract_scalar_r, ::__fsub_rn);
CUDADEV_KERNEL_FW_X_SCALAR_L(subtract_scalar_l, ::__fsub_rn);
CUDADEV_KERNEL_FW_X_SCALAR_R(multiply_scalar, ::__fmul_rn);
CUDADEV_KERNEL_FW_X_SCALAR_R(divide_scalar_r, ::__fdiv_rn);
CUDADEV_KERNEL_FW_X_SCALAR_L(divide_scalar_l, ::__fdiv_rn);
// Tensor-vs-tensor forward.
CUDADEV_KERNEL_FW_AB(add, ::__fadd_rn);
CUDADEV_KERNEL_FW_AB(subtract, ::__fsub_rn);
CUDADEV_KERNEL_FW_AB(multiply, ::__fmul_rn);
CUDADEV_KERNEL_FW_AB(divide, ::__fdiv_rn);
// Generators are only needed for the instantiations above.
#undef CUDADEV_KERNEL_FW_X
#undef CUDADEV_KERNEL_BW_X
#undef CUDADEV_KERNEL_FW_X_CONST
#undef CUDADEV_KERNEL_BW_X_CONST
#undef CUDADEV_KERNEL_FW_X_SCALAR_R
#undef CUDADEV_KERNEL_FW_X_SCALAR_L
#undef CUDADEV_KERNEL_FW_AB
// Hand-written backward kernels for the tensor-vs-tensor operations.
// blockIdx.y is the batch index; mba/mbb (0 or 1) zero out the batch
// offset of an unbatched operand. atomicAdd is required because an
// unbatched operand accumulates gradient from every batch slice.
// add: d/da = d/db = gy.
__global__ void add_bw_dev(
    const float *, const float *, const float *, const float *pgy,
    unsigned size, unsigned mba, unsigned mbb, float *pga, float *pgb) {
  const unsigned i = IDX;
  const unsigned shift = blockIdx.y * size;
  if (i < size) {
    const float gy = pgy[i + shift];
    ::atomicAdd(pga + i + mba * shift, gy);
    ::atomicAdd(pgb + i + mbb * shift, gy);
  }
}
// subtract: d/da = gy, d/db = -gy.
__global__ void subtract_bw_dev(
    const float *, const float *, const float *, const float *pgy,
    unsigned size, unsigned mba, unsigned mbb, float *pga, float *pgb) {
  const unsigned i = IDX;
  const unsigned shift = blockIdx.y * size;
  if (i < size) {
    const float gy = pgy[i + shift];
    ::atomicAdd(pga + i + mba * shift, gy);
    ::atomicAdd(pgb + i + mbb * shift, -gy);
  }
}
// multiply: d/da = gy * b, d/db = gy * a.
__global__ void multiply_bw_dev(
    const float *pa, const float *pb, const float *, const float *pgy,
    unsigned size, unsigned mba, unsigned mbb, float *pga, float *pgb) {
  const unsigned i = IDX;
  const unsigned shift = blockIdx.y * size;
  if (i < size) {
    const float gy = pgy[i + shift];
    const unsigned a_ofs = i + mba * shift;
    const unsigned b_ofs = i + mbb * shift;
    ::atomicAdd(pga + a_ofs, gy * pb[b_ofs]);
    ::atomicAdd(pgb + b_ofs, gy * pa[a_ofs]);
  }
}
// divide (y = a / b): d/da = gy / b, d/db = -gy * y / b = -gy * a / b^2.
__global__ void divide_bw_dev(
    const float *, const float *pb, const float *py, const float *pgy,
    unsigned size, unsigned mba, unsigned mbb, float *pga, float *pgb) {
  const unsigned i = IDX;
  const unsigned shift = blockIdx.y * size;
  if (i < size) {
    const unsigned b_ofs = i + mbb * shift;
    const unsigned y_ofs = i + shift;
    const float k = pgy[y_ofs] / pb[b_ofs];
    ::atomicAdd(pga + i + mba * shift, k);
    ::atomicAdd(pgb + b_ofs, -k * py[y_ofs]);
  }
}
// 2-D transpose, one element per thread; blockIdx.z selects the batch
// slice. Forward overwrites py with px^T.
__global__ void transpose_fw_dev(
    const float *px, unsigned rows, unsigned cols, float *py) {
  const unsigned i = IDX;
  const unsigned j = IDY;
  unsigned ofs = blockIdx.z * rows * cols;
  if (i < rows && j < cols) py[ofs + j + i * cols] = px[ofs + i + j * rows];
}
// Backward accumulates (+=) py^T into px.
__global__ void transpose_bw_dev(
    const float *py, unsigned rows, unsigned cols, float *px) {
  const unsigned i = IDX;
  const unsigned j = IDY;
  unsigned ofs = blockIdx.z * rows * cols;
  if (i < rows && j < cols) px[ofs + i + j * rows] += py[ofs + j + i * cols];
}
// Block-wide sum reduction: each block reduces one output element.
// Each thread first accumulates a strided partial sum over the n inputs,
// then the block performs a shared-memory tree reduction. BLOCK_SIZE must
// be a power of two (guaranteed by the dispatching switch on the host).
template<unsigned BLOCK_SIZE>
__global__ void sum_fw_dev(
    const float *px, unsigned skip, unsigned n, float *py) {
  __shared__ float temp[BLOCK_SIZE];
  const unsigned bid = blockIdx.x;
  const unsigned tid = threadIdx.x;
  // Position px at the first summand of this block's output element;
  // consecutive summands are `skip` apart.
  px += bid % skip + (bid / skip) * skip * n;
  temp[tid] = 0;
  for (unsigned i = tid; i < n; i += BLOCK_SIZE) temp[tid] += px[i * skip];
  __syncthreads();
  // Unrolled tree reduction; the barrier is outside any divergent branch.
#define REDUCE(k) \
  if (BLOCK_SIZE >= k << 1) { \
    if (tid < k) temp[tid] += temp[tid + k]; \
    __syncthreads(); \
  }
  REDUCE(512)
  REDUCE(256)
  REDUCE(128)
  REDUCE(64)
  REDUCE(32)
  REDUCE(16)
  REDUCE(8)
  REDUCE(4)
  REDUCE(2)
  REDUCE(1)
#undef REDUCE
  if (tid == 0) py[bid] = temp[0];
}
// Numerically stable log(exp(a) + exp(b)) for two operands.
// The larger operand is factored out so expf never overflows; log1pf is
// both more accurate than logf(1 + x) for small x and keeps the whole
// computation in explicit single precision (the previous ::log/::exp calls
// relied on overload resolution instead of the dedicated float functions).
__device__ float logsumexp2_fw_dev(float a, float b) {
  return a > b
    ? a + ::log1pf(::expf(b - a))
    : b + ::log1pf(::expf(a - b));
}
// Block-wide log-sum-exp reduction, structured exactly like sum_fw_dev but
// combining partials with logsumexp2_fw_dev instead of +.
template<unsigned BLOCK_SIZE>
__global__ void logsumexp_fw_dev(
    const float *px, unsigned skip, unsigned n, float *py) {
  __shared__ float temp[BLOCK_SIZE];
  const unsigned bid = blockIdx.x;
  const unsigned tid = threadIdx.x;
  px += bid % skip + (bid / skip) * skip * n;
  temp[tid] = -1e38;  // NOTE(odashi): Near the minimum of the float.
  for (unsigned i = tid; i < n; i += BLOCK_SIZE) {
    temp[tid] = ::logsumexp2_fw_dev(temp[tid], px[i * skip]);
  }
  __syncthreads();
#define REDUCE(k) \
  if (BLOCK_SIZE >= k << 1) { \
    if (tid < k) temp[tid] = ::logsumexp2_fw_dev(temp[tid], temp[tid + k]); \
    __syncthreads(); \
  }
  REDUCE(512)
  REDUCE(256)
  REDUCE(128)
  REDUCE(64)
  REDUCE(32)
  REDUCE(16)
  REDUCE(8)
  REDUCE(4)
  REDUCE(2)
  REDUCE(1)
#undef REDUCE
  if (tid == 0) py[bid] = temp[0];
}
// Replicates px along one dimension: each period of `skip2` output
// elements maps back to a run of `skip1` input elements.
__global__ void broadcast_fw_dev(
    const float *px, unsigned skip1, unsigned skip2, unsigned size, float *py) {
  const unsigned i = IDX;
  if (i < size) py[i] = px[i % skip1 + (i / skip2) * skip1];
}
// Sums the `batch` slices of px (each `size` elements apart) into py.
__global__ void batch_sum_fw_dev(
    const float *px, unsigned size, unsigned batch, float *py) {
  const unsigned i = IDX;
  if (i < size) {
    float temp = .0f;
    px += i;
    for (unsigned j = 0; j < batch; ++j, px += size) {
      temp += *px;
    }
    py[i] = temp;
  }
}
// In-place scale: px[i] *= k.
__global__ void inplace_multiply_const_dev(
    float k, unsigned size, float *px) {
  const unsigned i = IDX;
  if (i < size) px[i] *= k;
}
// In-place py += px with batch broadcasting (mbx/mby are 0/1 batch flags);
// atomicAdd handles many batches accumulating into an unbatched py.
__global__ void inplace_add_dev(
    const float *px, unsigned size, unsigned mbx, unsigned mby, float *py) {
  const unsigned i = IDX;
  const unsigned shift = blockIdx.y * size;
  if (i < size) ::atomicAdd(py + i + mby * shift, px[i + mbx * shift]);
}
// In-place py -= px; same broadcasting rules as inplace_add_dev.
__global__ void inplace_subtract_dev(
    const float *px, unsigned size, unsigned mbx, unsigned mby, float *py) {
  const unsigned i = IDX;
  const unsigned shift = blockIdx.y * size;
  if (i < size) ::atomicAdd(py + i + mby * shift, -px[i + mbx * shift]);
}
#undef IDX
#undef IDY
// Minimum requirements of the compute capability.
static const int MIN_CC_MAJOR = 3;
static const int MIN_CC_MINOR = 0;
/*
* CUBLAS initializer/finalizer.
*/
// RAII owner of a cuBLAS handle bound to one device; non-copyable and
// non-movable so the handle is destroyed exactly once.
class CUBLASHandle {
private:
  CUBLASHandle(const CUBLASHandle &) = delete;
  CUBLASHandle(CUBLASHandle &&) = delete;
  CUBLASHandle &operator=(const CUBLASHandle &) = delete;
  CUBLASHandle &operator=(CUBLASHandle &&) = delete;
public:
  // Creates the handle on device `dev_id`.
  explicit CUBLASHandle(unsigned dev_id) {
    CUDA_CALL(::cudaSetDevice(dev_id));
    CUBLAS_CALL(::cublasCreate(&handle_));
    //cerr << "CUBLAS initialized at device " << dev_id << '.' << endl;
  }
  ~CUBLASHandle() {
    CUBLAS_CALL(::cublasDestroy(handle_));
    //cerr << "CUBLAS finalized." << endl;
  }
  // Raw handle for cublas* calls; ownership stays with this object.
  ::cublasHandle_t get() const { return handle_; }
private:
  ::cublasHandle_t handle_;
};
/*
* CURAND initializer/finalizer.
*/
// RAII owner of a cuRAND generator bound to one device and seeded once;
// non-copyable and non-movable.
class CURANDHandle {
private:
  CURANDHandle(const CURANDHandle &) = delete;
  CURANDHandle(CURANDHandle &&) = delete;
  CURANDHandle &operator=(const CURANDHandle &) = delete;
  CURANDHandle &operator=(CURANDHandle &&) = delete;
public:
  // Creates a default pseudo-random generator on device `dev_id` with the
  // given seed (explicit seeding keeps runs reproducible).
  CURANDHandle(unsigned dev_id, unsigned rng_seed) {
    CUDA_CALL(::cudaSetDevice(dev_id));
    CURAND_CALL(::curandCreateGenerator(&handle_, CURAND_RNG_PSEUDO_DEFAULT));
    CURAND_CALL(::curandSetPseudoRandomGeneratorSeed(handle_, rng_seed));
    //cerr << "CURAND initialized at device " << dev_id << '.' << endl;
  }
  ~CURANDHandle() {
    CURAND_CALL(::curandDestroyGenerator(handle_));
    //cerr << "CURAND finalized." << endl;
  }
  // Raw generator for curand* calls; ownership stays with this object.
  ::curandGenerator_t get() const { return handle_; }
private:
  ::curandGenerator_t handle_;
};
} // namespace
namespace primitiv {
namespace devices {
/*
* Hidden objects of CUDA device.
*/
// Per-device bundle of library handles and cached device properties,
// hidden behind a pointer in the public CUDA class.
struct CUDAInternalState {
  CUDAInternalState(unsigned dev_id, unsigned rng_seed)
    : cublas(dev_id) , curand(dev_id, rng_seed) {}
  ::CUBLASHandle cublas;
  ::CURANDHandle curand;
  ::cudaDeviceProp prop;  // filled in by CUDA::initialize()
};
// Returns the number of CUDA devices visible to the runtime.
unsigned CUDA::num_devices() {
  int ret;
  CUDA_CALL(::cudaGetDeviceCount(&ret));
  return ret;
}
// One-time setup shared by both constructors: validates the device,
// derives kernel launch dimensions, and creates library handles.
void CUDA::initialize() {
  // Retrieves device properties.
  ::cudaDeviceProp prop;
  CUDA_CALL(::cudaGetDeviceProperties(&prop, dev_id_));
  // Check compute capability requirements.
  if (prop.major < ::MIN_CC_MAJOR ||
      (prop.major == ::MIN_CC_MAJOR && prop.minor < ::MIN_CC_MINOR)) {
    THROW_ERROR(
        "CUDA Device " << dev_id_ << " does not satisfy the "
        "minimum requirement of the compute capability: "
        << prop.major << '.' << prop.minor << " < "
        << ::MIN_CC_MAJOR << '.' << ::MIN_CC_MINOR);
  }
  // Calculates size of dims to be used in CUDA kernels.
  // dim1_x_: largest power of two not exceeding min(1024, maxThreadsPerBlock).
  dim1_x_ = 1;
  while (dim1_x_ < 1024 &&
      dim1_x_ < static_cast<unsigned>(prop.maxThreadsPerBlock)) {
    dim1_x_ <<= 1;
  }
  // Split the same thread budget into a near-square 2-D block
  // (dim2_x_ * dim2_y_ == dim1_x_).
  dim2_y_ = dim1_x_;
  dim2_x_ = 1;
  while (dim2_x_ < dim2_y_) {
    dim2_x_ <<= 1;
    dim2_y_ >>= 1;
  }
  max_batch_ = prop.maxGridSize[1];
  // Initializes additional libraries
  state_.reset(new CUDAInternalState(dev_id_, rng_seed_));
  state_->prop = prop;
  // Initializes the device pointer for integer IDs.
  ids_ptr_ = pool_.allocate(sizeof(unsigned) * max_batch_);
}
// Constructs a device backend with a nondeterministic RNG seed.
CUDA::CUDA(unsigned device_id)
: dev_id_(device_id)
, rng_seed_(std::random_device()())
, pool_(device_id) {
  initialize();
}
// Constructs a device backend with an explicit RNG seed (reproducible).
CUDA::CUDA(unsigned device_id, unsigned rng_seed)
: dev_id_(device_id)
, rng_seed_(rng_seed)
, pool_(device_id) {
  initialize();
}
// Members (state_, pool_) clean up via their own destructors.
CUDA::~CUDA() {
  // Nothing to do for now.
}
// Prints the cached device properties to stderr for diagnostics.
void CUDA::dump_description() const {
  cerr << "Device " << this << ':' << endl;
  cerr << "  Type: CUDA" << endl;
  const ::cudaDeviceProp &prop = state_->prop;
  cerr << "  Physical Device: " << dev_id_ << ':' << endl;
  cerr << "    Name ................. " << prop.name << endl;
  cerr << "    Global Memory ........ " << prop.totalGlobalMem << endl;
  cerr << "    Shared Memory ........ " << prop.sharedMemPerBlock << endl;
  cerr << "    Threads/block ........ " << prop.maxThreadsPerBlock << endl;
  cerr << "    Threads dim .......... " << prop.maxThreadsDim[0] << ','
                                        << prop.maxThreadsDim[1] << ','
                                        << prop.maxThreadsDim[2] << endl;
  cerr << "    Grid size ............ " << prop.maxGridSize[0] << ','
                                        << prop.maxGridSize[1] << ','
                                        << prop.maxGridSize[2] << endl;
  cerr << "    Compute Capability ... " << prop.major << '.'
                                        << prop.minor << endl;
  /*
  cerr << "    Configurations:" << endl;
  cerr << "      1 dim ........... " << dim1_x_ << " threads" << endl;
  cerr << "      2 dims .......... " << dim2_x_ << "x"
                                     << dim2_y_ << " threads" << endl;
  cerr << "      Maximum batch ... " << max_batch_ <<endl;
  */
}
// Allocates device memory for one tensor of the given shape from the pool.
std::shared_ptr<void> CUDA::new_handle(const Shape &shape) {
  return pool_.allocate(sizeof(float) * shape.size());
}
#define GRID_SIZE(x, threads) (((x) + (threads) - 1) / (threads))
#define DATA(x) static_cast<float *>((x).data())
#define CDATA(x) static_cast<const float *>((x).data())
// Copies the whole tensor from device memory into a host vector.
// Fix: use ret.data() instead of &ret[0] — indexing element 0 of an empty
// vector is undefined behavior, while data() is always valid (and
// cudaMemcpy with a zero byte count is a no-op).
std::vector<float> CUDA::tensor_to_vector_impl(const Tensor &x) {
  const unsigned size = x.shape().size();
  std::vector<float> ret(size);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  CUDA_CALL(::cudaMemcpy(
      ret.data(), x.data(), sizeof(float) * size, cudaMemcpyDeviceToHost));
  return ret;
}
// Fills every element of x with the constant k.
void CUDA::reset_tensor_impl(float k, Tensor &x) {
  const unsigned size = x.shape().size();
  const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::set_const_dev<<<num_blocks, dim1_x_>>>(k, size, DATA(x));
}
// Overwrites x with `size` floats copied from host memory.
void CUDA::reset_tensor_by_array_impl(const float values[], Tensor &x) {
  const unsigned size = x.shape().size();
  CUDA_CALL(::cudaSetDevice(dev_id_));
  CUDA_CALL(::cudaMemcpy(
      x.data(), values, sizeof(float) * size, cudaMemcpyHostToDevice));
}
// Copies tensor x (possibly on another device) into y, choosing the
// cheapest transfer path for the source device type.
void CUDA::copy_tensor_impl(const Tensor &x, Tensor &y) {
  switch (x.device().type()) {
    case Device::DEVICE_TYPE_CPU:
      reset_tensor_by_array(CDATA(x), y);
      break;
    case Device::DEVICE_TYPE_CUDA:
      CUDA_CALL(::cudaSetDevice(dev_id_));
      // NOTE(odashi):
      // If source/destination devices use the unified memory space on the 64
      // bits machine, we can perform ::cudaMemcpy to copy data beyond devices.
      CUDA_CALL(::cudaMemcpyAsync(
            DATA(y), CDATA(x),
            sizeof(float) * x.shape().size(),
            cudaMemcpyDeviceToDevice, 0));
      break;
    default:
      // Fallback: round-trip through a host vector.
      reset_tensor_by_vector(x.to_vector(), y);
  }
}
// Writes an identity matrix into y (skip = rows + 1 marks the diagonal).
void CUDA::identity_impl(Tensor &y) {
  const unsigned size = y.shape().size();
  const unsigned skip = y.shape()[0] + 1;
  const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::set_identity_dev<<<num_blocks, dim1_x_>>>(size, skip, DATA(y));
}
// Fills y with Bernoulli(p) samples: generate uniforms into y, then
// threshold them in place on the device.
void CUDA::random_bernoulli_impl(float p, Tensor &y) {
  const unsigned size = y.shape().size();
  const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  CURAND_CALL(::curandGenerateUniform(state_->curand.get(), DATA(y), size));
  ::rand_bernoulli_dev<<<num_blocks, dim1_x_>>>(p, size, DATA(y));
}
// Fills y with uniform samples on (lower, upper]: generate uniforms, then
// apply the affine map v * (upper - lower) + lower in place.
void CUDA::random_uniform_impl(float lower, float upper, Tensor &y) {
  const unsigned size = y.shape().size();
  const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
  const float scale = upper - lower;
  CUDA_CALL(::cudaSetDevice(dev_id_));
  CURAND_CALL(::curandGenerateUniform(state_->curand.get(), DATA(y), size));
  ::rand_affine_dev<<<num_blocks, dim1_x_>>>(lower, scale, size, DATA(y));
}
// Fills y with N(mean, sd^2) samples directly via cuRAND.
void CUDA::random_normal_impl(float mean, float sd, Tensor &y) {
  CUDA_CALL(::cudaSetDevice(dev_id_));
  CURAND_CALL(::curandGenerateNormal(
        state_->curand.get(), DATA(y), y.shape().size(), mean, sd));
}
// Fills y with log-normal samples directly via cuRAND.
void CUDA::random_log_normal_impl(float mean, float sd, Tensor &y) {
  CUDA_CALL(::cudaSetDevice(dev_id_));
  CURAND_CALL(::curandGenerateLogNormal(
        state_->curand.get(), DATA(y), y.shape().size(), mean, sd));
}
// Gathers slices of x selected by `ids` along `dim` into y. The id array
// is staged through the preallocated device buffer ids_ptr_; `ids.size() >
// 1` tells the kernel whether ids are per-batch or shared.
void CUDA::pick_fw_impl(
    const Tensor &x, const std::vector<unsigned> &ids, unsigned dim,
    Tensor &y) {
  const unsigned wy = y.shape().lower_volume(dim);
  const unsigned sy = y.shape().volume();
  const unsigned g1 = GRID_SIZE(sy, dim1_x_);
  const unsigned bs = y.shape().batch();
  CUDA_CALL(::cudaSetDevice(dev_id_));
  CUDA_CALL(::cudaMemcpy(
        ids_ptr_.get(), ids.data(), sizeof(unsigned) * ids.size(),
        cudaMemcpyHostToDevice));
  ::pick_fw_dev<<<dim3(g1, bs), dim1_x_>>>(
      CDATA(x), static_cast<const unsigned *>(ids_ptr_.get()),
      wy * x.shape()[dim], wy,
      x.shape().has_batch() * x.shape().volume(), ids.size() > 1, sy,
      DATA(y));
}
// Copies the window [offset, offset + y-extent) of dimension `dim` from x
// into the dense tensor y.
void CUDA::slice_fw_impl(
    const Tensor &x, unsigned dim, unsigned offset, Tensor &y) {
  const unsigned base = y.shape().lower_volume(dim);
  const unsigned span = base * y.shape()[dim];
  const unsigned skip = base * x.shape()[dim];
  const unsigned size = y.shape().size();
  const unsigned num_blocks = GRID_SIZE(size, dim1_x_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::slice_fw_dev<<<num_blocks, dim1_x_>>>(
      CDATA(x) + base * offset, span, skip, size, DATA(y));
}
// Concatenates the tensors xs along `dim` into y, scattering each input
// into its strided window at an increasing offset.
void CUDA::concat_fw_impl(
    const std::vector<const Tensor *> &xs, unsigned dim, Tensor &y) {
  const unsigned new_bs = y.shape().batch();
  const unsigned base = y.shape().lower_volume(dim);
  const unsigned skip = base * y.shape()[dim];
  unsigned repeat = y.shape().volume() / skip;
  CUDA_CALL(::cudaSetDevice(dev_id_));
  unsigned offset = 0;
  for (const Tensor *x : xs) {
    const unsigned span = base * x->shape()[dim];
    const unsigned x_size = span * repeat * x->shape().batch();
    const unsigned y_size = span * repeat * new_bs;
    const unsigned num_blocks = GRID_SIZE(y_size, dim1_x_);
    ::concat_fw_dev<<<num_blocks, dim1_x_>>>(
       CDATA(*x), span, skip, x_size, y_size, DATA(y) + offset);
    offset += span;
  }
}
// Backward of pick: scatters/accumulates gy into gx at the picked offsets
// (mirrors pick_fw_impl's launch parameters).
void CUDA::pick_bw_impl(
    const Tensor &gy, const std::vector<unsigned>& ids, unsigned dim,
    Tensor &gx) {
  const unsigned wy = gy.shape().lower_volume(dim);
  const unsigned sy = gy.shape().volume();
  const unsigned g1 = GRID_SIZE(sy, dim1_x_);
  const unsigned bs = gy.shape().batch();
  CUDA_CALL(::cudaSetDevice(dev_id_));
  CUDA_CALL(::cudaMemcpy(
        ids_ptr_.get(), ids.data(), sizeof(unsigned) * ids.size(),
        cudaMemcpyHostToDevice));
  ::pick_bw_dev<<<dim3(g1, bs), dim1_x_>>>(
      CDATA(gy), static_cast<const unsigned *>(ids_ptr_.get()),
      wy *gx.shape()[dim], wy,
      gx.shape().has_batch() * gx.shape().volume(), ids.size() > 1, sy,
      DATA(gx));
}
// Backward of slice: accumulates gy into gx's window starting at `offset`
// along `dim`; nx/ny are repeat counts used for batch broadcasting.
void CUDA::slice_bw_impl(
    const Tensor &gy, unsigned dim, unsigned offset, Tensor &gx) {
  const Shape &sx = gx.shape();
  const Shape &sy = gy.shape();
  const unsigned base = sx.lower_volume(dim);
  const unsigned ox = base * offset;
  const unsigned wx = base * sx[dim];
  const unsigned wy = base * sy[dim];
  const unsigned repeat = sx.volume() / wx;
  const unsigned nx = repeat * sx.batch();
  const unsigned ny = repeat * sy.batch();
  const unsigned g1 = GRID_SIZE(wy * std::max(nx, ny), dim1_x_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::slice_bw_dev<<<g1, dim1_x_>>>(CDATA(gy), wx, wy, nx, ny, DATA(gx) + ox);
}
// Launcher generators: each expands to one CUDA::*_impl member that
// computes the launch geometry and invokes the matching device kernel.
// Batched variants use a (volume, batch) grid and pass has_batch() flags
// so the kernels can broadcast unbatched operands.
// Unary forward launcher.
#define CUDADEV_FW_X(name) \
void CUDA::name##_fw_impl(const Tensor &x, Tensor &y) { \
  const unsigned size = x.shape().size(); \
  const unsigned num_blocks = GRID_SIZE(size, dim1_x_); \
  CUDA_CALL(::cudaSetDevice(dev_id_)); \
  ::name##_fw_dev<<<num_blocks, dim1_x_>>>(CDATA(x), size, DATA(y)); \
}
// Unary backward launcher.
#define CUDADEV_BW_X(name) \
void CUDA::name##_bw_impl( \
    const Tensor &x, const Tensor &y, const Tensor &gy, Tensor &gx) { \
  const unsigned size = x.shape().size(); \
  const unsigned num_blocks = GRID_SIZE(size, dim1_x_); \
  CUDA_CALL(::cudaSetDevice(dev_id_)); \
  ::name##_bw_dev<<<num_blocks, dim1_x_>>>( \
      CDATA(x), CDATA(y), CDATA(gy), size, DATA(gx)); \
}
// Tensor-vs-constant forward launcher.
#define CUDADEV_FW_X_CONST(name) \
void CUDA::name##_fw_impl(const Tensor &x, float k, Tensor &y) { \
  const unsigned size = x.shape().size(); \
  const unsigned num_blocks = GRID_SIZE(size,dim1_x_); \
  CUDA_CALL(::cudaSetDevice(dev_id_)); \
  ::name##_fw_dev<<<num_blocks, dim1_x_>>>(CDATA(x), k, size, DATA(y)); \
}
// Tensor-vs-constant backward launcher.
#define CUDADEV_BW_X_CONST(name) \
void CUDA::name##_bw_impl( \
    const Tensor &x, const Tensor &y, const Tensor &gy, float k, Tensor &gx) { \
  const unsigned size = x.shape().size(); \
  const unsigned num_blocks = GRID_SIZE(size, dim1_x_); \
  CUDA_CALL(::cudaSetDevice(dev_id_)); \
  ::name##_bw_dev<<<num_blocks, dim1_x_>>>( \
      CDATA(x), CDATA(y), CDATA(gy), k, size, DATA(gx)); \
}
// Tensor-vs-scalar-tensor forward launcher (batched grid).
#define CUDADEV_FW_X_SCALAR(name) \
void CUDA::name##_fw_impl(const Tensor &x, const Tensor &k, Tensor &y) { \
  const unsigned size = y.shape().volume(); \
  const unsigned g1 = GRID_SIZE(size, dim1_x_); \
  const unsigned g2 = y.shape().batch(); \
  CUDA_CALL(::cudaSetDevice(dev_id_)); \
  ::name##_fw_dev<<<dim3(g1, g2, 1), dim1_x_>>>( \
      CDATA(x), CDATA(k), size, \
      x.shape().has_batch(), k.shape().has_batch(), DATA(y)); \
}
// Tensor-vs-tensor forward launcher (batched grid).
#define CUDADEV_FW_AB(name) \
void CUDA::name##_fw_impl(const Tensor &a, const Tensor &b, Tensor &y) { \
  const unsigned size = y.shape().volume(); \
  const unsigned g1 = GRID_SIZE(size, dim1_x_); \
  const unsigned g2 = y.shape().batch(); \
  CUDA_CALL(::cudaSetDevice(dev_id_)); \
  ::name##_fw_dev<<<dim3(g1, g2, 1), dim1_x_>>>( \
      CDATA(a), CDATA(b), size, \
      a.shape().has_batch(), b.shape().has_batch(), DATA(y)); \
}
// Tensor-vs-tensor backward launcher (batched grid).
#define CUDADEV_BW_AB(name) \
void CUDA::name##_bw_impl( \
    const Tensor &a, const Tensor &b, const Tensor &y, const Tensor &gy, \
    Tensor &ga, Tensor &gb) { \
  const unsigned size = y.shape().volume(); \
  const unsigned g1 = GRID_SIZE(size, dim1_x_); \
  const unsigned g2 = y.shape().batch(); \
  CUDA_CALL(::cudaSetDevice(dev_id_)); \
  ::name##_bw_dev<<<dim3(g1, g2, 1), dim1_x_>>>( \
      CDATA(a), CDATA(b), CDATA(y), CDATA(gy), size, \
      a.shape().has_batch(), b.shape().has_batch(), DATA(ga), DATA(gb)); \
}
// Instantiate the launchers above for every elementwise operation that has
// a generated device kernel.
// Unary forward.
CUDADEV_FW_X(negate);
CUDADEV_FW_X(sqrt);
CUDADEV_FW_X(exp);
CUDADEV_FW_X(log);
CUDADEV_FW_X(tanh);
CUDADEV_FW_X(sigmoid);
CUDADEV_FW_X(softplus);
CUDADEV_FW_X(sin);
CUDADEV_FW_X(cos);
CUDADEV_FW_X(tan);
// Unary backward (negate needs no dedicated backward launcher here).
CUDADEV_BW_X(sqrt);
CUDADEV_BW_X(exp);
CUDADEV_BW_X(log);
CUDADEV_BW_X(tanh);
CUDADEV_BW_X(sigmoid);
CUDADEV_BW_X(softplus);
CUDADEV_BW_X(sin);
CUDADEV_BW_X(cos);
CUDADEV_BW_X(tan);
// Tensor-vs-immediate-constant forward/backward.
CUDADEV_FW_X_CONST(add_const);
CUDADEV_FW_X_CONST(subtract_const_r);
CUDADEV_FW_X_CONST(subtract_const_l);
CUDADEV_FW_X_CONST(multiply_const);
CUDADEV_FW_X_CONST(divide_const_r);
CUDADEV_FW_X_CONST(divide_const_l);
CUDADEV_FW_X_CONST(prelu);
CUDADEV_FW_X_CONST(elu);
CUDADEV_BW_X_CONST(add_const);
CUDADEV_BW_X_CONST(subtract_const_r);
CUDADEV_BW_X_CONST(subtract_const_l);
CUDADEV_BW_X_CONST(multiply_const);
CUDADEV_BW_X_CONST(divide_const_r);
CUDADEV_BW_X_CONST(divide_const_l);
CUDADEV_BW_X_CONST(prelu);
CUDADEV_BW_X_CONST(elu);
// Tensor-vs-scalar-tensor forward.
CUDADEV_FW_X_SCALAR(add_scalar);
CUDADEV_FW_X_SCALAR(subtract_scalar_r);
CUDADEV_FW_X_SCALAR(subtract_scalar_l);
CUDADEV_FW_X_SCALAR(multiply_scalar);
CUDADEV_FW_X_SCALAR(divide_scalar_r);
CUDADEV_FW_X_SCALAR(divide_scalar_l);
// Tensor-vs-tensor forward/backward.
CUDADEV_FW_AB(add);
CUDADEV_FW_AB(subtract);
CUDADEV_FW_AB(multiply);
CUDADEV_FW_AB(divide);
CUDADEV_BW_AB(add);
CUDADEV_BW_AB(subtract);
CUDADEV_BW_AB(multiply);
CUDADEV_BW_AB(divide);
// The launcher macros are only needed for the instantiations above.
#undef CUDADEV_FW_X
#undef CUDADEV_BW_X
#undef CUDADEV_FW_X_CONST
#undef CUDADEV_BW_X_CONST
#undef CUDADEV_FW_X_SCALAR
#undef CUDADEV_FW_AB
#undef CUDADEV_BW_AB
void CUDA::transpose_fw_impl(const Tensor &x, Tensor &y) {
  // Transposes each 2D slice of x into y using a 2D thread-block tiling;
  // one grid layer (z-dimension) per batch element.
  const unsigned n_rows = x.shape()[0];
  const unsigned n_cols = x.shape()[1];
  const unsigned batch = x.shape().batch();
  const unsigned blocks_x = GRID_SIZE(n_rows, dim2_x_);
  const unsigned blocks_y = GRID_SIZE(n_cols, dim2_y_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::transpose_fw_dev<<<dim3(blocks_x, blocks_y, batch), dim3(dim2_x_, dim2_y_, 1)>>>(
      CDATA(x), n_rows, n_cols, DATA(y));
}
void CUDA::matmul_fw_impl(const Tensor &a, const Tensor &b, Tensor &y) {
  // Computes y = a . b via cublasSgemm. cuBLAS uses column-major storage;
  // leading dimensions are di for a/y and dj for b.
  const unsigned di = a.shape()[0];  // rows of a and y
  const unsigned dj = a.shape()[1];  // cols of a == rows of b
  const unsigned dk = b.shape()[1];  // cols of b and y
  float alpha = 1.;
  float beta = 0.;   // overwrite y (no accumulation in the forward pass)
  CUDA_CALL(::cudaSetDevice(dev_id_));
  if (a.shape().has_batch()) {
    // Do gemm multiple times.
    const unsigned a_skip = di * dj;
    // b_skip is 0 when b has no batch, so the same b is reused every step.
    const unsigned b_skip = b.shape().has_batch() * dj * dk;
    const unsigned y_skip = di * dk;
    const unsigned bs = a.shape().batch();
    for (unsigned n = 0; n < bs; ++n) {
      CUBLAS_CALL(::cublasSgemm(
          state_->cublas.get(), ::CUBLAS_OP_N, ::CUBLAS_OP_N,
          di, dk, dj,
          &alpha, CDATA(a) + n * a_skip, di, CDATA(b) + n * b_skip, dj,
          &beta, DATA(y) + n * y_skip, di));
    }
  } else {
    // Do gemm only once to calculate the product with a combined matrix.
    // (All batches of b are treated as one wide dj x (dk * batch) matrix.)
    CUBLAS_CALL(::cublasSgemm(
        state_->cublas.get(), ::CUBLAS_OP_N, ::CUBLAS_OP_N,
        di, dk * b.shape().batch(), dj,
        &alpha, CDATA(a), di, CDATA(b), dj,
        &beta, DATA(y), di));
  }
}
void CUDA::transpose_bw_impl(
    const Tensor &, const Tensor &, const Tensor &gy, Tensor &gx) {
  // Backward of transpose: transpose the incoming gradient gy into gx.
  // The forward inputs/outputs are unused, hence the unnamed parameters.
  const unsigned n_rows = gx.shape()[0];
  const unsigned n_cols = gx.shape()[1];
  const unsigned batch = gx.shape().batch();
  const unsigned blocks_x = GRID_SIZE(n_rows, dim2_x_);
  const unsigned blocks_y = GRID_SIZE(n_cols, dim2_y_);
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::transpose_bw_dev<<<dim3(blocks_x, blocks_y, batch), dim3(dim2_x_, dim2_y_, 1)>>>(
      CDATA(gy), n_rows, n_cols, DATA(gx));
}
void CUDA::matmul_bw_impl(
    const Tensor &a, const Tensor &b, const Tensor &, const Tensor &gy,
    Tensor &ga, Tensor &gb) {
  // ga += gy . b^T
  // gb += a^T . gy
  const unsigned di = a.shape()[0];
  const unsigned dj = a.shape()[1];
  const unsigned dk = b.shape()[1];
  float alpha = 1.;
  float beta = 1.;   // beta = 1: accumulate into existing gradients
  CUDA_CALL(::cudaSetDevice(dev_id_));
  if (a.shape().has_batch()) {
    // Do gemm multiple times.
    const unsigned a_skip = di * dj;
    // 0 when b is not batched: the single gb then accumulates over all n.
    const unsigned b_skip = b.shape().has_batch() * dj * dk;
    const unsigned y_skip = di * dk;
    const unsigned bs = a.shape().batch();
    for (unsigned n = 0; n < bs; ++n) {
      // ga_n += gy_n . b_n^T
      CUBLAS_CALL(::cublasSgemm(
          state_->cublas.get(), ::CUBLAS_OP_N, ::CUBLAS_OP_T,
          di, dj, dk,
          &alpha, CDATA(gy) + n * y_skip, di, CDATA(b) + n * b_skip, dj,
          &beta, DATA(ga) + n * a_skip, di));
      // gb_n += a_n^T . gy_n
      CUBLAS_CALL(::cublasSgemm(
          state_->cublas.get(), ::CUBLAS_OP_T, ::CUBLAS_OP_N,
          dj, dk, di,
          &alpha, CDATA(a) + n * a_skip, di, CDATA(gy) + n * y_skip, di,
          &beta, DATA(gb) + n * b_skip, dj));
    }
  } else {
    // Do gemm only once to calculate the product with a combined matrix.
    CUBLAS_CALL(::cublasSgemm(
        state_->cublas.get(), ::CUBLAS_OP_N, ::CUBLAS_OP_T,
        di, dj, dk * b.shape().batch(),
        &alpha, CDATA(gy), di, CDATA(b), dj,
        &beta, DATA(ga), di));
    CUBLAS_CALL(::cublasSgemm(
        state_->cublas.get(), ::CUBLAS_OP_T, ::CUBLAS_OP_N,
        dj, dk * b.shape().batch(), di,
        &alpha, CDATA(a), di, CDATA(gy), di,
        &beta, DATA(gb), dj));
  }
}
void CUDA::sum_fw_impl(const Tensor &x, unsigned dim, Tensor &y) {
  // Reduces x along axis `dim` into y. One thread block per output element.
  const unsigned n = x.shape()[dim];              // extent of the reduced axis
  const unsigned r = y.shape().size();            // number of output elements
  const unsigned s = y.shape().lower_volume(dim); // stride below `dim`
  // Shrink the block to the smallest power of two with block_size/2 < n;
  // threads beyond n would otherwise be pure overhead.
  unsigned block_size = dim1_x_;
  while (block_size >> 1 >= n) block_size >>= 1;
  CUDA_CALL(::cudaSetDevice(dev_id_));
  // Dispatch to the kernel specialization matching the chosen block size
  // (block size is a template parameter of sum_fw_dev — presumably so the
  // reduction buffer can be sized at compile time; confirm in the kernel).
  switch (block_size) {
#define CASE(k) \
  case k: ::sum_fw_dev<k><<<r, k>>>(CDATA(x), s, n, DATA(y)); break
    CASE(1024);
    CASE(512);
    CASE(256);
    CASE(128);
    CASE(64);
    CASE(32);
    CASE(16);
    CASE(8);
    CASE(4);
    CASE(2);
    CASE(1);
#undef CASE
  }
}
void CUDA::logsumexp_fw_impl(const Tensor &x, unsigned dim, Tensor &y) {
  // log-sum-exp reduction along axis `dim`; same launch strategy as
  // sum_fw_impl: one block per output element, block size shrunk to fit n.
  const unsigned n = x.shape()[dim];              // extent of the reduced axis
  const unsigned r = y.shape().size();            // number of output elements
  const unsigned s = y.shape().lower_volume(dim); // stride below `dim`
  unsigned block_size = dim1_x_;
  while (block_size >> 1 >= n) block_size >>= 1;
  CUDA_CALL(::cudaSetDevice(dev_id_));
  // Template dispatch on block size, mirroring sum_fw_impl.
  switch (block_size) {
#define CASE(k) \
  case k: ::logsumexp_fw_dev<k><<<r, k>>>(CDATA(x), s, n, DATA(y)); break
    CASE(1024);
    CASE(512);
    CASE(256);
    CASE(128);
    CASE(64);
    CASE(32);
    CASE(16);
    CASE(8);
    CASE(4);
    CASE(2);
    CASE(1);
#undef CASE
  }
}
void CUDA::broadcast_fw_impl(
    const Tensor &x, unsigned dim, unsigned size, Tensor &y) {
  // Replicates x `size` times along axis `dim` into y.
  // inner: elements below the broadcast axis; outer: inner * size.
  const unsigned inner = y.shape().lower_volume(dim);
  const unsigned outer = inner * size;
  const unsigned total = y.shape().size();
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::broadcast_fw_dev<<<GRID_SIZE(total, dim1_x_), dim1_x_>>>(
      CDATA(x), inner, outer, total, DATA(y));
}
void CUDA::batch_sum_fw_impl(const Tensor &x, Tensor &y) {
  // Sums x over its batch dimension into the (unbatched) output y.
  const unsigned volume = y.shape().size();
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::batch_sum_fw_dev<<<GRID_SIZE(volume, dim1_x_), dim1_x_>>>(
      CDATA(x), volume, x.shape().batch(), DATA(y));
}
void CUDA::inplace_multiply_const_impl(float k, Tensor &x) {
  // x *= k, elementwise and in place.
  const unsigned total = x.shape().size();
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::inplace_multiply_const_dev<<<GRID_SIZE(total, dim1_x_), dim1_x_>>>(
      k, total, DATA(x));
}
void CUDA::inplace_add_impl(const Tensor &x, Tensor &y) {
  // y += x, in place. The grid's y-dimension spans the larger batch size;
  // the has_batch() flags presumably let the kernel broadcast the
  // non-batched operand — confirm in inplace_add_dev.
  const unsigned size = y.shape().volume();
  const unsigned g1 = GRID_SIZE(size, dim1_x_);
  const unsigned bs = std::max(x.shape().batch(), y.shape().batch());
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::inplace_add_dev<<<dim3(g1, bs, 1), dim1_x_>>>(
      CDATA(x), size, x.shape().has_batch(), y.shape().has_batch(), DATA(y));
}
void CUDA::inplace_subtract_impl(const Tensor &x, Tensor &y) {
  // y -= x, in place. Grid y-dimension covers the larger of the two batch
  // sizes; per-operand has_batch() flags are forwarded to the kernel.
  const unsigned volume = y.shape().volume();
  const unsigned batch = std::max(x.shape().batch(), y.shape().batch());
  CUDA_CALL(::cudaSetDevice(dev_id_));
  ::inplace_subtract_dev<<<dim3(GRID_SIZE(volume, dim1_x_), batch, 1), dim1_x_>>>(
      CDATA(x), volume, x.shape().has_batch(), y.shape().has_batch(), DATA(y));
}
} // namespace devices
} // namespace primitiv
|
25be80bcbb4e701d70aca96343c7be84607d9363.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
// MATLAB MEX entry point. Currently a stub: the argument-handling code that
// would copy a double array into a GPU buffer is commented out, so calling
// this MEX file is a no-op.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
  // At least 2 arguments expected
  // Input and result
  //if ((nlhs == 1) & (nrhs==1)) {
  // double -> GPU array
  // double *from = mxGetPr(prhs[0]);
  // mwSize dims[2]; dims[0] = 5; dims[1] = 1;
  // plhs[0] = mxCreateNumericArray(2, dims, mxINT64_CLASS, mxREAL);
}
| 25be80bcbb4e701d70aca96343c7be84607d9363.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
// MATLAB MEX entry point (CUDA build). Currently a stub: the argument
// handling below is commented out, so invoking the MEX file does nothing.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
  // At least 2 arguments expected
  // Input and result
  //if ((nlhs == 1) & (nrhs==1)) {
  // double -> GPU array
  // double *from = mxGetPr(prhs[0]);
  // mwSize dims[2]; dims[0] = 5; dims[1] = 1;
  // plhs[0] = mxCreateNumericArray(2, dims, mxINT64_CLASS, mxREAL);
}
|
136d70c3d7f0b33766749d1516cdd30aa25cfa44.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "../collective/communicator-inl.cuh"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/cuda_context.cuh" // CUDAContext
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/io.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "../data/ellpack_page.h"
#include "constraints.cuh"
#include "driver.h"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "hist/param.h"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/task.h" // for ObjInfo
#include "xgboost/tree_model.h"
namespace xgboost::tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
/**
 * \struct DeviceHistogramStorage
 *
 * \summary Data storage for node histograms on device. Automatically expands.
 *
 * \tparam GradientSumT histogram entry type.
 * \tparam kStopGrowingSize Do not grow beyond this size
 *
 * \author Rory
 * \date 28/07/2018
 */
template <size_t kStopGrowingSize = 1 << 28>
class DeviceHistogramStorage {
 private:
  using GradientSumT = GradientPairInt64;
  /*! \brief Map nidx to starting index of its histogram. */
  std::map<int, size_t> nidx_map_;
  // Large buffer of zeroed memory, caches histograms
  dh::device_vector<typename GradientSumT::ValueT> data_;
  // If we run out of storage allocate one histogram at a time
  // in overflow. Not cached, overwritten when a new histogram
  // is requested
  dh::device_vector<typename GradientSumT::ValueT> overflow_;
  std::map<int, size_t> overflow_nidx_map_;
  int n_bins_;
  int device_id_;
  // Each GradientSumT packs its scalar components contiguously; histograms
  // are stored as flat arrays of those scalars.
  static constexpr size_t kNumItemsInGradientSum =
      sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
  static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2.");

 public:
  // Start with about 16mb
  DeviceHistogramStorage() { data_.reserve(1 << 22); }
  // Record the target device and the number of histogram bins per node.
  void Init(int device_id, int n_bins) {
    this->n_bins_ = n_bins;
    this->device_id_ = device_id;
  }

  // Zero the cached buffer and forget all node -> offset mappings.
  // Overflow histograms are invalidated as well (their map is cleared).
  void Reset() {
    auto d_data = data_.data().get();
    dh::LaunchN(data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; });
    nidx_map_.clear();
    overflow_nidx_map_.clear();
  }
  [[nodiscard]] bool HistogramExists(int nidx) const {
    return nidx_map_.find(nidx) != nidx_map_.cend() ||
           overflow_nidx_map_.find(nidx) != overflow_nidx_map_.cend();
  }
  [[nodiscard]] int Bins() const { return n_bins_; }
  // Number of scalar items one node histogram occupies in the flat buffer.
  [[nodiscard]] size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; }
  dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; }

  // Reserve zeroed, contiguous histogram slots for the given (new) nodes.
  // Once the cached buffer would exceed kStopGrowingSize, slots come from
  // the transient overflow buffer instead, which is rewritten on each call.
  void AllocateHistograms(const std::vector<int>& new_nidxs) {
    for (int nidx : new_nidxs) {
      CHECK(!HistogramExists(nidx));
    }
    // Number of items currently used in data
    const size_t used_size = nidx_map_.size() * HistogramSize();
    const size_t new_used_size = used_size + HistogramSize() * new_nidxs.size();
    if (used_size >= kStopGrowingSize) {
      // Use overflow
      // Delete previous entries
      overflow_nidx_map_.clear();
      overflow_.resize(HistogramSize() * new_nidxs.size());
      // Zero memory
      auto d_data = overflow_.data().get();
      dh::LaunchN(overflow_.size(),
                  [=] __device__(size_t idx) { d_data[idx] = 0.0; });
      // Append new histograms
      for (int nidx : new_nidxs) {
        overflow_nidx_map_[nidx] = overflow_nidx_map_.size() * HistogramSize();
      }
    } else {
      CHECK_GE(data_.size(), used_size);
      // Expand if necessary
      if (data_.size() < new_used_size) {
        data_.resize(::max(data_.size() * 2, new_used_size));
      }
      // Append new histograms
      for (int nidx : new_nidxs) {
        nidx_map_[nidx] = nidx_map_.size() * HistogramSize();
      }
    }
    CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
  }

  /**
   * \summary Return pointer to histogram memory for a given node.
   * \param nidx Tree node index.
   * \return hist pointer.
   */
  common::Span<GradientSumT> GetNodeHistogram(int nidx) {
    CHECK(this->HistogramExists(nidx));
    if (nidx_map_.find(nidx) != nidx_map_.cend()) {
      // Fetch from normal cache
      auto ptr = data_.data().get() + nidx_map_.at(nidx);
      return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
    } else {
      // Fetch from overflow
      auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx);
      return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
    }
  }
};
// Manage memory for a single GPU
struct GPUHistMakerDevice {
 private:
  GPUHistEvaluator evaluator_;             // split evaluation on device
  Context const* ctx_;                     // runtime context (device id, streams)
  std::shared_ptr<common::ColumnSampler> column_sampler_;  // per-level/tree feature sampling

 public:
  EllpackPageImpl const* page{nullptr};    // current quantized data page
  common::Span<FeatureType const> feature_types;
  std::unique_ptr<RowPartitioner> row_partitioner;  // row -> node assignment
  DeviceHistogramStorage<> hist{};
  dh::device_vector<GradientPair> d_gpair; // storage for gpair;
  common::Span<GradientPair> gpair;        // view over (possibly sampled) gradients
  dh::device_vector<int> monotone_constraints;
  // node idx for each sample
  dh::device_vector<bst_node_t> positions;
  TrainParam param;
  std::unique_ptr<GradientQuantiser> quantiser;  // float <-> fixed-point gradient conversion
  dh::PinnedMemory pinned;
  dh::PinnedMemory pinned2;
  common::Monitor monitor;                 // timing instrumentation
  FeatureInteractionConstraintDevice interaction_constraints;
  std::unique_ptr<GradientBasedSampler> sampler;
  std::unique_ptr<FeatureGroups> feature_groups;  // shared-memory histogram grouping
// Builds the per-GPU training state. Note: `param` is initialized before
// `interaction_constraints(param, ...)` runs, matching declaration order.
GPUHistMakerDevice(Context const* ctx, bool is_external_memory,
                   common::Span<FeatureType const> _feature_types, bst_row_t _n_rows,
                   TrainParam _param, std::shared_ptr<common::ColumnSampler> column_sampler,
                   uint32_t n_features, BatchParam batch_param)
    : evaluator_{_param, n_features, ctx->gpu_id},
      ctx_(ctx),
      feature_types{_feature_types},
      param(std::move(_param)),
      column_sampler_(std::move(column_sampler)),
      interaction_constraints(param, n_features) {
  sampler = std::make_unique<GradientBasedSampler>(ctx, _n_rows, batch_param, param.subsample,
                                                   param.sampling_method, is_external_memory);
  if (!param.monotone_constraints.empty()) {
    // Copy assigning an empty vector causes an exception in MSVC debug builds
    monotone_constraints = param.monotone_constraints;
  }
  CHECK(column_sampler_);
  monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
}
~GPUHistMakerDevice() = default;
// Lazily creates the FeatureGroups partition used for shared-memory
// histogram building; requires `page` to be set. Subsequent calls are no-ops.
void InitFeatureGroupsOnce() {
  if (!feature_groups) {
    CHECK(page);
    feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
                                           dh::MaxSharedMemoryOptin(ctx_->gpu_id),
                                           sizeof(GradientPairPrecise)));
  }
}
// Reset values for each update iteration
// Copies gradients to device, (re)samples rows, and reinitializes the
// evaluator, quantiser, row partitioner, and histogram storage.
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
  auto const& info = dmat->Info();
  this->column_sampler_->Init(ctx_, num_columns, info.feature_weights.HostVector(),
                              param.colsample_bynode, param.colsample_bylevel,
                              param.colsample_bytree);
  dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
  this->interaction_constraints.Reset();

  if (d_gpair.size() != dh_gpair->Size()) {
    d_gpair.resize(dh_gpair->Size());
  }
  dh::safe_cuda(hipMemcpyAsync(d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
                               dh_gpair->Size() * sizeof(GradientPair),
                               hipMemcpyDeviceToDevice));
  // Gradient-based sampling may replace both the data page and the gradient
  // span (rows with zero sampled hessian are excluded downstream).
  auto sample = sampler->Sample(ctx_, dh::ToSpan(d_gpair), dmat);
  page = sample.page;
  gpair = sample.gpair;

  this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param, ctx_->gpu_id);

  quantiser.reset(new GradientQuantiser(this->gpair));

  row_partitioner.reset();  // Release the device memory first before reallocating
  row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));

  // Init histogram
  hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
  hist.Reset();

  this->InitFeatureGroupsOnce();
}
// Evaluates the best split for the root node given the (quantised) total
// gradient sum; uses the root-level sampled feature set.
GPUExpandEntry EvaluateRootSplit(GradientPairInt64 root_sum) {
  int nidx = RegTree::kRoot;
  GPUTrainingParam gpu_param(param);
  auto sampled_features = column_sampler_->GetFeatureSet(0);
  sampled_features->SetDevice(ctx_->Device());
  common::Span<bst_feature_t> feature_set =
      interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
  auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
  EvaluateSplitInputs inputs{nidx, 0, root_sum, feature_set, hist.GetNodeHistogram(nidx)};
  EvaluateSplitSharedInputs shared_inputs{
      gpu_param,
      *quantiser,
      feature_types,
      matrix.feature_segments,
      matrix.gidx_fvalue_map,
      matrix.min_fvalue,
      // is_dense only holds globally when not distributed (other workers may
      // hold rows this worker lacks).
      matrix.is_dense && !collective::IsDistributed()
  };
  auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
  return split;
}
// Evaluates splits for the left and right children of every candidate in a
// single batched device call; results land in `pinned_candidates_out`
// (2 entries per candidate: left child then right child).
void EvaluateSplits(const std::vector<GPUExpandEntry>& candidates, const RegTree& tree,
                    common::Span<GPUExpandEntry> pinned_candidates_out) {
  if (candidates.empty()) return;
  dh::TemporaryArray<EvaluateSplitInputs> d_node_inputs(2 * candidates.size());
  dh::TemporaryArray<DeviceSplitCandidate> splits_out(2 * candidates.size());
  std::vector<bst_node_t> nidx(2 * candidates.size());
  auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
  auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
  EvaluateSplitSharedInputs shared_inputs{GPUTrainingParam{param}, *quantiser, feature_types,
                                          matrix.feature_segments, matrix.gidx_fvalue_map,
                                          matrix.min_fvalue,
                                          // is_dense represents the local data
                                          matrix.is_dense && !collective::IsDistributed()};
  dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
  // Store the feature set ptrs so they dont go out of scope before the kernel is called
  std::vector<std::shared_ptr<HostDeviceVector<bst_feature_t>>> feature_sets;
  for (size_t i = 0; i < candidates.size(); i++) {
    auto candidate = candidates.at(i);
    int left_nidx = tree[candidate.nid].LeftChild();
    int right_nidx = tree[candidate.nid].RightChild();
    nidx[i * 2] = left_nidx;
    nidx[i * 2 + 1] = right_nidx;
    // Per-child feature sampling depends on the child's depth.
    auto left_sampled_features = column_sampler_->GetFeatureSet(tree.GetDepth(left_nidx));
    left_sampled_features->SetDevice(ctx_->Device());
    feature_sets.emplace_back(left_sampled_features);
    common::Span<bst_feature_t> left_feature_set =
        interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
    auto right_sampled_features = column_sampler_->GetFeatureSet(tree.GetDepth(right_nidx));
    right_sampled_features->SetDevice(ctx_->Device());
    feature_sets.emplace_back(right_sampled_features);
    common::Span<bst_feature_t> right_feature_set =
        interaction_constraints.Query(right_sampled_features->DeviceSpan(),
                                      right_nidx);
    h_node_inputs[i * 2] = {left_nidx, candidate.depth + 1,
                            candidate.split.left_sum, left_feature_set,
                            hist.GetNodeHistogram(left_nidx)};
    h_node_inputs[i * 2 + 1] = {right_nidx, candidate.depth + 1,
                                candidate.split.right_sum, right_feature_set,
                                hist.GetNodeHistogram(right_nidx)};
  }
  bst_feature_t max_active_features = 0;
  for (auto input : h_node_inputs) {
    max_active_features =
        ::max(max_active_features, static_cast<bst_feature_t>(input.feature_set.size()));
  }
  dh::safe_cuda(hipMemcpyAsync(
      d_node_inputs.data().get(), h_node_inputs.data(),
      h_node_inputs.size() * sizeof(EvaluateSplitInputs), hipMemcpyDefault));

  this->evaluator_.EvaluateSplits(nidx, max_active_features,
                                  dh::ToSpan(d_node_inputs), shared_inputs,
                                  dh::ToSpan(entries));
  dh::safe_cuda(hipMemcpyAsync(pinned_candidates_out.data(),
                               entries.data().get(), sizeof(GPUExpandEntry) * entries.size(),
                               hipMemcpyDeviceToHost));
  // Ensure the device->host copy has finished before the caller reads the
  // pinned output.
  dh::DefaultStream().Sync();
}
// Builds the gradient histogram for node `nidx` from the rows currently
// assigned to it by the row partitioner.
void BuildHist(int nidx) {
  auto d_node_hist = hist.GetNodeHistogram(nidx);
  auto d_ridx = row_partitioner->GetRows(nidx);
  BuildGradientHistogram(ctx_->CUDACtx(), page->GetDeviceAccessor(ctx_->gpu_id),
                         feature_groups->DeviceAccessor(ctx_->gpu_id), gpair, d_ridx, d_node_hist,
                         *quantiser);
}
// Attempt to do subtraction trick
// return true if succeeded
// Computes hist(subtraction) = hist(parent) - hist(sibling), avoiding a
// second histogram build; fails when either required histogram is missing.
bool SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
  if (!hist.HistogramExists(nidx_histogram) || !hist.HistogramExists(nidx_parent)) {
    return false;
  }
  auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
  auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
  auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);

  dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
    d_node_hist_subtraction[idx] =
        d_node_hist_parent[idx] - d_node_hist_histogram[idx];
  });
  return true;
}
// Extra data for each node that is passed
// to the update position function
struct NodeSplitData {
  RegTree::Node split_node;     // the node being split (condition + children)
  FeatureType split_type;       // numerical or categorical split
  common::CatBitField node_cats;  // category membership bits for categorical splits
};
// Reassigns rows of each freshly-split node to its left/right child in one
// batched partitioner call. The device lambda returns true for "go left".
void UpdatePosition(const std::vector<GPUExpandEntry>& candidates, RegTree* p_tree) {
  if (candidates.empty()) return;
  std::vector<int> nidx(candidates.size());
  std::vector<int> left_nidx(candidates.size());
  std::vector<int> right_nidx(candidates.size());
  std::vector<NodeSplitData> split_data(candidates.size());
  for (size_t i = 0; i < candidates.size(); i++) {
    auto& e = candidates[i];
    RegTree::Node split_node = (*p_tree)[e.nid];
    auto split_type = p_tree->NodeSplitType(e.nid);
    nidx.at(i) = e.nid;
    left_nidx.at(i) = split_node.LeftChild();
    right_nidx.at(i) = split_node.RightChild();
    split_data.at(i) = NodeSplitData{split_node, split_type, e.split.split_cats};
  }

  auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
  row_partitioner->UpdatePositionBatch(
      nidx, left_nidx, right_nidx, split_data,
      [=] __device__(bst_uint ridx, const NodeSplitData& data) {
        // given a row index, returns the node id it belongs to
        bst_float cut_value = d_matrix.GetFvalue(ridx, data.split_node.SplitIndex());
        // Missing value
        bool go_left = true;
        if (isnan(cut_value)) {
          go_left = data.split_node.DefaultLeft();
        } else {
          if (data.split_type == FeatureType::kCategorical) {
            go_left = common::Decision(data.node_cats.Bits(), cut_value);
          } else {
            go_left = cut_value <= data.split_node.SplitCond();
          }
        }
        return go_left;
      });
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
                      HostDeviceVector<bst_node_t>* p_out_position) {
  // Prediction cache will not be used with external memory
  if (!p_fmat->SingleColBlock()) {
    if (task.UpdateTreeLeaf()) {
      LOG(FATAL) << "Current objective function can not be used with external memory.";
    }
    p_out_position->Resize(0);
    positions.clear();
    return;
  }

  // Copy the finished tree's nodes and categorical-split metadata to device,
  // then walk every row down the tree on device.
  dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
  dh::safe_cuda(hipMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
                               d_nodes.size() * sizeof(RegTree::Node),
                               hipMemcpyHostToDevice));
  auto const& h_split_types = p_tree->GetSplitTypes();
  auto const& categories = p_tree->GetSplitCategories();
  auto const& categories_segments = p_tree->GetSplitCategoriesPtr();

  dh::caching_device_vector<FeatureType> d_split_types;
  dh::caching_device_vector<uint32_t> d_categories;
  dh::caching_device_vector<RegTree::CategoricalSplitMatrix::Segment> d_categories_segments;

  if (!categories.empty()) {
    dh::CopyToD(h_split_types, &d_split_types);
    dh::CopyToD(categories, &d_categories);
    dh::CopyToD(categories_segments, &d_categories_segments);
  }

  FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
                         dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments),
                         p_out_position);
}
// Walks each row from its current node down to a leaf on device, stores the
// leaf index in p_out_position (bitwise-complemented for rows whose sampled
// hessian is zero), and keeps a device copy in `positions` for the
// prediction-cache update.
void FinalisePositionInPage(
    EllpackPageImpl const* page, const common::Span<RegTree::Node> d_nodes,
    common::Span<FeatureType const> d_feature_types, common::Span<uint32_t const> categories,
    common::Span<RegTree::CategoricalSplitMatrix::Segment> categories_segments,
    HostDeviceVector<bst_node_t>* p_out_position) {
  auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
  auto d_gpair = this->gpair;
  p_out_position->SetDevice(ctx_->gpu_id);
  p_out_position->Resize(row_partitioner->GetRows().size());

  auto new_position_op = [=] __device__(size_t row_id, int position) {
    // What happens if user prune the tree?
    if (!d_matrix.IsInRange(row_id)) {
      return RowPartitioner::kIgnoredTreePosition;
    }
    auto node = d_nodes[position];

    while (!node.IsLeaf()) {
      bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
      // Missing value
      if (isnan(element)) {
        position = node.DefaultChild();
      } else {
        bool go_left = true;
        if (common::IsCat(d_feature_types, position)) {
          auto node_cats = categories.subspan(categories_segments[position].beg,
                                              categories_segments[position].size);
          go_left = common::Decision(node_cats, element);
        } else {
          go_left = element <= node.SplitCond();
        }
        if (go_left) {
          position = node.LeftChild();
        } else {
          position = node.RightChild();
        }
      }

      node = d_nodes[position];
    }

    return position;
  };  // NOLINT

  auto d_out_position = p_out_position->DeviceSpan();
  row_partitioner->FinalisePosition(d_out_position, new_position_op);

  auto s_position = p_out_position->ConstDeviceSpan();
  positions.resize(s_position.size());
  dh::safe_cuda(hipMemcpyAsync(positions.data().get(), s_position.data(),
                               s_position.size_bytes(), hipMemcpyDeviceToDevice,
                               ctx_->CUDACtx()->Stream()));

  // Mark sampled-out rows (hessian == 0) by complementing their position so
  // downstream consumers can distinguish them.
  dh::LaunchN(row_partitioner->GetRows().size(), [=] __device__(size_t idx) {
    bst_node_t position = d_out_position[idx];
    bool is_row_sampled = d_gpair[idx].GetHess() - .0f == 0.f;
    d_out_position[idx] = is_row_sampled ? ~position : position;
  });
}
// Adds each row's new leaf value to the cached predictions. Returns false
// when no positions were recorded (e.g. external memory), signalling the
// caller to recompute predictions instead.
bool UpdatePredictionCache(linalg::MatrixView<float> out_preds_d, RegTree const* p_tree) {
  if (positions.empty()) {
    return false;
  }

  CHECK(p_tree);
  dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
  CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
  auto d_position = dh::ToSpan(positions);
  CHECK_EQ(out_preds_d.Size(), d_position.size());

  auto const& h_nodes = p_tree->GetNodes();
  dh::caching_device_vector<RegTree::Node> nodes(h_nodes.size());
  dh::safe_cuda(hipMemcpyAsync(nodes.data().get(), h_nodes.data(),
                               h_nodes.size() * sizeof(RegTree::Node), hipMemcpyHostToDevice,
                               ctx_->CUDACtx()->Stream()));
  auto d_nodes = dh::ToSpan(nodes);
  CHECK_EQ(out_preds_d.Shape(1), 1);
  dh::LaunchN(d_position.size(), ctx_->CUDACtx()->Stream(),
              [=] XGBOOST_DEVICE(std::size_t idx) mutable {
                bst_node_t nidx = d_position[idx];
                auto weight = d_nodes[nidx].LeafValue();
                out_preds_d(idx, 0) += weight;
              });
  return true;
}
// num histograms is the number of contiguous histograms in memory to reduce over
// Sums histograms element-wise across workers (x2 because each bin stores a
// gradient and a hessian component).
void AllReduceHist(int nidx, int num_histograms) {
  monitor.Start("AllReduce");
  auto d_node_hist = hist.GetNodeHistogram(nidx).data();
  using ReduceT = typename std::remove_pointer<decltype(d_node_hist)>::type::ValueT;
  collective::AllReduce<collective::Operation::kSum>(
      ctx_->gpu_id, reinterpret_cast<ReduceT*>(d_node_hist),
      page->Cuts().TotalBins() * 2 * num_histograms);

  monitor.Stop("AllReduce");
}
/**
 * \brief Build GPU local histograms for the left and right child of some parent node
 */
void BuildHistLeftRight(std::vector<GPUExpandEntry> const& candidates, const RegTree& tree) {
  if (candidates.empty()) return;
  // Some nodes we will manually compute histograms
  // others we will do by subtraction
  std::vector<int> hist_nidx;
  std::vector<int> subtraction_nidx;
  for (auto& e : candidates) {
    // Decide whether to build the left histogram or right histogram
    // Use sum of Hessian as a heuristic to select node with fewest training instances
    bool fewer_right = e.split.right_sum.GetQuantisedHess() < e.split.left_sum.GetQuantisedHess();
    if (fewer_right) {
      hist_nidx.emplace_back(tree[e.nid].RightChild());
      subtraction_nidx.emplace_back(tree[e.nid].LeftChild());
    } else {
      hist_nidx.emplace_back(tree[e.nid].LeftChild());
      subtraction_nidx.emplace_back(tree[e.nid].RightChild());
    }
  }
  std::vector<int> all_new = hist_nidx;
  all_new.insert(all_new.end(), subtraction_nidx.begin(), subtraction_nidx.end());
  // Allocate the histograms
  // Guaranteed contiguous memory
  hist.AllocateHistograms(all_new);

  for (auto nidx : hist_nidx) {
    this->BuildHist(nidx);
  }

  // Reduce all in one go
  // This gives much better latency in a distributed setting
  // when processing a large batch
  this->AllReduceHist(hist_nidx.at(0), hist_nidx.size());

  for (size_t i = 0; i < subtraction_nidx.size(); i++) {
    auto build_hist_nidx = hist_nidx.at(i);
    auto subtraction_trick_nidx = subtraction_nidx.at(i);
    auto parent_nidx = candidates.at(i).nid;

    if (!this->SubtractionTrick(parent_nidx, build_hist_nidx, subtraction_trick_nidx)) {
      // Calculate other histogram manually
      this->BuildHist(subtraction_trick_nidx);
      this->AllReduceHist(subtraction_trick_nidx, 1);
    }
  }
}
// Materializes an evaluated split into the tree: expands the node (numeric
// or categorical), records stats, and propagates evaluator/interaction state.
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
  RegTree& tree = *p_tree;

  // Sanity check - have we created a leaf with no training instances?
  if (!collective::IsDistributed() && row_partitioner) {
    CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
        << "No training instances in this leaf!";
  }

  auto base_weight = candidate.base_weight;
  auto left_weight = candidate.left_weight * param.learning_rate;
  auto right_weight = candidate.right_weight * param.learning_rate;
  // Convert the fixed-point child sums back to floating-point hessians.
  auto parent_hess = quantiser
                         ->ToFloatingPoint(candidate.split.left_sum +
                                           candidate.split.right_sum)
                         .GetHess();
  auto left_hess =
      quantiser->ToFloatingPoint(candidate.split.left_sum).GetHess();
  auto right_hess =
      quantiser->ToFloatingPoint(candidate.split.right_sum).GetHess();

  auto is_cat = candidate.split.is_cat;
  if (is_cat) {
    // should be set to nan in evaluation split.
    CHECK(common::CheckNAN(candidate.split.fvalue));
    std::vector<common::CatBitField::value_type> split_cats;

    CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
    auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
    auto n_bins_feature = page->Cuts().FeatureBins(candidate.split.findex);
    split_cats.resize(common::CatBitField::ComputeStorageSize(n_bins_feature), 0);
    CHECK_LE(split_cats.size(), h_cats.size());
    std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());

    tree.ExpandCategorical(
        candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
        base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_hess,
        left_hess, right_hess);
  } else {
    CHECK(!common::CheckNAN(candidate.split.fvalue));
    tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
                    candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
                    candidate.split.loss_chg, parent_hess,
                    left_hess, right_hess);
  }
  evaluator_.ApplyTreeSplit(candidate, p_tree);

  const auto& parent = tree[candidate.nid];
  interaction_constraints.Split(candidate.nid, parent.SplitIndex(), parent.LeftChild(),
                                parent.RightChild());
}
// Computes the (allreduced) root gradient sum, builds the root histogram,
// sets the root's stats/leaf value, and returns the first split candidate.
GPUExpandEntry InitRoot(RegTree* p_tree) {
  constexpr bst_node_t kRootNIdx = 0;
  dh::XGBCachingDeviceAllocator<char> alloc;
  auto quantiser = *this->quantiser;
  auto gpair_it = dh::MakeTransformIterator<GradientPairInt64>(
      dh::tbegin(gpair), [=] __device__(auto const &gpair) {
        return quantiser.ToFixedPoint(gpair);
      });
  GradientPairInt64 root_sum_quantised =
      dh::Reduce(ctx_->CUDACtx()->CTP(), gpair_it, gpair_it + gpair.size(),
                 GradientPairInt64{}, thrust::plus<GradientPairInt64>{});
  using ReduceT = typename decltype(root_sum_quantised)::ValueT;
  // 2 = gradient + hessian components.
  collective::Allreduce<collective::Operation::kSum>(
      reinterpret_cast<ReduceT *>(&root_sum_quantised), 2);

  hist.AllocateHistograms({kRootNIdx});
  this->BuildHist(kRootNIdx);
  this->AllReduceHist(kRootNIdx, 1);

  // Remember root stats
  auto root_sum = quantiser.ToFloatingPoint(root_sum_quantised);
  p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
  auto weight = CalcWeight(param, root_sum);
  p_tree->Stat(kRootNIdx).base_weight = weight;
  (*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);

  // Generate first split
  auto root_entry = this->EvaluateRootSplit(root_sum_quantised);
  return root_entry;
}
// Main driver loop for growing one tree: repeatedly applies the best splits,
// repartitions rows, rebuilds histograms, and evaluates the children until
// the expansion driver yields no further candidates.
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, ObjInfo const* task,
                RegTree* p_tree, HostDeviceVector<bst_node_t>* p_out_position) {
  auto& tree = *p_tree;
  // Process maximum 32 nodes at a time
  Driver<GPUExpandEntry> driver(param, 32);

  monitor.Start("Reset");
  this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
  monitor.Stop("Reset");

  monitor.Start("InitRoot");
  driver.Push({this->InitRoot(p_tree)});
  monitor.Stop("InitRoot");

  // The set of leaves that can be expanded asynchronously
  auto expand_set = driver.Pop();
  while (!expand_set.empty()) {
    for (auto& candidate : expand_set) {
      this->ApplySplit(candidate, p_tree);
    }
    // Get the candidates we are allowed to expand further
    // e.g. We do not bother further processing nodes whose children are beyond max depth
    std::vector<GPUExpandEntry> filtered_expand_set;
    std::copy_if(expand_set.begin(), expand_set.end(), std::back_inserter(filtered_expand_set),
                 [&](const auto& e) { return driver.IsChildValid(e); });

    auto new_candidates =
        pinned.GetSpan<GPUExpandEntry>(filtered_expand_set.size() * 2, GPUExpandEntry());

    monitor.Start("UpdatePosition");
    // Update position is only run when child is valid, instead of right after apply
    // split (as in approx tree method). Hense we have the finalise position call
    // in GPU Hist.
    this->UpdatePosition(filtered_expand_set, p_tree);
    monitor.Stop("UpdatePosition");

    monitor.Start("BuildHist");
    this->BuildHistLeftRight(filtered_expand_set, tree);
    monitor.Stop("BuildHist");

    monitor.Start("EvaluateSplits");
    this->EvaluateSplits(filtered_expand_set, *p_tree, new_candidates);
    monitor.Stop("EvaluateSplits");
    dh::DefaultStream().Sync();
    driver.Push(new_candidates.begin(), new_candidates.end());
    expand_set = driver.Pop();
  }

  monitor.Start("FinalisePosition");
  this->FinalisePosition(p_tree, p_fmat, *task, p_out_position);
  monitor.Stop("FinalisePosition");
}
};
class GPUHistMaker : public TreeUpdater {
using GradientSumT = GradientPairPrecise;
public:
explicit GPUHistMaker(Context const* ctx, ObjInfo const* task) : TreeUpdater(ctx), task_{task} {};
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
initialised_ = false;
monitor_.Init("updater_gpu_hist");
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("hist_train_param"), &this->hist_maker_param_);
initialised_ = false;
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["hist_train_param"] = ToJson(hist_maker_param_);
}
~GPUHistMaker() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree*>& trees) override {
monitor_.Start("Update");
// build tree
try {
std::size_t t_idx{0};
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(param, gpair, dmat, tree, &out_position[t_idx]);
this->hist_maker_param_.CheckTreesSynchronized(tree);
++t_idx;
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
monitor_.Stop("Update");
}
void InitDataOnce(TrainParam const* param, DMatrix* dmat) {
CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
info_ = &dmat->Info();
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
collective::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
this->column_sampler_ = std::make_shared<common::ColumnSampler>(column_sampling_seed);
auto batch_param = BatchParam{param->max_bin, TrainParam::DftSparseThreshold()};
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
info_->feature_types.SetDevice(ctx_->gpu_id);
maker = std::make_unique<GPUHistMakerDevice>(
ctx_, !dmat->SingleColBlock(), info_->feature_types.ConstDeviceSpan(), info_->num_row_,
*param, column_sampler_, info_->num_col_, batch_param);
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(TrainParam const* param, DMatrix* dmat, RegTree const* p_tree) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(param, dmat);
monitor_.Stop("InitDataOnce");
}
p_last_tree_ = p_tree;
CHECK(hist_maker_param_.GetInitialised());
}
void UpdateTree(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree, HostDeviceVector<bst_node_t>* p_out_position) {
monitor_.Start("InitData");
this->InitData(param, p_fmat, p_tree);
monitor_.Stop("InitData");
gpair->SetDevice(ctx_->gpu_id);
maker->UpdateTree(gpair, p_fmat, task_, p_tree, p_out_position);
}
bool UpdatePredictionCache(const DMatrix* data,
linalg::MatrixView<bst_float> p_out_preds) override {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
bool result = maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
monitor_.Stop("UpdatePredictionCache");
return result;
}
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice> maker; // NOLINT
[[nodiscard]] char const* Name() const override { return "grow_gpu_hist"; }
[[nodiscard]] bool HasNodePosition() const override { return true; }
private:
bool initialised_{false};
HistMakerTrainParam hist_maker_param_;
DMatrix* p_last_fmat_{nullptr};
RegTree const* p_last_tree_{nullptr};
ObjInfo const* task_{nullptr};
common::Monitor monitor_;
std::shared_ptr<common::ColumnSampler> column_sampler_;
};
#if !defined(GTEST_TEST)
// Register `grow_gpu_hist` in the global tree-updater registry.  Excluded
// from GTEST_TEST builds via the guard above.
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([](Context const* ctx, ObjInfo const* task) {
return new GPUHistMaker(ctx, task);
});
#endif // !defined(GTEST_TEST)
/**
 * \brief Tree updater implementing the global `approx` algorithm on GPU.
 *
 * The batch parameter fed to the device implementation is built from the
 * current per-row Hessian (see Update()), so `maker_` is re-created on
 * every Update() call rather than cached like in GPUHistMaker.
 */
class GPUGlobalApproxMaker : public TreeUpdater {
public:
explicit GPUGlobalApproxMaker(Context const* ctx, ObjInfo const* task)
: TreeUpdater(ctx), task_{task} {};
void Configure(Args const& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Approx]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
// Force InitDataOnce() to run again on the next update.
initialised_ = false;
monitor_.Init(this->Name());
}
// Restore hyper-parameters from a serialised model; device state is
// re-initialised lazily on the next update.
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("hist_train_param"), &this->hist_maker_param_);
initialised_ = false;
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["hist_train_param"] = ToJson(hist_maker_param_);
}
// Log accumulated device memory statistics on destruction.
~GPUGlobalApproxMaker() override { dh::GlobalMemoryLogger().Log(); }
// Build one tree per entry in `trees`, writing the final leaf position of
// each training row into the matching `out_position` vector.
void Update(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree*>& trees) override {
monitor_.Start("Update");
this->InitDataOnce(p_fmat);
// build tree
hess_.resize(gpair->Size());
auto hess = dh::ToSpan(hess_);
gpair->SetDevice(ctx_->Device());
auto d_gpair = gpair->ConstDeviceSpan();
auto cuctx = ctx_->CUDACtx();
// Extract the per-row Hessian on device; the span is handed to BatchParam
// below (presumably as sketch weights for binning — confirm in BatchParam).
thrust::transform(cuctx->CTP(), dh::tcbegin(d_gpair), dh::tcend(d_gpair), dh::tbegin(hess),
[=] XGBOOST_DEVICE(GradientPair const& g) { return g.GetHess(); });
auto const& info = p_fmat->Info();
info.feature_types.SetDevice(ctx_->Device());
auto batch = BatchParam{param->max_bin, hess, !task_->const_hess};
// The batch parameter depends on the current Hessian, so the device maker
// is rebuilt on every call instead of being cached.
maker_ = std::make_unique<GPUHistMakerDevice>(
ctx_, !p_fmat->SingleColBlock(), info.feature_types.ConstDeviceSpan(), info.num_row_,
*param, column_sampler_, info.num_col_, batch);
std::size_t t_idx{0};
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, p_fmat, tree, &out_position[t_idx]);
this->hist_maker_param_.CheckTreesSynchronized(tree);
++t_idx;
}
monitor_.Stop("Update");
}
// One-off initialisation that depends only on the data, not on a tree.
void InitDataOnce(DMatrix* p_fmat) {
if (this->initialised_) {
return;
}
monitor_.Start(__func__);
CHECK(ctx_->IsCUDA()) << error::InvalidCUDAOrdinal();
// Synchronise the column sampling seed
// so that all distributed workers sample the same feature subsets.
uint32_t column_sampling_seed = common::GlobalRandom()();
collective::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
this->column_sampler_ = std::make_shared<common::ColumnSampler>(column_sampling_seed);
p_last_fmat_ = p_fmat;
initialised_ = true;
monitor_.Stop(__func__);
}
void InitData(DMatrix* p_fmat, RegTree const* p_tree) {
this->InitDataOnce(p_fmat);
// Remember the tree for the prediction-cache update.
p_last_tree_ = p_tree;
CHECK(hist_maker_param_.GetInitialised());
}
// Grow a single tree with the device implementation.
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
HostDeviceVector<bst_node_t>* p_out_position) {
monitor_.Start("InitData");
this->InitData(p_fmat, p_tree);
monitor_.Stop("InitData");
gpair->SetDevice(ctx_->gpu_id);
maker_->UpdateTree(gpair, p_fmat, task_, p_tree, p_out_position);
}
// Add the cached leaf values of the last trained tree to `p_out_preds`.
// Returns false when the cache does not apply to `data`.
bool UpdatePredictionCache(const DMatrix* data,
linalg::MatrixView<bst_float> p_out_preds) override {
if (maker_ == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
bool result = maker_->UpdatePredictionCache(p_out_preds, p_last_tree_);
monitor_.Stop("UpdatePredictionCache");
return result;
}
[[nodiscard]] char const* Name() const override { return "grow_gpu_approx"; }
[[nodiscard]] bool HasNodePosition() const override { return true; }
private:
bool initialised_{false};  // true once InitDataOnce() has run
HistMakerTrainParam hist_maker_param_;
dh::device_vector<float> hess_;  // per-row Hessian buffer, reused across calls
std::shared_ptr<common::ColumnSampler> column_sampler_;
std::unique_ptr<GPUHistMakerDevice> maker_;  // per-device implementation, rebuilt each Update()
DMatrix* p_last_fmat_{nullptr};  // data used in the last Update(); cache key
RegTree const* p_last_tree_{nullptr};  // tree used in the last Update()
ObjInfo const* task_{nullptr};
common::Monitor monitor_;
};
#if !defined(GTEST_TEST)
// Register `grow_gpu_approx` in the global tree-updater registry.  Excluded
// from GTEST_TEST builds via the guard above.
XGBOOST_REGISTER_TREE_UPDATER(GPUApproxMaker, "grow_gpu_approx")
.describe("Grow tree with GPU.")
.set_body([](Context const* ctx, ObjInfo const* task) {
return new GPUGlobalApproxMaker(ctx, task);
});
#endif // !defined(GTEST_TEST)
} // namespace xgboost::tree
| 136d70c3d7f0b33766749d1516cdd30aa25cfa44.cu | /**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "../collective/communicator-inl.cuh"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/cuda_context.cuh" // CUDAContext
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/io.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "../data/ellpack_page.h"
#include "constraints.cuh"
#include "driver.h"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "hist/param.h"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/task.h" // for ObjInfo
#include "xgboost/tree_model.h"
namespace xgboost::tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogramStorage
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <size_t kStopGrowingSize = 1 << 28>
class DeviceHistogramStorage {
private:
using GradientSumT = GradientPairInt64;
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
// Large buffer of zeroed memory, caches histograms
dh::device_vector<typename GradientSumT::ValueT> data_;
// If we run out of storage allocate one histogram at a time
// in overflow. Not cached, overwritten when a new histogram
// is requested
dh::device_vector<typename GradientSumT::ValueT> overflow_;
std::map<int, size_t> overflow_nidx_map_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2.");
public:
// Start with about 16mb
DeviceHistogramStorage() { data_.reserve(1 << 22); }
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
overflow_nidx_map_.clear();
}
[[nodiscard]] bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend() ||
overflow_nidx_map_.find(nidx) != overflow_nidx_map_.cend();
}
[[nodiscard]] int Bins() const { return n_bins_; }
[[nodiscard]] size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; }
dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; }
void AllocateHistograms(const std::vector<int>& new_nidxs) {
for (int nidx : new_nidxs) {
CHECK(!HistogramExists(nidx));
}
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize() * new_nidxs.size();
if (used_size >= kStopGrowingSize) {
// Use overflow
// Delete previous entries
overflow_nidx_map_.clear();
overflow_.resize(HistogramSize() * new_nidxs.size());
// Zero memory
auto d_data = overflow_.data().get();
dh::LaunchN(overflow_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0; });
// Append new histograms
for (int nidx : new_nidxs) {
overflow_nidx_map_[nidx] = overflow_nidx_map_.size() * HistogramSize();
}
} else {
CHECK_GE(data_.size(), used_size);
// Expand if necessary
if (data_.size() < new_used_size) {
data_.resize(std::max(data_.size() * 2, new_used_size));
}
// Append new histograms
for (int nidx : new_nidxs) {
nidx_map_[nidx] = nidx_map_.size() * HistogramSize();
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
if (nidx_map_.find(nidx) != nidx_map_.cend()) {
// Fetch from normal cache
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
} else {
// Fetch from overflow
auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx);
return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
}
}
};
// Manage memory for a single GPU
struct GPUHistMakerDevice {
private:
GPUHistEvaluator evaluator_;
Context const* ctx_;
std::shared_ptr<common::ColumnSampler> column_sampler_;
public:
EllpackPageImpl const* page{nullptr};
common::Span<FeatureType const> feature_types;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogramStorage<> hist{};
dh::device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::device_vector<int> monotone_constraints;
// node idx for each sample
dh::device_vector<bst_node_t> positions;
TrainParam param;
std::unique_ptr<GradientQuantiser> quantiser;
dh::PinnedMemory pinned;
dh::PinnedMemory pinned2;
common::Monitor monitor;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
GPUHistMakerDevice(Context const* ctx, bool is_external_memory,
common::Span<FeatureType const> _feature_types, bst_row_t _n_rows,
TrainParam _param, std::shared_ptr<common::ColumnSampler> column_sampler,
uint32_t n_features, BatchParam batch_param)
: evaluator_{_param, n_features, ctx->gpu_id},
ctx_(ctx),
feature_types{_feature_types},
param(std::move(_param)),
column_sampler_(std::move(column_sampler)),
interaction_constraints(param, n_features) {
sampler = std::make_unique<GradientBasedSampler>(ctx, _n_rows, batch_param, param.subsample,
param.sampling_method, is_external_memory);
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
CHECK(column_sampler_);
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
}
~GPUHistMakerDevice() = default;
void InitFeatureGroupsOnce() {
if (!feature_groups) {
CHECK(page);
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(ctx_->gpu_id),
sizeof(GradientPairPrecise)));
}
}
// Reset values for each update iteration
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler_->Init(ctx_, num_columns, info.feature_weights.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
this->interaction_constraints.Reset();
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(cudaMemcpyAsync(d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair),
cudaMemcpyDeviceToDevice));
auto sample = sampler->Sample(ctx_, dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param, ctx_->gpu_id);
quantiser.reset(new GradientQuantiser(this->gpair));
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));
// Init histogram
hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
hist.Reset();
this->InitFeatureGroupsOnce();
}
GPUExpandEntry EvaluateRootSplit(GradientPairInt64 root_sum) {
int nidx = RegTree::kRoot;
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler_->GetFeatureSet(0);
sampled_features->SetDevice(ctx_->Device());
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs inputs{nidx, 0, root_sum, feature_set, hist.GetNodeHistogram(nidx)};
EvaluateSplitSharedInputs shared_inputs{
gpu_param,
*quantiser,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
matrix.is_dense && !collective::IsDistributed()
};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
return split;
}
void EvaluateSplits(const std::vector<GPUExpandEntry>& candidates, const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
if (candidates.empty()) return;
dh::TemporaryArray<EvaluateSplitInputs> d_node_inputs(2 * candidates.size());
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2 * candidates.size());
std::vector<bst_node_t> nidx(2 * candidates.size());
auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitSharedInputs shared_inputs{GPUTrainingParam{param}, *quantiser, feature_types,
matrix.feature_segments, matrix.gidx_fvalue_map,
matrix.min_fvalue,
// is_dense represents the local data
matrix.is_dense && !collective::IsDistributed()};
dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
// Store the feature set ptrs so they dont go out of scope before the kernel is called
std::vector<std::shared_ptr<HostDeviceVector<bst_feature_t>>> feature_sets;
for (size_t i = 0; i < candidates.size(); i++) {
auto candidate = candidates.at(i);
int left_nidx = tree[candidate.nid].LeftChild();
int right_nidx = tree[candidate.nid].RightChild();
nidx[i * 2] = left_nidx;
nidx[i * 2 + 1] = right_nidx;
auto left_sampled_features = column_sampler_->GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->Device());
feature_sets.emplace_back(left_sampled_features);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler_->GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->Device());
feature_sets.emplace_back(right_sampled_features);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
right_nidx);
h_node_inputs[i * 2] = {left_nidx, candidate.depth + 1,
candidate.split.left_sum, left_feature_set,
hist.GetNodeHistogram(left_nidx)};
h_node_inputs[i * 2 + 1] = {right_nidx, candidate.depth + 1,
candidate.split.right_sum, right_feature_set,
hist.GetNodeHistogram(right_nidx)};
}
bst_feature_t max_active_features = 0;
for (auto input : h_node_inputs) {
max_active_features =
std::max(max_active_features, static_cast<bst_feature_t>(input.feature_set.size()));
}
dh::safe_cuda(cudaMemcpyAsync(
d_node_inputs.data().get(), h_node_inputs.data(),
h_node_inputs.size() * sizeof(EvaluateSplitInputs), cudaMemcpyDefault));
this->evaluator_.EvaluateSplits(nidx, max_active_features,
dh::ToSpan(d_node_inputs), shared_inputs,
dh::ToSpan(entries));
dh::safe_cuda(cudaMemcpyAsync(pinned_candidates_out.data(),
entries.data().get(), sizeof(GPUExpandEntry) * entries.size(),
cudaMemcpyDeviceToHost));
dh::DefaultStream().Sync();
}
void BuildHist(int nidx) {
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(ctx_->CUDACtx(), page->GetDeviceAccessor(ctx_->gpu_id),
feature_groups->DeviceAccessor(ctx_->gpu_id), gpair, d_ridx, d_node_hist,
*quantiser);
}
// Attempt to do subtraction trick
// return true if succeeded
bool SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
if (!hist.HistogramExists(nidx_histogram) || !hist.HistogramExists(nidx_parent)) {
return false;
}
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
return true;
}
// Extra data for each node that is passed
// to the update position function
struct NodeSplitData {
RegTree::Node split_node;
FeatureType split_type;
common::CatBitField node_cats;
};
void UpdatePosition(const std::vector<GPUExpandEntry>& candidates, RegTree* p_tree) {
if (candidates.empty()) return;
std::vector<int> nidx(candidates.size());
std::vector<int> left_nidx(candidates.size());
std::vector<int> right_nidx(candidates.size());
std::vector<NodeSplitData> split_data(candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto& e = candidates[i];
RegTree::Node split_node = (*p_tree)[e.nid];
auto split_type = p_tree->NodeSplitType(e.nid);
nidx.at(i) = e.nid;
left_nidx.at(i) = split_node.LeftChild();
right_nidx.at(i) = split_node.RightChild();
split_data.at(i) = NodeSplitData{split_node, split_type, e.split.split_cats};
}
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
row_partitioner->UpdatePositionBatch(
nidx, left_nidx, right_nidx, split_data,
[=] __device__(bst_uint ridx, const NodeSplitData& data) {
// given a row index, returns the node id it belongs to
bst_float cut_value = d_matrix.GetFvalue(ridx, data.split_node.SplitIndex());
// Missing value
bool go_left = true;
if (isnan(cut_value)) {
go_left = data.split_node.DefaultLeft();
} else {
if (data.split_type == FeatureType::kCategorical) {
go_left = common::Decision(data.node_cats.Bits(), cut_value);
} else {
go_left = cut_value <= data.split_node.SplitCond();
}
}
return go_left;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
// Prediction cache will not be used with external memory
if (!p_fmat->SingleColBlock()) {
if (task.UpdateTreeLeaf()) {
LOG(FATAL) << "Current objective function can not be used with external memory.";
}
p_out_position->Resize(0);
positions.clear();
return;
}
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::CategoricalSplitMatrix::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments),
p_out_position);
}
void FinalisePositionInPage(
EllpackPageImpl const* page, const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types, common::Span<uint32_t const> categories,
common::Span<RegTree::CategoricalSplitMatrix::Segment> categories_segments,
HostDeviceVector<bst_node_t>* p_out_position) {
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto d_gpair = this->gpair;
p_out_position->SetDevice(ctx_->gpu_id);
p_out_position->Resize(row_partitioner->GetRows().size());
auto new_position_op = [=] __device__(size_t row_id, int position) {
// What happens if user prune the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats = categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision(node_cats, element);
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
}; // NOLINT
auto d_out_position = p_out_position->DeviceSpan();
row_partitioner->FinalisePosition(d_out_position, new_position_op);
auto s_position = p_out_position->ConstDeviceSpan();
positions.resize(s_position.size());
dh::safe_cuda(cudaMemcpyAsync(positions.data().get(), s_position.data(),
s_position.size_bytes(), cudaMemcpyDeviceToDevice,
ctx_->CUDACtx()->Stream()));
dh::LaunchN(row_partitioner->GetRows().size(), [=] __device__(size_t idx) {
bst_node_t position = d_out_position[idx];
bool is_row_sampled = d_gpair[idx].GetHess() - .0f == 0.f;
d_out_position[idx] = is_row_sampled ? ~position : position;
});
}
bool UpdatePredictionCache(linalg::MatrixView<float> out_preds_d, RegTree const* p_tree) {
if (positions.empty()) {
return false;
}
CHECK(p_tree);
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
auto d_position = dh::ToSpan(positions);
CHECK_EQ(out_preds_d.Size(), d_position.size());
auto const& h_nodes = p_tree->GetNodes();
dh::caching_device_vector<RegTree::Node> nodes(h_nodes.size());
dh::safe_cuda(cudaMemcpyAsync(nodes.data().get(), h_nodes.data(),
h_nodes.size() * sizeof(RegTree::Node), cudaMemcpyHostToDevice,
ctx_->CUDACtx()->Stream()));
auto d_nodes = dh::ToSpan(nodes);
CHECK_EQ(out_preds_d.Shape(1), 1);
dh::LaunchN(d_position.size(), ctx_->CUDACtx()->Stream(),
[=] XGBOOST_DEVICE(std::size_t idx) mutable {
bst_node_t nidx = d_position[idx];
auto weight = d_nodes[nidx].LeafValue();
out_preds_d(idx, 0) += weight;
});
return true;
}
// num histograms is the number of contiguous histograms in memory to reduce over
void AllReduceHist(int nidx, int num_histograms) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
using ReduceT = typename std::remove_pointer<decltype(d_node_hist)>::type::ValueT;
collective::AllReduce<collective::Operation::kSum>(
ctx_->gpu_id, reinterpret_cast<ReduceT*>(d_node_hist),
page->Cuts().TotalBins() * 2 * num_histograms);
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(std::vector<GPUExpandEntry> const& candidates, const RegTree& tree) {
if (candidates.empty()) return;
// Some nodes we will manually compute histograms
// others we will do by subtraction
std::vector<int> hist_nidx;
std::vector<int> subtraction_nidx;
for (auto& e : candidates) {
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = e.split.right_sum.GetQuantisedHess() < e.split.left_sum.GetQuantisedHess();
if (fewer_right) {
hist_nidx.emplace_back(tree[e.nid].RightChild());
subtraction_nidx.emplace_back(tree[e.nid].LeftChild());
} else {
hist_nidx.emplace_back(tree[e.nid].LeftChild());
subtraction_nidx.emplace_back(tree[e.nid].RightChild());
}
}
std::vector<int> all_new = hist_nidx;
all_new.insert(all_new.end(), subtraction_nidx.begin(), subtraction_nidx.end());
// Allocate the histograms
// Guaranteed contiguous memory
hist.AllocateHistograms(all_new);
for (auto nidx : hist_nidx) {
this->BuildHist(nidx);
}
// Reduce all in one go
// This gives much better latency in a distributed setting
// when processing a large batch
this->AllReduceHist(hist_nidx.at(0), hist_nidx.size());
for (size_t i = 0; i < subtraction_nidx.size(); i++) {
auto build_hist_nidx = hist_nidx.at(i);
auto subtraction_trick_nidx = subtraction_nidx.at(i);
auto parent_nidx = candidates.at(i).nid;
if (!this->SubtractionTrick(parent_nidx, build_hist_nidx, subtraction_trick_nidx)) {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, 1);
}
}
}
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
// Sanity check - have we created a leaf with no training instances?
if (!collective::IsDistributed() && row_partitioner) {
CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
<< "No training instances in this leaf!";
}
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto parent_hess = quantiser
->ToFloatingPoint(candidate.split.left_sum +
candidate.split.right_sum)
.GetHess();
auto left_hess =
quantiser->ToFloatingPoint(candidate.split.left_sum).GetHess();
auto right_hess =
quantiser->ToFloatingPoint(candidate.split.right_sum).GetHess();
auto is_cat = candidate.split.is_cat;
if (is_cat) {
// should be set to nan in evaluation split.
CHECK(common::CheckNAN(candidate.split.fvalue));
std::vector<common::CatBitField::value_type> split_cats;
CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
auto n_bins_feature = page->Cuts().FeatureBins(candidate.split.findex);
split_cats.resize(common::CatBitField::ComputeStorageSize(n_bins_feature), 0);
CHECK_LE(split_cats.size(), h_cats.size());
std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_hess,
left_hess, right_hess);
} else {
CHECK(!common::CheckNAN(candidate.split.fvalue));
tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_hess,
left_hess, right_hess);
}
evaluator_.ApplyTreeSplit(candidate, p_tree);
const auto& parent = tree[candidate.nid];
interaction_constraints.Split(candidate.nid, parent.SplitIndex(), parent.LeftChild(),
parent.RightChild());
}
  // Build the root node of a new tree: reduce all gradient pairs into the
  // root sum (synchronised across workers), build and all-reduce the root
  // histogram, record the root statistics on the tree, and evaluate the
  // first candidate split.
  GPUExpandEntry InitRoot(RegTree* p_tree) {
    constexpr bst_node_t kRootNIdx = 0;
    // NOTE(review): `alloc` appears unused in this method — confirm before removing.
    dh::XGBCachingDeviceAllocator<char> alloc;
    auto quantiser = *this->quantiser;
    // Quantise gradients on the fly while reducing so the sum is computed in
    // fixed point (exact, reproducible across workers).
    auto gpair_it = dh::MakeTransformIterator<GradientPairInt64>(
        dh::tbegin(gpair), [=] __device__(auto const &gpair) {
          return quantiser.ToFixedPoint(gpair);
        });
    GradientPairInt64 root_sum_quantised =
        dh::Reduce(ctx_->CUDACtx()->CTP(), gpair_it, gpair_it + gpair.size(),
                   GradientPairInt64{}, thrust::plus<GradientPairInt64>{});
    // Sum the (grad, hess) pair — 2 integer values — across all workers.
    using ReduceT = typename decltype(root_sum_quantised)::ValueT;
    collective::Allreduce<collective::Operation::kSum>(
        reinterpret_cast<ReduceT *>(&root_sum_quantised), 2);

    hist.AllocateHistograms({kRootNIdx});
    this->BuildHist(kRootNIdx);
    this->AllReduceHist(kRootNIdx, 1);

    // Remember root stats
    auto root_sum = quantiser.ToFloatingPoint(root_sum_quantised);
    p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
    auto weight = CalcWeight(param, root_sum);
    p_tree->Stat(kRootNIdx).base_weight = weight;
    (*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);

    // Generate first split
    auto root_entry = this->EvaluateRootSplit(root_sum_quantised);
    return root_entry;
  }
  // Grow a single tree: initialise the root, then repeatedly apply queued
  // splits, update row positions, build child histograms and evaluate the
  // next round of candidates until the driver has no expandable nodes left.
  void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, ObjInfo const* task,
                  RegTree* p_tree, HostDeviceVector<bst_node_t>* p_out_position) {
    auto& tree = *p_tree;
    // Process maximum 32 nodes at a time
    Driver<GPUExpandEntry> driver(param, 32);

    monitor.Start("Reset");
    this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
    monitor.Stop("Reset");

    monitor.Start("InitRoot");
    driver.Push({this->InitRoot(p_tree)});
    monitor.Stop("InitRoot");

    // The set of leaves that can be expanded asynchronously
    auto expand_set = driver.Pop();
    while (!expand_set.empty()) {
      for (auto& candidate : expand_set) {
        this->ApplySplit(candidate, p_tree);
      }
      // Get the candidates we are allowed to expand further
      // e.g. We do not bother further processing nodes whose children are beyond max depth
      std::vector<GPUExpandEntry> filtered_expand_set;
      std::copy_if(expand_set.begin(), expand_set.end(), std::back_inserter(filtered_expand_set),
                   [&](const auto& e) { return driver.IsChildValid(e); });

      // Two child entries (left/right) per expanded node, filled in by
      // EvaluateSplits below.
      auto new_candidates =
          pinned.GetSpan<GPUExpandEntry>(filtered_expand_set.size() * 2, GPUExpandEntry());

      monitor.Start("UpdatePosition");
      // Update position is only run when child is valid, instead of right after apply
      // split (as in approx tree method). Hence we have the finalise position call
      // in GPU Hist.
      this->UpdatePosition(filtered_expand_set, p_tree);
      monitor.Stop("UpdatePosition");

      monitor.Start("BuildHist");
      this->BuildHistLeftRight(filtered_expand_set, tree);
      monitor.Stop("BuildHist");

      monitor.Start("EvaluateSplits");
      this->EvaluateSplits(filtered_expand_set, *p_tree, new_candidates);
      monitor.Stop("EvaluateSplits");
      // Ensure the asynchronous evaluation has written `new_candidates`
      // before the host reads them.
      dh::DefaultStream().Sync();
      driver.Push(new_candidates.begin(), new_candidates.end());
      expand_set = driver.Pop();
    }

    monitor.Start("FinalisePosition");
    this->FinalisePosition(p_tree, p_fmat, *task, p_out_position);
    monitor.Stop("FinalisePosition");
  }
};
// TreeUpdater implementing `grow_gpu_hist`: delegates tree construction to
// GPUHistMakerDevice and manages (re)initialisation, configuration
// (de)serialisation and the prediction cache.
class GPUHistMaker : public TreeUpdater {
  using GradientSumT = GradientPairPrecise;

 public:
  explicit GPUHistMaker(Context const* ctx, ObjInfo const* task) : TreeUpdater(ctx), task_{task} {};
  void Configure(const Args& args) override {
    // Used in test to count how many configurations are performed
    LOG(DEBUG) << "[GPU Hist]: Configure";
    hist_maker_param_.UpdateAllowUnknown(args);
    dh::CheckComputeCapability();
    // Force InitDataOnce() to run again on the next update.
    initialised_ = false;

    monitor_.Init("updater_gpu_hist");
  }

  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    FromJson(config.at("hist_train_param"), &this->hist_maker_param_);
    initialised_ = false;
  }
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["hist_train_param"] = ToJson(hist_maker_param_);
  }

  ~GPUHistMaker() {  // NOLINT
    dh::GlobalMemoryLogger().Log();
  }

  // Update one or more trees from the gradient pairs; `out_position` receives
  // the final node position of every row, one vector per tree.
  void Update(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
              common::Span<HostDeviceVector<bst_node_t>> out_position,
              const std::vector<RegTree*>& trees) override {
    monitor_.Start("Update");

    // build tree
    try {
      std::size_t t_idx{0};
      for (xgboost::RegTree* tree : trees) {
        this->UpdateTree(param, gpair, dmat, tree, &out_position[t_idx]);

        this->hist_maker_param_.CheckTreesSynchronized(tree);
        ++t_idx;
      }
      dh::safe_cuda(cudaGetLastError());
    } catch (const std::exception& e) {
      LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
    }
    monitor_.Stop("Update");
  }

  // One-time setup: pick the device, synchronise the column-sampling seed
  // across workers and construct the device-side maker.
  void InitDataOnce(TrainParam const* param, DMatrix* dmat) {
    CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
    info_ = &dmat->Info();

    // Synchronise the column sampling seed
    uint32_t column_sampling_seed = common::GlobalRandom()();
    collective::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
    this->column_sampler_ = std::make_shared<common::ColumnSampler>(column_sampling_seed);

    auto batch_param = BatchParam{param->max_bin, TrainParam::DftSparseThreshold()};
    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
    info_->feature_types.SetDevice(ctx_->gpu_id);
    maker = std::make_unique<GPUHistMakerDevice>(
        ctx_, !dmat->SingleColBlock(), info_->feature_types.ConstDeviceSpan(), info_->num_row_,
        *param, column_sampler_, info_->num_col_, batch_param);

    p_last_fmat_ = dmat;
    initialised_ = true;
  }

  void InitData(TrainParam const* param, DMatrix* dmat, RegTree const* p_tree) {
    if (!initialised_) {
      monitor_.Start("InitDataOnce");
      this->InitDataOnce(param, dmat);
      monitor_.Stop("InitDataOnce");
    }
    p_last_tree_ = p_tree;
    CHECK(hist_maker_param_.GetInitialised());
  }

  void UpdateTree(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
                  RegTree* p_tree, HostDeviceVector<bst_node_t>* p_out_position) {
    monitor_.Start("InitData");
    this->InitData(param, p_fmat, p_tree);
    monitor_.Stop("InitData");

    gpair->SetDevice(ctx_->gpu_id);
    maker->UpdateTree(gpair, p_fmat, task_, p_tree, p_out_position);
  }

  // Refresh cached predictions after training.  Returns false when the cache
  // cannot be reused (no maker yet, or a different DMatrix was trained on).
  bool UpdatePredictionCache(const DMatrix* data,
                             linalg::MatrixView<bst_float> p_out_preds) override {
    if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
      return false;
    }
    monitor_.Start("UpdatePredictionCache");
    bool result = maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
    monitor_.Stop("UpdatePredictionCache");
    return result;
  }

  MetaInfo* info_{};                          // NOLINT
  std::unique_ptr<GPUHistMakerDevice> maker;  // NOLINT

  [[nodiscard]] char const* Name() const override { return "grow_gpu_hist"; }
  [[nodiscard]] bool HasNodePosition() const override { return true; }

 private:
  bool initialised_{false};

  HistMakerTrainParam hist_maker_param_;

  DMatrix* p_last_fmat_{nullptr};
  RegTree const* p_last_tree_{nullptr};
  ObjInfo const* task_{nullptr};

  common::Monitor monitor_;
  std::shared_ptr<common::ColumnSampler> column_sampler_;
};
#if !defined(GTEST_TEST)
// Register this updater in the global factory under `grow_gpu_hist`.
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
    .describe("Grow tree with GPU.")
    .set_body([](Context const* ctx, ObjInfo const* task) {
      return new GPUHistMaker(ctx, task);
    });
#endif  // !defined(GTEST_TEST)
// TreeUpdater implementing `grow_gpu_approx`: like GPUHistMaker but rebuilds
// the device-side maker every Update() call, passing the per-row hessian to
// BatchParam (the approx method re-generates quantiles when hessian changes).
class GPUGlobalApproxMaker : public TreeUpdater {
 public:
  explicit GPUGlobalApproxMaker(Context const* ctx, ObjInfo const* task)
      : TreeUpdater(ctx), task_{task} {};
  void Configure(Args const& args) override {
    // Used in test to count how many configurations are performed
    LOG(DEBUG) << "[GPU Approx]: Configure";
    hist_maker_param_.UpdateAllowUnknown(args);
    dh::CheckComputeCapability();
    // Force InitDataOnce() to run again on the next update.
    initialised_ = false;

    monitor_.Init(this->Name());
  }
  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    FromJson(config.at("hist_train_param"), &this->hist_maker_param_);
    initialised_ = false;
  }
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["hist_train_param"] = ToJson(hist_maker_param_);
  }
  ~GPUGlobalApproxMaker() override { dh::GlobalMemoryLogger().Log(); }

  void Update(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
              common::Span<HostDeviceVector<bst_node_t>> out_position,
              const std::vector<RegTree*>& trees) override {
    monitor_.Start("Update");

    this->InitDataOnce(p_fmat);
    // build tree
    hess_.resize(gpair->Size());
    auto hess = dh::ToSpan(hess_);

    gpair->SetDevice(ctx_->Device());
    auto d_gpair = gpair->ConstDeviceSpan();
    auto cuctx = ctx_->CUDACtx();
    // Extract the hessian component of each gradient pair; it is handed to
    // BatchParam below.
    thrust::transform(cuctx->CTP(), dh::tcbegin(d_gpair), dh::tcend(d_gpair), dh::tbegin(hess),
                      [=] XGBOOST_DEVICE(GradientPair const& g) { return g.GetHess(); });

    auto const& info = p_fmat->Info();
    info.feature_types.SetDevice(ctx_->Device());
    auto batch = BatchParam{param->max_bin, hess, !task_->const_hess};
    maker_ = std::make_unique<GPUHistMakerDevice>(
        ctx_, !p_fmat->SingleColBlock(), info.feature_types.ConstDeviceSpan(), info.num_row_,
        *param, column_sampler_, info.num_col_, batch);

    std::size_t t_idx{0};
    for (xgboost::RegTree* tree : trees) {
      this->UpdateTree(gpair, p_fmat, tree, &out_position[t_idx]);

      this->hist_maker_param_.CheckTreesSynchronized(tree);
      ++t_idx;
    }

    monitor_.Stop("Update");
  }

  // One-time setup: verify the device and synchronise the column-sampling
  // seed across workers.
  void InitDataOnce(DMatrix* p_fmat) {
    if (this->initialised_) {
      return;
    }

    monitor_.Start(__func__);
    CHECK(ctx_->IsCUDA()) << error::InvalidCUDAOrdinal();
    // Synchronise the column sampling seed
    uint32_t column_sampling_seed = common::GlobalRandom()();
    collective::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
    this->column_sampler_ = std::make_shared<common::ColumnSampler>(column_sampling_seed);

    p_last_fmat_ = p_fmat;
    initialised_ = true;
    monitor_.Stop(__func__);
  }
  void InitData(DMatrix* p_fmat, RegTree const* p_tree) {
    this->InitDataOnce(p_fmat);
    p_last_tree_ = p_tree;
    CHECK(hist_maker_param_.GetInitialised());
  }

  void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
                  HostDeviceVector<bst_node_t>* p_out_position) {
    monitor_.Start("InitData");
    this->InitData(p_fmat, p_tree);
    monitor_.Stop("InitData");

    gpair->SetDevice(ctx_->gpu_id);
    maker_->UpdateTree(gpair, p_fmat, task_, p_tree, p_out_position);
  }
  // Refresh cached predictions after training.  Returns false when the cache
  // cannot be reused (no maker yet, or a different DMatrix was trained on).
  bool UpdatePredictionCache(const DMatrix* data,
                             linalg::MatrixView<bst_float> p_out_preds) override {
    if (maker_ == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
      return false;
    }
    monitor_.Start("UpdatePredictionCache");
    bool result = maker_->UpdatePredictionCache(p_out_preds, p_last_tree_);
    monitor_.Stop("UpdatePredictionCache");
    return result;
  }

  [[nodiscard]] char const* Name() const override { return "grow_gpu_approx"; }
  [[nodiscard]] bool HasNodePosition() const override { return true; }

 private:
  bool initialised_{false};

  HistMakerTrainParam hist_maker_param_;
  dh::device_vector<float> hess_;
  std::shared_ptr<common::ColumnSampler> column_sampler_;
  std::unique_ptr<GPUHistMakerDevice> maker_;

  DMatrix* p_last_fmat_{nullptr};
  RegTree const* p_last_tree_{nullptr};
  ObjInfo const* task_{nullptr};

  common::Monitor monitor_;
};
#if !defined(GTEST_TEST)
// Register this updater in the global factory under `grow_gpu_approx`.
XGBOOST_REGISTER_TREE_UPDATER(GPUApproxMaker, "grow_gpu_approx")
    .describe("Grow tree with GPU.")
    .set_body([](Context const* ctx, ObjInfo const* task) {
      return new GPUGlobalApproxMaker(ctx, task);
    });
#endif  // !defined(GTEST_TEST)
} // namespace xgboost::tree
|
e149d20e90200d761a8b644fbb64a87a887cb70d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_complex.h>
#include <time.h>
#include "gpu_beamformer.h"
//nvcc test_cublasCgemmBatched.cu -lcublas -o test_cublasCgemmBatched
//sudo nvidia-persistenced --user peix
using namespace std;
/*#define TRANS_INPUT 1 // whether transpose input data
#define RECORD_BF_RAW 0 // whether record raw beamformed data
// Number of frequency bins, integer multiples of BLOCK_COLS = 8
#define N_FBIN 16
// Number of time samples, integer multiples of TILE_DIM = 32
#define N_TSAMP 4096
// Number of formed beams, integer multiples of BLOCK_ROWS = 4
#define N_BEAM 64
// Number of elements, integer multiples of BLOCK_ROWS = 4
#define N_ELEM 192
// Number of time samples for each short time integration
#define N_TSAMP_ACCU 32
#define N_POLS 2 // Number of polarizations
#define N_BEAM_STOKES (N_BEAM/N_POLS) // Number of beams after Stokes calculation
#define N_STOKES 4 // Number of Stokes items
#define N_ACCU (N_TSAMP/N_TSAMP_ACCU) // Number of short time integrations
#define N_INPUTS (N_ELEM*N_FBIN*N_TSAMP) // Number of complex samples to process
#define N_WEIGHTS (N_ELEM*N_FBIN*N_BEAM) // Number of complex beamformer weights
#define N_OUTPUTS_BF (N_BEAM*N_TSAMP*N_FBIN) // Number of complex samples in beamformed output structure
#define N_OUTPUTS (N_BEAM_STOKES*N_ACCU*N_FBIN*N_STOKES) // Number of samples in accumulator output structure
*/
// GPU events used to time device-side work (hipEventElapsedTime).
hipEvent_t start, stop;
float elapsedTime;
// Wall-clock timers for file I/O and CPU-side sections.
clock_t begin,end,begin_main,end_main;
double time_spent;
// Three dimensions of the CUDA block, dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS).
// NOTE(review): these are reassigned per-kernel inside runBeamformer(), which is
// why they are mutable variables rather than constants.
//const int TILE_DIM = 16; //32
static int TILE_DIM = 16; //16
static int BLOCK_ROWS = 4; //4
static int BLOCK_COLS = 8; //8
dim3 dimGrid;
dim3 dimBlock(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
// Matrix dimensions for the batched GEMM:
// C(beams x tsamp) = A(beams x elements) * B(elements x tsamp), one batch per frequency bin.
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C;
// handle to the cuBLAS library context
static hipblasHandle_t handle;
hipblasStatus_t stat;      // last hipBLAS status
hipError_t cudaStat;       // last HIP runtime status
// Define variables on device
static hipComplex *d_weights;     // beamformer weights (complex)
static hipComplex *d_tdata;       // input data transposed for the GEMM
static hipComplex *d_idata;       // raw input converted to complex
static hipComplex *d_net_data;    // intermediate buffer for network-packet reordering
static hipComplex *d_beamformed;  // GEMM output (beamformed voltages)
static hipComplex **d_arr_A = NULL;  // device view of the batch pointer arrays below
static hipComplex **d_arr_B = NULL;
hipComplex **d_arr_C = NULL;
float *d_weights_r;               // device view of pinned h_weights_r (zero copy)
signed char *d_idata_r;           // raw interleaved (re,im) 8-bit input samples
float *d_stokes_out, *d_accu_stokes_in, *d_power_out;
float *d_accu_stokes;             // final accumulated output
// Define variables on host (pinned allocations)
static hipComplex **h_arr_A;      // per-frequency-bin batch pointers for the GEMM
static hipComplex **h_arr_B;
static hipComplex **h_arr_C;
float *h_weights_r;               // weights as interleaved (re,im) floats
//signed char *h_data_r; // for file read
hipComplex *h_beamformed;         // optional copy-back of raw beamformed data
float *h_accu_stokes;             // copy-back of accumulated Stokes/power output
//CUDA_VISIBLE_DEVICES = 1;
// Directory and file-name buffers.
//char dir[128] = "/home/peix/workspace/paf_sim/";
char dir[128] = "/buff0/";
char dir_output[128];
char fn_weight[256];
char fn_data[256];
char fn_output_bf[256];
char fn_output[256];
// CUDA device ordinal to be used.
int cuda_core = 0;
/*
 * Initialise the beamformer on GPU `cuda_core`:
 *  - select the device and create timing events plus the hipBLAS handle,
 *  - set the batched-GEMM matrix dimensions,
 *  - allocate pinned (mapped) host buffers and device buffers,
 *  - build the per-frequency-bin batch pointer arrays used by
 *    hipblasCgemmBatched and map the pinned arrays into device space.
 *
 * Fix over the original: the return values of hipSetDevice(), hipblasCreate()
 * and every allocation are now checked (the original stored hipSetDevice's
 * result in an unused variable and ignored all other errors, so a bad device
 * ordinal or OOM surfaced only as a crash much later).
 */
void initBeamformer(int cuda_core){
    // Fail fast on an invalid device ordinal.
    cudaStat = hipSetDevice(cuda_core);
    assert(!cudaStat);
    printf("Set CUDA device to GPU#: %d\n",cuda_core);
    sprintf(dir_output,"%s%s%d%s" ,dir,"gpu",cuda_core,"/");
    // Create cuda events and the cublas handle.
    hipEventCreate( &start );
    hipEventCreate( &stop ) ;
    stat = hipblasCreate(&handle);
    assert(stat == HIPBLAS_STATUS_SUCCESS);
    // Matrix dimension assignment: C(beam x tsamp) = A(beam x elem) * B(elem x tsamp).
    nr_rows_A = N_BEAM;
    nr_cols_A = N_ELEM;
    nr_rows_B = N_ELEM;
    nr_cols_B = N_TSAMP;
    nr_rows_C = N_BEAM;
    // Pinned, mapped host memory for the weights (interleaved re/im floats).
    cudaStat = hipHostMalloc(&h_weights_r,2*N_WEIGHTS*sizeof(float),hipHostMallocMapped);
    assert(!cudaStat);
    // Pinned, mapped host memory for the raw beamformed output (optional copy-back).
    cudaStat = hipHostMalloc(&h_beamformed,N_OUTPUTS_BF*sizeof(hipComplex),hipHostMallocMapped);
    assert(!cudaStat);
    // Pinned host memory for the accumulated Stokes/power output.
    cudaStat = hipHostMalloc(&h_accu_stokes,N_OUTPUTS*sizeof(float));
    assert(!cudaStat);
    // Host arrays of batch pointers consumed by hipblasCgemmBatched.
    // NOTE(review): sized rows*cols*N_FBIN pointers although only N_FBIN
    // entries are ever used — confirm before shrinking.
    cudaStat = hipHostMalloc((void **)&h_arr_A, nr_rows_A * nr_cols_A *N_FBIN*sizeof(hipComplex*),hipHostMallocMapped);
    assert(!cudaStat);
    cudaStat = hipHostMalloc((void **)&h_arr_B, nr_rows_B * nr_cols_B *N_FBIN*sizeof(hipComplex*),hipHostMallocMapped);
    assert(!cudaStat);
    cudaStat = hipHostMalloc((void **)&h_arr_C, nr_rows_C * nr_cols_B *N_FBIN*sizeof(hipComplex*),hipHostMallocMapped);
    assert(!cudaStat);
    // Device buffers.
    cudaStat = hipMalloc(&d_weights, N_WEIGHTS*sizeof(hipComplex));
    assert(!cudaStat);
    cudaStat = hipMalloc(&d_idata_r,2*nr_rows_B * nr_cols_B *N_FBIN*sizeof(signed char));
    assert(!cudaStat);
    cudaStat = hipMalloc(&d_tdata, N_INPUTS*sizeof(hipComplex));
    assert(!cudaStat);
    cudaStat = hipMalloc(&d_idata, N_INPUTS*sizeof(hipComplex));
    assert(!cudaStat);
    cudaStat = hipMalloc(&d_net_data, N_INPUTS*sizeof(hipComplex));
    assert(!cudaStat);
    cudaStat = hipMalloc(&d_beamformed,N_OUTPUTS_BF*sizeof(hipComplex));
    assert(!cudaStat);
    cudaStat = hipMalloc(&d_stokes_out,N_OUTPUTS_BF*2*sizeof(float));
    assert(!cudaStat);
    cudaStat = hipMalloc(&d_power_out,N_OUTPUTS_BF*sizeof(float));
    assert(!cudaStat);
    cudaStat = hipMalloc(&d_accu_stokes_in,N_OUTPUTS_BF*2*sizeof(float));
    assert(!cudaStat);
    cudaStat = hipMalloc(&d_accu_stokes,N_OUTPUTS*sizeof(float));
    assert(!cudaStat);
    // Point each batch entry at its frequency-bin slice of the device buffers.
    for(int i = 0; i < N_FBIN; i++){
        h_arr_A[i] = d_weights + i*nr_rows_A*nr_cols_A;
        h_arr_B[i] = d_tdata + i*nr_rows_B*nr_cols_B;
        h_arr_C[i] = d_beamformed + i*nr_rows_C*nr_cols_B;
    }
    // Get the device pointers for the pinned CPU memory mapped into the GPU
    // memory (zero copy).
    cudaStat = hipHostGetDevicePointer((void **)&d_arr_A, (void *)h_arr_A, 0);
    assert(!cudaStat);
    cudaStat = hipHostGetDevicePointer((void **)&d_arr_B, (void *)h_arr_B, 0);
    assert(!cudaStat);
    cudaStat = hipHostGetDevicePointer((void **)&d_arr_C, (void *)h_arr_C, 0);
    assert(!cudaStat);
    cudaStat = hipHostGetDevicePointer((void **)&d_weights_r, (void *)h_weights_r, 0);
    assert(!cudaStat);
}
/*
 * Load the beamformer weights from `<dir><filename>` into the pinned host
 * buffer h_weights_r as interleaved (re, im) float pairs.
 *
 * Fix over the original: fopen() is NULL-checked (the original crashed in
 * fread on a missing file) and a short read is reported instead of being
 * silently discarded in an unused variable.
 */
void loadWeights(char * filename){
    sprintf(fn_weight,"%s%s" ,dir,filename);
    printf("Read weights from: %s\n",fn_weight);
    FILE * f_weight = fopen(fn_weight, "rb");
    if (f_weight == NULL) {
        fprintf(stderr, "loadWeights: cannot open %s\n", fn_weight);
        return;
    }
    size_t n_expected = (size_t)nr_rows_A * nr_cols_A * 2 * N_FBIN;
    size_t n_read = fread(h_weights_r, sizeof(float), n_expected, f_weight);
    if (n_read != n_expected) {
        fprintf(stderr, "loadWeights: short read (%zu of %zu floats) from %s\n",
                n_read, n_expected, fn_weight);
    }
    fclose(f_weight);
}
// Pack interleaved (re, im) float pairs into hipComplex values.
// Each thread (x, y, z) converts exactly one sample; `nreps` is kept for
// signature compatibility but is not used.  There is no bounds check, so the
// launch configuration must tile the data exactly.
__global__ void realToComplex(float *idata, hipComplex *odata, int width, int height, int nreps)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    // Flat sample index into the 3-D (x, y, z) layout.
    const int out = (ix * height + iy) * width + iz;
    odata[out].x = idata[2 * out];      // real part
    odata[out].y = idata[2 * out + 1];  // imaginary part
}
// Convert interleaved 8-bit (re, im) samples into hipComplex values.
// Each thread (x, y, z) converts exactly one sample; `nreps` is kept for
// signature compatibility but is not used.  No bounds check: the launch
// configuration must tile the data exactly.
__global__ void realDataToComplex(signed char *idata, hipComplex *odata, int width, int height, int nreps)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    // Flat sample index into the 3-D (x, y, z) layout.
    const int out = (ix * height + iy) * width + iz;
    odata[out].x = (float)idata[2 * out];      // real part
    odata[out].y = (float)idata[2 * out + 1];  // imaginary part
}
// Element-wise device-to-device copy of one N_FBIN x N_TSAMP x N_ELEM cube;
// each thread copies exactly one complex sample.
__global__ void copyData(hipComplex *idata, hipComplex *odata)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;
    const int iz = blockDim.z * blockIdx.z + threadIdx.z;
    const int idx = ix * N_ELEM * N_TSAMP + iy * N_ELEM + iz;
    odata[idx] = idata[idx];
}
// Reorder one group of network packets: the input is laid out
// (packet, time-in-packet, element*freq) and the output swaps the packet and
// time axes, so samples belonging to the same time step become contiguous.
__global__ void transposeNetData(hipComplex *idata, hipComplex *odata)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;  // packet index within the group
    int y = blockDim.y * blockIdx.y + threadIdx.y;  // time sample within the packet
    int z = blockDim.z * blockIdx.z + threadIdx.z;  // element*frequency channel
    // N_PACK_PER_ELEM, N_TSAMP_PER_PACK, N_ELEM_PER_PACK*N_FBIN
    int in_p = x*N_TSAMP_PER_PACK*N_ELEM_PER_PACK*N_FBIN+y*N_ELEM_PER_PACK*N_FBIN+z;
    int out_p = y*N_PACK_PER_ELEM*N_ELEM_PER_PACK*N_FBIN+x*N_ELEM_PER_PACK*N_FBIN+z;
    odata[out_p] = idata[in_p];
    // Earlier variant that processed all N_PACK_PER_TSAMP groups in one launch;
    // the caller now invokes the kernel once per group instead.
    /*__syncthreads();
    for (int i=0;i<N_PACK_PER_TSAMP;i++){
    odata[4096*12*i+out_p] = idata[4096*12*i+in_p];
    }
    __syncthreads();*/
}
// Transpose (time, element, freq) input into (freq, time, element) order so
// that each frequency bin forms one contiguous element x time matrix for the
// batched GEMM.  Launched with grid tiles covering (N_TSAMP, N_ELEM, N_FBIN).
__global__ void transposeData(hipComplex *idata, hipComplex *odata)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;  // time sample
    int y = blockDim.y * blockIdx.y + threadIdx.y;  // element
    int z = blockDim.z * blockIdx.z + threadIdx.z;  // frequency bin
    //odata[z*N_ELEM*N_TSAMP + y*N_ELEM + x] = idata[x*N_ELEM*N_FBIN + y*N_FBIN + z];
    odata[z*N_ELEM*N_TSAMP + x*N_ELEM + y] = idata[x*N_ELEM*N_FBIN + y*N_FBIN + z];
}
// Transpose (time, freq, element) input into (freq, time, element) order;
// single-polarization path.  Launched with grid tiles covering
// (N_TSAMP, N_FBIN, N_ELEM).
__global__ void transposeData2(hipComplex *idata, hipComplex *odata)
{
    const int t = blockDim.x * blockIdx.x + threadIdx.x;  // time sample
    const int f = blockDim.y * blockIdx.y + threadIdx.y;  // frequency bin
    const int e = blockDim.z * blockIdx.z + threadIdx.z;  // element
    // (t, f, e) -> (f, t, e)
    odata[(f * N_TSAMP + t) * N_ELEM + e] = idata[(t * N_FBIN + f) * N_ELEM + e];
}
// Compute four Stokes-style products for each dual-pol beam sample.
// Input: N_FBIN x N_TSAMP x N_BEAM complex samples, where each pair of
// adjacent samples (n, n+1) forms one Stokes beam; output gains a factor of
// N_STOKES in the innermost dimension.
// NOTE(review): the labels below follow the original comments, which imply
// sample n carries the (X, Y) values in .x/.y and sample n+1 the matching
// second components — confirm against the upstream data layout before
// relying on the X/Y interpretation.
__global__ void calcStokes(hipComplex * idata, float * odata) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;  // frequency bin
    int y = blockDim.y * blockIdx.y + threadIdx.y;  // time sample
    int z = blockDim.z * blockIdx.z + threadIdx.z;  // Stokes beam
    int m,n;
    //__syncthreads();
    //Dimension: N_FBIN x N_TSAMP x N_BEAM_STOKES
    m = N_STOKES*(x*N_TSAMP*N_BEAM_STOKES+y*N_BEAM_STOKES+z);  // output index
    n = 2*(x*N_TSAMP*N_BEAM_STOKES+y*N_BEAM_STOKES+z);         // first input sample of the pair
    // Re(X)^2 + Im(X)^2
    odata[m] = idata[n].x * idata[n].x + idata[n+1].x * idata[n+1].x;
    // Re(Y)^2 + Im(Y)^2
    odata[m+1] = idata[n].y * idata[n].y + idata[n+1].y * idata[n+1].y;
    // Re(XY*)
    odata[m+2] = idata[n].x * idata[n+1].x + idata[n].y * idata[n+1].y;
    // Im(XY*)
    odata[m+3] = idata[n].y * idata[n+1].x - idata[n].x * idata[n+1].y;
    //__syncthreads();
}
__global__ void calcPWR(hipComplex * idata, float * odata) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z + threadIdx.z;
int i;
//__syncthreads();
//Dimension: N_FBIN x N_TSAMP x N_BEAM_STOKES
i = x*N_TSAMP*N_BEAM_STOKES+y*N_BEAM_STOKES+z;
// Power
odata[i] = idata[i].x * idata[i].x + idata[i].y * idata[i].y;
}
// Within each frequency plane, transpose (time, beam*stokes) into
// (beam*stokes, time) so the subsequent accumulation reads contiguous
// time samples.  Grid covers (N_FBIN, N_TSAMP, N_BEAM_STOKES*N_STOKES).
__global__ void transposeStokes(float *idata, float *odata)
{
    const int f = blockDim.x * blockIdx.x + threadIdx.x;  // frequency bin
    const int t = blockDim.y * blockIdx.y + threadIdx.y;  // time sample
    const int s = blockDim.z * blockIdx.z + threadIdx.z;  // beam*stokes item
    // Offset of this frequency plane.
    const int plane = f * N_TSAMP * N_BEAM_STOKES * N_STOKES;
    odata[plane + s * N_TSAMP + t] = idata[plane + t * N_BEAM_STOKES * N_STOKES + s];
}
// Short-time integration: sum N_TSAMP_ACCU consecutive time samples into one
// output bin.  Output layout: N_FBIN x (N_BEAM_STOKES*N_STOKES) x N_ACCU.
// The caller zeroes `odata` (hipMemset) before launching.
__global__ void accuStokes(float *idata, float *odata){
    const int f = blockDim.x * blockIdx.x + threadIdx.x;  // frequency bin
    const int s = blockDim.y * blockIdx.y + threadIdx.y;  // beam*stokes item
    const int a = blockDim.z * blockIdx.z + threadIdx.z;  // integration window
    const int out = (f * N_BEAM_STOKES * N_STOKES + s) * N_ACCU + a;
    for (int i = 0; i < N_TSAMP_ACCU; ++i) {
        odata[out] += idata[out * N_TSAMP_ACCU + i];
    }
}
// Short-time integration for the total-power path: sum N_TSAMP_ACCU
// consecutive time samples into one output bin.  Output layout:
// N_FBIN x N_ACCU x N_BEAM_STOKES.  Caller zeroes `odata` beforehand.
__global__ void accuPWR(float *idata, float *odata){
    const int f = blockDim.x * blockIdx.x + threadIdx.x;  // frequency bin
    const int a = blockDim.y * blockIdx.y + threadIdx.y;  // integration window
    const int b = blockDim.z * blockIdx.z + threadIdx.z;  // beam
    const int out = (f * N_ACCU + a) * N_BEAM_STOKES + b;
    for (int i = 0; i < N_TSAMP_ACCU; ++i) {
        odata[out] += idata[out * N_TSAMP_ACCU + i];
    }
}
/*
 * Release every host/device resource allocated by initBeamformer().
 *
 * Fixes over the original:
 *  - d_arr_A/B/C are device aliases of the pinned host arrays h_arr_A/B/C
 *    (obtained via hipHostGetDevicePointer).  The original passed them to
 *    hipFree() AFTER the backing pinned memory had already been released with
 *    hipHostFree(), i.e. an invalid/double free.  Mapped aliases must not be
 *    freed separately, so those calls are removed.
 *  - d_idata_r (allocated with hipMalloc in initBeamformer) was never freed;
 *    it is released here.
 */
void bfCleanup(){
    // Free pinned host memory (this also invalidates the mapped device
    // aliases d_arr_A/B/C and d_weights_r — do not hipFree those).
    hipHostFree(h_arr_A);
    hipHostFree(h_arr_B);
    hipHostFree(h_arr_C);
    hipHostFree(h_weights_r);
    //hipHostFree(h_data_r);
    hipHostFree(h_beamformed);
    // NOTE(review): h_accu_stokes was deliberately left allocated in the
    // original (a consumer may still hold the buffer) — confirm before freeing.
    //hipHostFree(h_accu_stokes);
    // Free GPU memory.
    if (d_tdata != NULL) {
        hipFree(d_tdata);
    }
    if (d_idata != NULL) {
        hipFree(d_idata);
    }
    if (d_idata_r != NULL) {
        hipFree(d_idata_r);  // was leaked by the original cleanup
    }
    if (d_net_data != NULL) {
        hipFree(d_net_data);
    }
    if (d_weights != NULL) {
        hipFree(d_weights);
    }
    if (d_beamformed != NULL) {
        hipFree(d_beamformed);
    }
    if (d_stokes_out != NULL) {
        hipFree(d_stokes_out);
    }
    if (d_power_out != NULL) {
        hipFree(d_power_out);
    }
    if (d_accu_stokes_in != NULL) {
        hipFree(d_accu_stokes_in);
    }
    if (d_accu_stokes != NULL) {
        hipFree(d_accu_stokes);
    }
    // Free up and release cublas handle and timing events.
    hipblasDestroy(handle);
    hipEventDestroy( start );
    hipEventDestroy( stop );
}
//int main(int argc, char *argv[])
//void runBeamformer(signed char * data_in, float * data_out)
void runBeamformer()
//void runBeamformer(signed char * data_in)
{
if (GPU_TEST_PRINT) {begin_main = clock();}
//print(hipGetDeviceCount());
//print(hipGetDeviceProperties(0));
//if(argc>1){
//cuda_core = atoi(argv[1]);}
//hipSetDevice(cuda_core);
//sprintf(dir_output,"%s%s%d%s" ,dir,"gpu",cuda_core,"/");
// Convert to complex numbers
if (GPU_TEST_PRINT) {begin = clock();}
// Weights in dimension of N_FBIN x N_BEAM x N_ELE
TILE_DIM = 4;
BLOCK_ROWS = 1;
BLOCK_COLS = 8;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_ELEM/TILE_DIM, N_BEAM/BLOCK_ROWS, N_FBIN/BLOCK_COLS);
//dimBlock = dim3(16, 16, 2);// number of threads per block must be less than 1024
hipDeviceSynchronize();
hipLaunchKernelGGL(( realToComplex), dim3(dimGrid),dim3(dimBlock), 0, 0, d_weights_r, d_weights, N_FBIN, N_BEAM, N_ELEM);
hipDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("**************************************************************************\n");
printf("Convert weights to complex numbers elapsed: %3.3f ms\n",time_spent);
}
// Read data from file
//begin = clock();
//sprintf(fn_data,"%s%s%d%s%d%s%d%s" ,dir,"data_",N_FBIN,"x",N_ELEM,"x",N_TSAMP,".bin");
/*sprintf(fn_data,"%s%s%d%s%d%s%d%s%d%s" ,dir,"data",cuda_core,"_",N_FBIN,"x",N_ELEM,"x",N_TSAMP,".bin");
FILE * f_data;
f_data = fopen(fn_data, "rb");
size_t size2 = fread(h_data_r, sizeof(float), 2*nr_rows_B * nr_cols_B *N_FBIN, f_data);
fclose(f_data);*/
//h_data_r = data_in;
//cudaStat = hipHostGetDevicePointer((void **)&d_idata_r, (void *)data_in, 0);
//assert(!cudaStat);
//memcpy(h_data_r,data_in,N_INPUTS*2*sizeof(signed char));
//hipMemcpy(d_idata_r,h_data_r,2*nr_rows_B * nr_cols_B *N_FBIN*sizeof(float),hipMemcpyHostToDevice);
/*for (int i=0;i<8192;i++){
printf("%d ",h_data_r[i]);
}*/
/*if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Read data from %s elapsed: %3.3f ms\n",fn_data,time_spent);
}*/
// Convert to complex numbers
if (GPU_TEST_PRINT) {begin = clock();}
hipDeviceSynchronize();
// If input data dimension is: N_TSAMP x N_ELE x N_FBIN
if(TRANS_INPUT==1){
//printf("TRANS_INPUT is: %d\n",TRANS_INPUT);
//dimGrid = dim3(N_TSAMP/TILE_DIM, N_ELEM/BLOCK_ROWS,N_FBIN/BLOCK_COLS);
TILE_DIM = 32;
BLOCK_ROWS = 8;
BLOCK_COLS = 4;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_TSAMP/TILE_DIM, N_FBIN/BLOCK_ROWS,N_ELEM/BLOCK_COLS);
hipLaunchKernelGGL(( realDataToComplex), dim3(dimGrid),dim3(dimBlock), 0, 0, d_idata_r, d_idata, N_ELEM, N_FBIN, N_TSAMP);
}
else{
dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS,N_ELEM/BLOCK_COLS);
hipLaunchKernelGGL(( realDataToComplex), dim3(dimGrid),dim3(dimBlock), 0, 0, d_idata_r, d_idata, N_ELEM, N_TSAMP, N_FBIN);
}
hipDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Convert data to complex numbers elapsed: %3.3f ms\n",time_spent);
}
if (GPU_TEST_PRINT) {begin = clock();}
hipDeviceSynchronize();
// If transpose input data is needed, then transpose data to dimension: N_FBIN x N_TSAMP x N_ELE
if(TRANS_INPUT==1){
if(FAKED_INPUT==1){
TILE_DIM = 12;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_PACK_PER_ELEM/TILE_DIM, N_TSAMP_PER_PACK/BLOCK_ROWS,N_ELEM_PER_PACK*N_FBIN/BLOCK_COLS);
hipLaunchKernelGGL(( transposeNetData), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_net_data);
for (int i=0;i<N_PACK_PER_TSAMP;i++){
hipLaunchKernelGGL(( transposeNetData), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata+4096*12*i, d_net_data+4096*12*i);
}
TILE_DIM = 16;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_TSAMP/TILE_DIM, N_ELEM/BLOCK_ROWS,N_FBIN/BLOCK_COLS);
hipLaunchKernelGGL(( transposeData), dim3(dimGrid), dim3(dimBlock), 0, 0, d_net_data, d_tdata);
}
else{
if(N_POLS==1){
//printf("N_POLS is: %d\n",N_POLS);
TILE_DIM = 32;
BLOCK_ROWS = 8;
BLOCK_COLS = 4;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_TSAMP/TILE_DIM, N_FBIN/BLOCK_ROWS,N_ELEM/BLOCK_COLS);
hipLaunchKernelGGL(( transposeData2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_tdata);
}
else{
dimGrid = dim3(N_TSAMP/TILE_DIM, N_ELEM/BLOCK_ROWS,N_FBIN/BLOCK_COLS);
hipLaunchKernelGGL(( transposeData), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_tdata);
}
}
}
else{
dimGrid = dim3(N_TSAMP/TILE_DIM, N_ELEM/BLOCK_ROWS,N_FBIN/BLOCK_COLS);
hipLaunchKernelGGL(( copyData), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_tdata);
}
/*if(TRANS_INPUT==1){
hipLaunchKernelGGL(( transposeData), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_tdata);
}
else{
hipLaunchKernelGGL(( copyData), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_tdata);
}*/
hipDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Transpose data elapsed: %3.3f ms\n",time_spent);
}
// Execute matrix multipulication kernel
if (GPU_TEST_PRINT) {hipEventRecord( start, 0 ) ;}
// Leading dimensions are always the rows of each matrix since the data is stored in a column-wise order.
int lda=nr_rows_A, ldb=nr_rows_B, ldc=nr_rows_C;
hipComplex alf;
hipComplex bet;
alf.x = 1;
alf.y = 0;
bet.x = 0;
bet.y = 0;
int batchCount = N_FBIN; // There must be the same number of batches in each array.
hipDeviceSynchronize();
stat = hipblasCgemmBatched(
handle, // handle to the cuBLAS library context.
HIPBLAS_OP_N, // Operation on matrices within array A.
HIPBLAS_OP_N, // Operation on matrices within array B.
nr_rows_A, // Number of rows in matrix A and C.
nr_cols_B, // Number of columns in matrix B and C.
nr_cols_A, // Number of columns and rows in matrix A and B respectively.
&alf, // Scalar used for multiplication.
(const hipComplex **)d_arr_A, // Weight array of pointers.
lda, // Leading dimension of each batch or matrix in array A.
(const hipComplex **)d_arr_B, // Data array of pointers.
ldb, // Leading dimension of each batch or matrix in array B.
&bet, // Scalar used for multiplication.
(hipComplex **)d_arr_C, // Output array of pointers.
ldc, // Leading dimension of each batch or matrix in array C.
batchCount); // Number of batches in each array.
hipDeviceSynchronize();
if (stat == HIPBLAS_STATUS_INVALID_VALUE) {
printf("RTBF: Invalid CUBLAS values\n");
} else if (stat == HIPBLAS_STATUS_EXECUTION_FAILED) {
printf("RTBF: Execution failed.\n");
}
if(stat != HIPBLAS_STATUS_SUCCESS){
cerr << "hipblasCgemmBatched failed" << endl;
exit(1);
}
assert(!hipGetLastError());
if (GPU_TEST_PRINT) {
hipEventRecord( stop, 0 ) ;
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime,start, stop );
printf( "Matrix multiplication kernel(hipblasSgemmBatched) duration: %3.3f ms\n", elapsedTime );
}
if (RECORD_BF_RAW==1){
// copy beamformed data back to host, zero copy cannot map memory from GPU to CPU
if (GPU_TEST_PRINT) {begin = clock();}
hipDeviceSynchronize();
cudaStat = hipMemcpy(h_beamformed, d_beamformed, N_OUTPUTS_BF*sizeof(hipComplex), hipMemcpyDeviceToHost);
assert(!cudaStat);
//cudaStat = hipHostGetDevicePointer((void **)&d_beamformed, (void *)h_beamformed, 0);
//assert(!cudaStat);
hipDeviceSynchronize();
//dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS, N_BEAM/BLOCK_COLS);
//copyData<<<dimGrid, dimBlock>>>(d_beamformed, h_beamformed);
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Copy beamformed data back to host elapsed: %3.3f ms\n",time_spent);
}
if (GPU_TEST_PRINT) {begin = clock();}
// Write beamformed result to file
sprintf(fn_output_bf,"%s%s%d%s%d%s%d%s" ,dir_output,"output_bf_",N_FBIN,"x",N_BEAM,"x",N_TSAMP,".bin");///home/peix/workspace/paf_sim/output_**.bin
FILE * f_output_bf;
f_output_bf = fopen(fn_output_bf, "wb");
fwrite(h_beamformed, sizeof(hipComplex), N_OUTPUTS_BF, f_output_bf);
fclose(f_output_bf);
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Write beamformed result to file elapsed: %3.3f ms\n",time_spent);
}
}
if (GPU_TEST_PRINT) {begin = clock();}
hipDeviceSynchronize();
if (N_STOKES==4){
dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS, N_BEAM_STOKES/BLOCK_COLS);
hipLaunchKernelGGL(( calcStokes), dim3(dimGrid),dim3(dimBlock), 0, 0, d_beamformed, d_stokes_out);
dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS, N_BEAM_STOKES*N_STOKES/BLOCK_COLS);
hipLaunchKernelGGL(( transposeStokes), dim3(dimGrid),dim3(dimBlock), 0, 0, d_stokes_out,d_accu_stokes_in);
hipMemset(d_accu_stokes,0,N_OUTPUTS*sizeof(float));
dimGrid = dim3(N_FBIN/TILE_DIM, N_BEAM_STOKES*N_STOKES/BLOCK_ROWS, N_ACCU/BLOCK_COLS);
hipLaunchKernelGGL(( accuStokes), dim3(dimGrid),dim3(dimBlock), 0, 0, d_accu_stokes_in, d_accu_stokes);}
else if (N_STOKES==1){
TILE_DIM = 8;
BLOCK_ROWS = 128;
BLOCK_COLS = 1;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS, N_BEAM_STOKES/BLOCK_COLS);
hipLaunchKernelGGL(( calcPWR), dim3(dimGrid),dim3(dimBlock), 0, 0, d_beamformed, d_power_out);
hipMemset(d_accu_stokes,0,N_OUTPUTS*sizeof(float));
TILE_DIM = 8;
BLOCK_ROWS = 1;
BLOCK_COLS = 1;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
//dimGrid = dim3(N_FBIN/TILE_DIM, N_BEAM_STOKES*N_STOKES/BLOCK_ROWS, N_ACCU/BLOCK_COLS);
dimGrid = dim3(N_FBIN/TILE_DIM, N_ACCU/BLOCK_ROWS, N_BEAM_STOKES*N_STOKES/BLOCK_COLS);
hipLaunchKernelGGL(( accuPWR), dim3(dimGrid),dim3(dimBlock), 0, 0, d_power_out, d_accu_stokes);
}
hipDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Calculate Stokes parameter and accumulate data elapsed: %3.3f ms\n",time_spent);
}
begin = clock();
hipDeviceSynchronize();
// copy accumulated Stokes data back to host
cudaStat = hipMemcpy(h_accu_stokes, d_accu_stokes, N_OUTPUTS*sizeof(float), hipMemcpyDeviceToHost);
assert(!cudaStat);
//dimGrid = dim3(N_FBIN/TILE_DIM, N_BEAM_STOKES*N_STOKES/BLOCK_ROWS, N_ACCU/BLOCK_COLS);
//copyDataReal<<<dimGrid, dimBlock>>>(d_accu_stokes, h_accu_stokes);
hipDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Copy accumulated Stokes data to host elapsed: %3.3f ms\n",time_spent);
}
//begin = clock();
/*sprintf(fn_output,"%s%s%d%s%d%s%d%s" ,dir_output,"output_",N_FBIN,"x",N_BEAM_STOKES,"x",N_ACCU,".bin");///home/peix/workspace/paf_sim/output_**.bin
//sprintf(fn_output,"%s%s%d%s%d%s%d%s" ,dir_output,"output_",N_FBIN,"x",N_BEAM_STOKES,"x",N_ACCU,".bin");///home/peix/workspace/paf_sim/output_**.bin
FILE * f_output;
f_output = fopen(fn_output, "wb");
fwrite(h_accu_stokes, sizeof(float), N_OUTPUTS, f_output);
fclose(f_output);*/
//data_out = h_accu_stokes;
//memcpy(data_out,h_accu_stokes,N_OUTPUTS*sizeof(float));
/*for (int i=0;i<N_FBIN;i++){
for (int j=0;j<N_BEAM_STOKES*N_STOKES;j++){
if(j<4){
printf("%3.2f ",h_accu_stokes[i*N_BEAM_STOKES*N_STOKES*N_ACCU+j*N_ACCU]);
}
}
}*/
/*if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Write accumulated Stokes data to file elapsed: %3.3f ms\n",time_spent);
}*/
/*begin = clock();
// Free resources
//bfCleanup();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Free memory elapsed: %3.3f ms\n",time_spent);
}*/
if (GPU_TEST_PRINT) {
end_main = clock();
time_spent = (double)(end_main - begin_main)/CLOCKS_PER_SEC*1000;
printf("The run_beamformer program totally elapsed: %3.3f ms\n",time_spent);
//printf("**************************************************************************\n");
}
//return 0;
}
| e149d20e90200d761a8b644fbb64a87a887cb70d.cu | #include <cuda_runtime.h>
#include <cublas_v2.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuComplex.h>
#include <time.h>
#include "gpu_beamformer.h"
//nvcc test_cublasCgemmBatched.cu -lcublas -o test_cublasCgemmBatched
//sudo nvidia-persistenced --user peix
using namespace std;
/*#define TRANS_INPUT 1 // whether transpose input data
#define RECORD_BF_RAW 0 // whether record raw beamformed data
// Number of frequency bins, integer multiples of BLOCK_COLS = 8
#define N_FBIN 16
// Number of time samples, integer multiples of TILE_DIM = 32
#define N_TSAMP 4096
// Number of formed beams, integer multiples of BLOCK_ROWS = 4
#define N_BEAM 64
// Number of elements, integer multiples of BLOCK_ROWS = 4
#define N_ELEM 192
// Number of time samples for each short time integration
#define N_TSAMP_ACCU 32
#define N_POLS 2 // Number of polarizations
#define N_BEAM_STOKES (N_BEAM/N_POLS) // Number of beams after Stokes calculation
#define N_STOKES 4 // Number of Stokes items
#define N_ACCU (N_TSAMP/N_TSAMP_ACCU) // Number of short time integrations
#define N_INPUTS (N_ELEM*N_FBIN*N_TSAMP) // Number of complex samples to process
#define N_WEIGHTS (N_ELEM*N_FBIN*N_BEAM) // Number of complex beamformer weights
#define N_OUTPUTS_BF (N_BEAM*N_TSAMP*N_FBIN) // Number of complex samples in beamformed output structure
#define N_OUTPUTS (N_BEAM_STOKES*N_ACCU*N_FBIN*N_STOKES) // Number of samples in accumulator output structure
*/
// For CUDA function related time consume test
cudaEvent_t start, stop;
float elapsedTime;
// For file I/O and CPU time consume test
clock_t begin,end,begin_main,end_main;
double time_spent;
// Three dimension of CUDA block, dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS)
// NOTE: these are mutated at runtime by runBeamformer() to retune launch shapes.
//const int TILE_DIM = 16; //32
static int TILE_DIM = 16; //16
static int BLOCK_ROWS = 4; //4
static int BLOCK_COLS = 8; //8
dim3 dimGrid;
dim3 dimBlock(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
// Matrix dimension to be calculated by cuBLAS
int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C;
// handle to the cuBLAS library context
static cublasHandle_t handle;
cublasStatus_t stat;
cudaError_t cudaStat;
// Define variables on device
static cuComplex *d_weights;
static cuComplex *d_tdata;
static cuComplex *d_idata;
static cuComplex *d_net_data;
static cuComplex *d_beamformed;
// d_arr_A/B/C are device-visible aliases of the pinned h_arr_* pointer arrays
// (set up via cudaHostGetDevicePointer in initBeamformer).
static cuComplex **d_arr_A = NULL;
static cuComplex **d_arr_B = NULL;
cuComplex **d_arr_C = NULL;
float *d_weights_r;
signed char *d_idata_r;
float *d_stokes_out, *d_accu_stokes_in, *d_power_out;
float *d_accu_stokes;
// Define variables on host
static cuComplex **h_arr_A;
static cuComplex **h_arr_B;
static cuComplex **h_arr_C;
float *h_weights_r;
//signed char *h_data_r; // for file read
cuComplex *h_beamformed;
float *h_accu_stokes;
//CUDA_VISIBLE_DEVICES = 1;
// Define variables for directory and file name
//char dir[128] = "/home/peix/workspace/paf_sim/";
char dir[128] = "/buff0/";
char dir_output[128];
char fn_weight[256];
char fn_data[256];
char fn_output_bf[256];
char fn_output[256];
// CUDA device to be used
int cuda_core = 0;
// One-time setup: selects the CUDA device, creates cuBLAS/event handles, and
// allocates every host (pinned) and device buffer used by runBeamformer().
// Must be called before loadWeights()/runBeamformer(); released by bfCleanup().
void initBeamformer(int cuda_core){
int rv = cudaSetDevice(cuda_core); // NOTE(review): return status is captured but never checked
printf("Set CUDA device to GPU#: %d\n",cuda_core);
sprintf(dir_output,"%s%s%d%s" ,dir,"gpu",cuda_core,"/");
// Create cuda event and cublas handle
cudaEventCreate( &start );
cudaEventCreate( &stop ) ;
cublasCreate(&handle);
// Matrix dimension assignment: A = weights (beams x elements),
// B = data (elements x time samples), C = beamformed output.
nr_rows_A = N_BEAM;
nr_cols_A = N_ELEM;
nr_rows_B = N_ELEM;
nr_cols_B = N_TSAMP;
nr_rows_C = N_BEAM;
// Allocate memory for weights (pinned + mapped so the GPU can read it directly)
cudaHostAlloc(&h_weights_r,2*N_WEIGHTS*sizeof(float),cudaHostAllocMapped);
// Allocate memory for data
//cudaHostAlloc(&h_data_r,2*N_INPUTS*sizeof(signed char),cudaHostAllocMapped);
// Allocate memory for beamformed data
cudaHostAlloc(&h_beamformed,N_OUTPUTS_BF*sizeof(cuComplex),cudaHostAllocMapped);
// Allocate memory for accumulated data
cudaMallocHost(&h_accu_stokes,N_OUTPUTS*sizeof(float));
// Allocate memory to host arrays - This is all memory allocated to arrays that are used by gemmBatched. Allocate 3 arrays on CPU
cudaHostAlloc((void **)&h_arr_A, nr_rows_A * nr_cols_A *N_FBIN*sizeof(cuComplex*),cudaHostAllocMapped);
cudaHostAlloc((void **)&h_arr_B, nr_rows_B * nr_cols_B *N_FBIN*sizeof(cuComplex*),cudaHostAllocMapped);
cudaHostAlloc((void **)&h_arr_C, nr_rows_C * nr_cols_B *N_FBIN*sizeof(cuComplex*),cudaHostAllocMapped);
// Allocate memory on GPU
cudaMalloc(&d_weights, N_WEIGHTS*sizeof(cuComplex));
cudaMalloc(&d_idata_r,2*nr_rows_B * nr_cols_B *N_FBIN*sizeof(signed char));
cudaMalloc(&d_tdata, N_INPUTS*sizeof(cuComplex));
cudaMalloc(&d_idata, N_INPUTS*sizeof(cuComplex));
cudaMalloc(&d_net_data, N_INPUTS*sizeof(cuComplex));
cudaMalloc(&d_beamformed,N_OUTPUTS_BF*sizeof(cuComplex));
cudaMalloc(&d_stokes_out,N_OUTPUTS_BF*2*sizeof(float));
cudaMalloc(&d_power_out,N_OUTPUTS_BF*sizeof(float));
cudaMalloc(&d_accu_stokes_in,N_OUTPUTS_BF*2*sizeof(float));
cudaMalloc(&d_accu_stokes,N_OUTPUTS*sizeof(float));
// Allocate memory for each batch in an array: one (A, B, C) matrix triple per frequency bin.
for(int i = 0; i < N_FBIN; i++){
h_arr_A[i] = d_weights + i*nr_rows_A*nr_cols_A;
h_arr_B[i] = d_tdata + i*nr_rows_B*nr_cols_B;
h_arr_C[i] = d_beamformed + i*nr_rows_C*nr_cols_B;
}
// Get the device pointers for the pinned CPU memory mapped into the GPU memory, implement zero copy
cudaStat = cudaHostGetDevicePointer((void **)&d_arr_A, (void *)h_arr_A, 0);
assert(!cudaStat);
cudaStat = cudaHostGetDevicePointer((void **)&d_arr_B, (void *)h_arr_B, 0);
assert(!cudaStat);
cudaStat = cudaHostGetDevicePointer((void **)&d_arr_C, (void *)h_arr_C, 0);
assert(!cudaStat);
//cudaMalloc(&d_weights_r,nr_rows_A * nr_cols_A *2*N_FBIN*sizeof(float));
//cudaMemcpy(d_weights_r,h_weights_r,nr_rows_A * nr_cols_A *2*N_FBIN*sizeof(float),cudaMemcpyHostToDevice);
cudaStat = cudaHostGetDevicePointer((void **)&d_weights_r, (void *)h_weights_r, 0);
assert(!cudaStat);
//cudaMalloc(&d_idata_r,2*nr_rows_B * nr_cols_B *N_FBIN*sizeof(signed char));
//cudaMemcpy(d_idata_r,h_data_r,2*nr_rows_B * nr_cols_B *N_FBIN*sizeof(float),cudaMemcpyHostToDevice);
//cudaStat = cudaHostGetDevicePointer((void **)&d_idata_r, (void *)h_data_r, 0);
//assert(!cudaStat);
}
// Load the real/imag interleaved beamformer weights (nr_rows_A x nr_cols_A
// complex values per frequency bin) from <dir><filename> into the pinned
// host buffer h_weights_r. On open failure the buffer is left untouched.
void loadWeights(char * filename){
// snprintf instead of sprintf: fn_weight is a fixed 256-byte global.
snprintf(fn_weight,sizeof(fn_weight),"%s%s" ,dir,filename);
printf("Read weights from: %s\n",fn_weight);
FILE * f_weight = fopen(fn_weight, "rb");
if (f_weight == NULL) { // previously fread() was called on a NULL stream and crashed
fprintf(stderr, "loadWeights: cannot open %s\n", fn_weight);
return;
}
size_t expected = (size_t)nr_rows_A * nr_cols_A * 2 * N_FBIN;
size_t got = fread(h_weights_r, sizeof(float), expected, f_weight);
if (got != expected) { // short read: warn instead of silently using stale data
fprintf(stderr, "loadWeights: read %zu of %zu floats from %s\n", got, expected, fn_weight);
}
fclose(f_weight);
}
// Unpack interleaved (re, im) float pairs into cuComplex values, one output
// element per thread over a width x height x nreps launch.
__global__ void realToComplex(float *idata, cuComplex *odata, int width, int height, int nreps)
{
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
const int iz = blockDim.z * blockIdx.z + threadIdx.z;
const int idx = ix * height * width + iy * width + iz; // flat output index
odata[idx].x = idata[2 * idx];     // real part
odata[idx].y = idata[2 * idx + 1]; // imaginary part
}
// Promote interleaved signed-char (re, im) sample pairs to cuComplex,
// one output element per thread over a width x height x nreps launch.
__global__ void realDataToComplex(signed char *idata, cuComplex *odata, int width, int height, int nreps)
{
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
const int iy = blockDim.y * blockIdx.y + threadIdx.y;
const int iz = blockDim.z * blockIdx.z + threadIdx.z;
const int idx = ix * height * width + iy * width + iz; // flat output index
// int cast then *1.0f reproduces the original char -> float promotion.
odata[idx].x = (int)idata[2 * idx] * 1.0f;
odata[idx].y = (int)idata[2 * idx + 1] * 1.0f;
}
// Element-wise device-to-device copy of an N_FBIN x N_TSAMP x N_ELEM cube,
// one cuComplex per thread.
__global__ void copyData(cuComplex *idata, cuComplex *odata)
{
const int f = blockDim.x * blockIdx.x + threadIdx.x;
const int t = blockDim.y * blockIdx.y + threadIdx.y;
const int e = blockDim.z * blockIdx.z + threadIdx.z;
const int idx = f * N_ELEM * N_TSAMP + t * N_ELEM + e;
odata[idx] = idata[idx];
}
// Reorder network-packet data: swaps the packet and time-sample axes while
// keeping the per-packet element*frequency channel axis contiguous.
// Launch shape: N_PACK_PER_ELEM x N_TSAMP_PER_PACK x (N_ELEM_PER_PACK*N_FBIN).
__global__ void transposeNetData(cuComplex *idata, cuComplex *odata)
{
const int p = blockDim.x * blockIdx.x + threadIdx.x; // packet index
const int t = blockDim.y * blockIdx.y + threadIdx.y; // time sample within packet
const int c = blockDim.z * blockIdx.z + threadIdx.z; // element*freq channel
const int chans = N_ELEM_PER_PACK * N_FBIN;          // channels per (packet, sample)
odata[t * N_PACK_PER_ELEM * chans + p * chans + c] =
idata[p * N_TSAMP_PER_PACK * chans + t * chans + c];
}
// Transpose (time, element, freq) -> (freq, time, element) so each frequency
// bin becomes a contiguous N_TSAMP x N_ELEM matrix for the batched GEMM.
__global__ void transposeData(cuComplex *idata, cuComplex *odata)
{
const int t = blockDim.x * blockIdx.x + threadIdx.x; // time sample
const int e = blockDim.y * blockIdx.y + threadIdx.y; // element
const int f = blockDim.z * blockIdx.z + threadIdx.z; // frequency bin
odata[f * N_ELEM * N_TSAMP + t * N_ELEM + e] =
idata[t * N_ELEM * N_FBIN + e * N_FBIN + f];
}
// Transpose (time, freq, element) -> (freq, time, element); the element axis
// is already innermost, so only the two outer axes swap.
__global__ void transposeData2(cuComplex *idata, cuComplex *odata)
{
const int t = blockDim.x * blockIdx.x + threadIdx.x; // time sample
const int f = blockDim.y * blockIdx.y + threadIdx.y; // frequency bin
const int e = blockDim.z * blockIdx.z + threadIdx.z; // element
odata[f * N_TSAMP * N_ELEM + t * N_ELEM + e] =
idata[t * N_FBIN * N_ELEM + f * N_ELEM + e];
}
// Compute the four Stokes terms for one (freq, time, beam) cell.
// Two consecutive cuComplex entries hold the dual-pol sample: the .x fields
// of the pair form polarization X (re, im), the .y fields form polarization Y.
// Output layout: N_STOKES consecutive floats per cell.
__global__ void calcStokes(cuComplex * idata, float * odata) {
const int f = blockDim.x * blockIdx.x + threadIdx.x;
const int t = blockDim.y * blockIdx.y + threadIdx.y;
const int b = blockDim.z * blockIdx.z + threadIdx.z;
const int cell = f * N_TSAMP * N_BEAM_STOKES + t * N_BEAM_STOKES + b;
const cuComplex s0 = idata[2 * cell];     // (Re X, Re Y)
const cuComplex s1 = idata[2 * cell + 1]; // (Im X, Im Y)
float *out = odata + N_STOKES * cell;
out[0] = s0.x * s0.x + s1.x * s1.x; // Re(X)^2 + Im(X)^2
out[1] = s0.y * s0.y + s1.y * s1.y; // Re(Y)^2 + Im(Y)^2
out[2] = s0.x * s1.x + s0.y * s1.y; // Re(XY*)
out[3] = s0.y * s1.x - s0.x * s1.y; // Im(XY*)
}
// Total power |v|^2 for one (freq, time, beam) cell of the beamformed cube.
__global__ void calcPWR(cuComplex * idata, float * odata) {
const int f = blockDim.x * blockIdx.x + threadIdx.x;
const int t = blockDim.y * blockIdx.y + threadIdx.y;
const int b = blockDim.z * blockIdx.z + threadIdx.z;
const int idx = f * N_TSAMP * N_BEAM_STOKES + t * N_BEAM_STOKES + b;
const cuComplex v = idata[idx];
odata[idx] = v.x * v.x + v.y * v.y; // Re^2 + Im^2
}
// Within each frequency slab, swap the (time, stokes-beam) axes so that the
// N_TSAMP samples of one Stokes product become contiguous for accumulation.
__global__ void transposeStokes(float *idata, float *odata)
{
const int f = blockDim.x * blockIdx.x + threadIdx.x; // frequency bin
const int t = blockDim.y * blockIdx.y + threadIdx.y; // time sample
const int s = blockDim.z * blockIdx.z + threadIdx.z; // beam*stokes index
const int slab = N_TSAMP * N_BEAM_STOKES * N_STOKES; // elements per frequency bin
odata[f * slab + s * N_TSAMP + t] =
idata[f * slab + t * N_BEAM_STOKES * N_STOKES + s];
}
// Short-time integration: add N_TSAMP_ACCU consecutive samples into one
// output cell. The caller zeroes odata beforehand (cudaMemset), so this
// accumulation produces the per-window sum.
__global__ void accuStokes(float *idata, float *odata){
const int f = blockDim.x * blockIdx.x + threadIdx.x; // frequency bin
const int s = blockDim.y * blockIdx.y + threadIdx.y; // beam*stokes index
const int a = blockDim.z * blockIdx.z + threadIdx.z; // accumulation window
const int o = f * N_BEAM_STOKES * N_STOKES * N_ACCU + s * N_ACCU + a;
for (int k = 0; k < N_TSAMP_ACCU; ++k) {
odata[o] += idata[o * N_TSAMP_ACCU + k];
}
}
// Short-time power integration (single-Stokes path): sums N_TSAMP_ACCU
// consecutive power samples into each output cell; odata is pre-zeroed.
__global__ void accuPWR(float *idata, float *odata){
const int f = blockDim.x * blockIdx.x + threadIdx.x; // frequency bin
const int a = blockDim.y * blockIdx.y + threadIdx.y; // accumulation window
const int b = blockDim.z * blockIdx.z + threadIdx.z; // beam index
const int o = f * N_ACCU * N_BEAM_STOKES + a * N_BEAM_STOKES + b;
for (int k = 0; k < N_TSAMP_ACCU; ++k) {
odata[o] += idata[o * N_TSAMP_ACCU + k];
}
}
// Function to free resources
void bfCleanup(){
// Free pinned memory
cudaFreeHost(h_arr_A);
cudaFreeHost(h_arr_B);
cudaFreeHost(h_arr_C);
cudaFreeHost(h_weights_r);
//cudaFreeHost(h_data_r);
cudaFreeHost(h_beamformed);
//cudaFreeHost(h_accu_stokes);
// Free GPU memory
if (d_tdata != NULL) {
cudaFree(d_tdata);
}
if (d_idata != NULL) {
cudaFree(d_idata);
}
if (d_net_data != NULL) {
cudaFree(d_net_data);
}
if (d_weights != NULL) {
cudaFree(d_weights);
}
if (d_beamformed != NULL) {
cudaFree(d_beamformed);
}
if (d_stokes_out != NULL) {
cudaFree(d_stokes_out);
}
if (d_power_out != NULL) {
cudaFree(d_power_out);
}
if (d_accu_stokes_in != NULL) {
cudaFree(d_accu_stokes_in);
}
if (d_accu_stokes != NULL) {
cudaFree(d_accu_stokes);
}
if (d_arr_A != NULL) {
cudaFree(d_arr_A);
}
if (d_arr_B != NULL) {
cudaFree(d_arr_B);
}
if (d_arr_C != NULL) {
cudaFree(d_arr_C);
}
// Free up and release cublas handle
cublasDestroy(handle);
cudaEventDestroy( start );
cudaEventDestroy( stop );
}
//int main(int argc, char *argv[])
//void runBeamformer(signed char * data_in, float * data_out)
// Full beamforming pipeline for one data block already resident in d_idata_r:
//   1) unpack weights and input samples to complex,
//   2) transpose the input into per-frequency (element x time) matrices,
//   3) batched cuBLAS GEMM (weights x data) -> beamformed voltages,
//   4) Stokes / power detection and short-time accumulation,
//   5) copy the accumulated result back into h_accu_stokes.
// Requires initBeamformer() and loadWeights() to have run first.
void runBeamformer()
//void runBeamformer(signed char * data_in)
{
if (GPU_TEST_PRINT) {begin_main = clock();}
//print(cudaGetDeviceCount());
//print(cudaGetDeviceProperties(0));
//if(argc>1){
//cuda_core = atoi(argv[1]);}
//cudaSetDevice(cuda_core);
//sprintf(dir_output,"%s%s%d%s" ,dir,"gpu",cuda_core,"/");
// --- Stage 1: convert the real-valued weights to complex ---
if (GPU_TEST_PRINT) {begin = clock();}
// Weights in dimension of N_FBIN x N_BEAM x N_ELE
TILE_DIM = 4;
BLOCK_ROWS = 1;
BLOCK_COLS = 8;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_ELEM/TILE_DIM, N_BEAM/BLOCK_ROWS, N_FBIN/BLOCK_COLS);
//dimBlock = dim3(16, 16, 2);// number of threads per block must be less than 1024
cudaDeviceSynchronize();
realToComplex<<<dimGrid,dimBlock>>>(d_weights_r, d_weights, N_FBIN, N_BEAM, N_ELEM);
cudaDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("**************************************************************************\n");
printf("Convert weights to complex numbers elapsed: %3.3f ms\n",time_spent);
}
// Read data from file
//begin = clock();
//sprintf(fn_data,"%s%s%d%s%d%s%d%s" ,dir,"data_",N_FBIN,"x",N_ELEM,"x",N_TSAMP,".bin");
/*sprintf(fn_data,"%s%s%d%s%d%s%d%s%d%s" ,dir,"data",cuda_core,"_",N_FBIN,"x",N_ELEM,"x",N_TSAMP,".bin");
FILE * f_data;
f_data = fopen(fn_data, "rb");
size_t size2 = fread(h_data_r, sizeof(float), 2*nr_rows_B * nr_cols_B *N_FBIN, f_data);
fclose(f_data);*/
//h_data_r = data_in;
//cudaStat = cudaHostGetDevicePointer((void **)&d_idata_r, (void *)data_in, 0);
//assert(!cudaStat);
//memcpy(h_data_r,data_in,N_INPUTS*2*sizeof(signed char));
//cudaMemcpy(d_idata_r,h_data_r,2*nr_rows_B * nr_cols_B *N_FBIN*sizeof(float),cudaMemcpyHostToDevice);
/*for (int i=0;i<8192;i++){
printf("%d ",h_data_r[i]);
}*/
/*if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Read data from %s elapsed: %3.3f ms\n",fn_data,time_spent);
}*/
// --- Stage 2: convert the signed-char input samples to complex ---
if (GPU_TEST_PRINT) {begin = clock();}
cudaDeviceSynchronize();
// If input data dimension is: N_TSAMP x N_ELE x N_FBIN
if(TRANS_INPUT==1){
//printf("TRANS_INPUT is: %d\n",TRANS_INPUT);
//dimGrid = dim3(N_TSAMP/TILE_DIM, N_ELEM/BLOCK_ROWS,N_FBIN/BLOCK_COLS);
TILE_DIM = 32;
BLOCK_ROWS = 8;
BLOCK_COLS = 4;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_TSAMP/TILE_DIM, N_FBIN/BLOCK_ROWS,N_ELEM/BLOCK_COLS);
realDataToComplex<<<dimGrid,dimBlock>>>(d_idata_r, d_idata, N_ELEM, N_FBIN, N_TSAMP);
}
else{
dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS,N_ELEM/BLOCK_COLS);
realDataToComplex<<<dimGrid,dimBlock>>>(d_idata_r, d_idata, N_ELEM, N_TSAMP, N_FBIN);
}
cudaDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Convert data to complex numbers elapsed: %3.3f ms\n",time_spent);
}
// --- Stage 3: transpose into per-frequency (element x time) layout for GEMM ---
if (GPU_TEST_PRINT) {begin = clock();}
cudaDeviceSynchronize();
// If transpose input data is needed, then transpose data to dimension: N_FBIN x N_TSAMP x N_ELE
if(TRANS_INPUT==1){
if(FAKED_INPUT==1){
TILE_DIM = 12;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_PACK_PER_ELEM/TILE_DIM, N_TSAMP_PER_PACK/BLOCK_ROWS,N_ELEM_PER_PACK*N_FBIN/BLOCK_COLS);
transposeNetData<<<dimGrid, dimBlock>>>(d_idata, d_net_data);
// NOTE(review): the loop below repeats the launch per packet chunk; the
// first launch above appears redundant with the i==0 iteration -- confirm.
for (int i=0;i<N_PACK_PER_TSAMP;i++){
transposeNetData<<<dimGrid, dimBlock>>>(d_idata+4096*12*i, d_net_data+4096*12*i);
}
TILE_DIM = 16;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_TSAMP/TILE_DIM, N_ELEM/BLOCK_ROWS,N_FBIN/BLOCK_COLS);
transposeData<<<dimGrid, dimBlock>>>(d_net_data, d_tdata);
}
else{
if(N_POLS==1){
//printf("N_POLS is: %d\n",N_POLS);
TILE_DIM = 32;
BLOCK_ROWS = 8;
BLOCK_COLS = 4;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_TSAMP/TILE_DIM, N_FBIN/BLOCK_ROWS,N_ELEM/BLOCK_COLS);
transposeData2<<<dimGrid, dimBlock>>>(d_idata, d_tdata);
}
else{
dimGrid = dim3(N_TSAMP/TILE_DIM, N_ELEM/BLOCK_ROWS,N_FBIN/BLOCK_COLS);
transposeData<<<dimGrid, dimBlock>>>(d_idata, d_tdata);
}
}
}
else{
dimGrid = dim3(N_TSAMP/TILE_DIM, N_ELEM/BLOCK_ROWS,N_FBIN/BLOCK_COLS);
copyData<<<dimGrid, dimBlock>>>(d_idata, d_tdata);
}
/*if(TRANS_INPUT==1){
transposeData<<<dimGrid, dimBlock>>>(d_idata, d_tdata);
}
else{
copyData<<<dimGrid, dimBlock>>>(d_idata, d_tdata);
}*/
cudaDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Transpose data elapsed: %3.3f ms\n",time_spent);
}
// --- Stage 4: batched complex GEMM, one (weights x data) product per frequency bin ---
if (GPU_TEST_PRINT) {cudaEventRecord( start, 0 ) ;}
// Leading dimensions are always the rows of each matrix since the data is stored in a column-wise order.
int lda=nr_rows_A, ldb=nr_rows_B, ldc=nr_rows_C;
cuComplex alf;
cuComplex bet;
alf.x = 1;
alf.y = 0;
bet.x = 0;
bet.y = 0;
int batchCount = N_FBIN; // There must be the same number of batches in each array.
cudaDeviceSynchronize();
stat = cublasCgemmBatched(
handle, // handle to the cuBLAS library context.
CUBLAS_OP_N, // Operation on matrices within array A.
CUBLAS_OP_N, // Operation on matrices within array B.
nr_rows_A, // Number of rows in matrix A and C.
nr_cols_B, // Number of columns in matrix B and C.
nr_cols_A, // Number of columns and rows in matrix A and B respectively.
&alf, // Scalar used for multiplication.
(const cuComplex **)d_arr_A, // Weight array of pointers.
lda, // Leading dimension of each batch or matrix in array A.
(const cuComplex **)d_arr_B, // Data array of pointers.
ldb, // Leading dimension of each batch or matrix in array B.
&bet, // Scalar used for multiplication.
(cuComplex **)d_arr_C, // Output array of pointers.
ldc, // Leading dimension of each batch or matrix in array C.
batchCount); // Number of batches in each array.
cudaDeviceSynchronize();
if (stat == CUBLAS_STATUS_INVALID_VALUE) {
printf("RTBF: Invalid CUBLAS values\n");
} else if (stat == CUBLAS_STATUS_EXECUTION_FAILED) {
printf("RTBF: Execution failed.\n");
}
if(stat != CUBLAS_STATUS_SUCCESS){
cerr << "cublasCgemmBatched failed" << endl;
exit(1);
}
assert(!cudaGetLastError());
if (GPU_TEST_PRINT) {
cudaEventRecord( stop, 0 ) ;
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime,start, stop );
printf( "Matrix multiplication kernel(cublasSgemmBatched) duration: %3.3f ms\n", elapsedTime );
}
// Optionally dump the raw beamformed voltages to disk.
if (RECORD_BF_RAW==1){
// copy beamformed data back to host, zero copy cannot map memory from GPU to CPU
if (GPU_TEST_PRINT) {begin = clock();}
cudaDeviceSynchronize();
cudaStat = cudaMemcpy(h_beamformed, d_beamformed, N_OUTPUTS_BF*sizeof(cuComplex), cudaMemcpyDeviceToHost);
assert(!cudaStat);
//cudaStat = cudaHostGetDevicePointer((void **)&d_beamformed, (void *)h_beamformed, 0);
//assert(!cudaStat);
cudaDeviceSynchronize();
//dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS, N_BEAM/BLOCK_COLS);
//copyData<<<dimGrid, dimBlock>>>(d_beamformed, h_beamformed);
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Copy beamformed data back to host elapsed: %3.3f ms\n",time_spent);
}
if (GPU_TEST_PRINT) {begin = clock();}
// Write beamformed result to file
sprintf(fn_output_bf,"%s%s%d%s%d%s%d%s" ,dir_output,"output_bf_",N_FBIN,"x",N_BEAM,"x",N_TSAMP,".bin");///home/peix/workspace/paf_sim/output_**.bin
FILE * f_output_bf;
f_output_bf = fopen(fn_output_bf, "wb");
fwrite(h_beamformed, sizeof(cuComplex), N_OUTPUTS_BF, f_output_bf);
fclose(f_output_bf);
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Write beamformed result to file elapsed: %3.3f ms\n",time_spent);
}
}
// --- Stage 5: detection (full Stokes or total power) and short-time accumulation ---
if (GPU_TEST_PRINT) {begin = clock();}
cudaDeviceSynchronize();
if (N_STOKES==4){
dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS, N_BEAM_STOKES/BLOCK_COLS);
calcStokes<<<dimGrid,dimBlock>>>(d_beamformed, d_stokes_out);
dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS, N_BEAM_STOKES*N_STOKES/BLOCK_COLS);
transposeStokes<<<dimGrid,dimBlock>>>(d_stokes_out,d_accu_stokes_in);
cudaMemset(d_accu_stokes,0,N_OUTPUTS*sizeof(float));
dimGrid = dim3(N_FBIN/TILE_DIM, N_BEAM_STOKES*N_STOKES/BLOCK_ROWS, N_ACCU/BLOCK_COLS);
accuStokes<<<dimGrid,dimBlock>>>(d_accu_stokes_in, d_accu_stokes);}
else if (N_STOKES==1){
TILE_DIM = 8;
BLOCK_ROWS = 128;
BLOCK_COLS = 1;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
dimGrid = dim3(N_FBIN/TILE_DIM, N_TSAMP/BLOCK_ROWS, N_BEAM_STOKES/BLOCK_COLS);
calcPWR<<<dimGrid,dimBlock>>>(d_beamformed, d_power_out);
cudaMemset(d_accu_stokes,0,N_OUTPUTS*sizeof(float));
TILE_DIM = 8;
BLOCK_ROWS = 1;
BLOCK_COLS = 1;
dimBlock = dim3(TILE_DIM,BLOCK_ROWS,BLOCK_COLS);
//dimGrid = dim3(N_FBIN/TILE_DIM, N_BEAM_STOKES*N_STOKES/BLOCK_ROWS, N_ACCU/BLOCK_COLS);
dimGrid = dim3(N_FBIN/TILE_DIM, N_ACCU/BLOCK_ROWS, N_BEAM_STOKES*N_STOKES/BLOCK_COLS);
accuPWR<<<dimGrid,dimBlock>>>(d_power_out, d_accu_stokes);
}
cudaDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Calculate Stokes parameter and accumulate data elapsed: %3.3f ms\n",time_spent);
}
begin = clock();
cudaDeviceSynchronize();
// copy accumulated Stokes data back to host
cudaStat = cudaMemcpy(h_accu_stokes, d_accu_stokes, N_OUTPUTS*sizeof(float), cudaMemcpyDeviceToHost);
assert(!cudaStat);
//dimGrid = dim3(N_FBIN/TILE_DIM, N_BEAM_STOKES*N_STOKES/BLOCK_ROWS, N_ACCU/BLOCK_COLS);
//copyDataReal<<<dimGrid, dimBlock>>>(d_accu_stokes, h_accu_stokes);
cudaDeviceSynchronize();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Copy accumulated Stokes data to host elapsed: %3.3f ms\n",time_spent);
}
//begin = clock();
/*sprintf(fn_output,"%s%s%d%s%d%s%d%s" ,dir_output,"output_",N_FBIN,"x",N_BEAM_STOKES,"x",N_ACCU,".bin");///home/peix/workspace/paf_sim/output_**.bin
//sprintf(fn_output,"%s%s%d%s%d%s%d%s" ,dir_output,"output_",N_FBIN,"x",N_BEAM_STOKES,"x",N_ACCU,".bin");///home/peix/workspace/paf_sim/output_**.bin
FILE * f_output;
f_output = fopen(fn_output, "wb");
fwrite(h_accu_stokes, sizeof(float), N_OUTPUTS, f_output);
fclose(f_output);*/
//data_out = h_accu_stokes;
//memcpy(data_out,h_accu_stokes,N_OUTPUTS*sizeof(float));
/*for (int i=0;i<N_FBIN;i++){
for (int j=0;j<N_BEAM_STOKES*N_STOKES;j++){
if(j<4){
printf("%3.2f ",h_accu_stokes[i*N_BEAM_STOKES*N_STOKES*N_ACCU+j*N_ACCU]);
}
}
}*/
/*if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Write accumulated Stokes data to file elapsed: %3.3f ms\n",time_spent);
}*/
/*begin = clock();
// Free resources
//bfCleanup();
if (GPU_TEST_PRINT) {
end = clock();
time_spent = (double)(end - begin)/CLOCKS_PER_SEC*1000;
printf("Free memory elapsed: %3.3f ms\n",time_spent);
}*/
if (GPU_TEST_PRINT) {
end_main = clock();
time_spent = (double)(end_main - begin_main)/CLOCKS_PER_SEC*1000;
printf("The run_beamformer program totally elapsed: %3.3f ms\n",time_spent);
//printf("**************************************************************************\n");
}
//return 0;
}
|
3fcf7c13ff9f6b6e8b259fcd507edb307fa8be48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// source: https://github.com/sunnlo/BellmanFord/blob/master/cuda_bellman_ford.cu
#include <stdio.h>
// Unit step from coordinate n1 toward n2: -1 if the destination is below the
// source, +1 if above, 0 if they already coincide.
__device__ int calcStep(int n1, int n2){
int delta = n2 - n1;
if (delta > 0) return 1;
if (delta < 0) return -1;
return 0;
}
// Walks from (x, y) toward (xBound, yBound) in a stair pattern, writing every
// visited coordinate into routesX/routesY over the index range [start, end).
// The axis preference alternates with the parity of the route index, and a
// finished axis falls back to the other one.
__global__ void fillPaths(int start, int end, int x, int y, int xBound, int yBound, int* routesX, int* routesY){
int xStep = calcStep(x, xBound); // -1/0/+1 toward xBound
int yStep = calcStep(y, yBound); // -1/0/+1 toward yBound
int stepType = 0;
for(int i = start; i < end; i++){
stepType = i%2; // even index -> try x first, odd -> try y first
routesX[i] = x;
routesY[i] = y;
if(stepType == 0){
if(x != xBound){
x += xStep;
}
else if(y != yBound){
y += yStep;
}
}
else{
if(y != yBound){
y += yStep;
}
else if (x != xBound){
x += xStep;
}
}
}
}
/*getPaths
 * Each thread fills a portion of routesX and routesY: it begins at
 * (srcsX[tid], srcsY[tid]) and moves along the coordinate grid in a stair
 * pattern (right-up-right etc.) until it reaches (destsX[tid], destsY[tid]).
 * sizes[] holds the cumulative (prefix-sum) route lengths, so thread tid owns
 * the index range [sizes[tid-1], sizes[tid]) of the route buffers.
 */
__global__ void getPaths(int totalSize, int* routesX, int* routesY, int* srcsX, int* srcsY, int* destsX, int* destsY, int* sizes){
int tid = threadIdx.x + blockIdx.x*blockDim.x; //Num threads path_sz - 1
// Bounds guard: 'totalSize' was previously unused, so any thread beyond the
// work list read past the end of srcsX/sizes. (Assumes totalSize bounds the
// valid tid range -- confirm against the launch site.)
if (tid >= totalSize) return;
int start = (tid == 0) ? 0 : sizes[tid - 1];
int end = sizes[tid];
int x = srcsX[tid];
int y = srcsY[tid];
int xBound = destsX[tid];
int yBound = destsY[tid];
// Child kernel launch (dynamic parallelism): one serial walker per path.
hipLaunchKernelGGL(( fillPaths), dim3(1), dim3(1), 0, 0, start, end, x, y, xBound, yBound, routesX, routesY);
hipDeviceSynchronize(); // NOTE(review): device-side synchronization of child kernels is deprecated in recent toolkits
}
| 3fcf7c13ff9f6b6e8b259fcd507edb307fa8be48.cu | // source: https://github.com/sunnlo/BellmanFord/blob/master/cuda_bellman_ford.cu
#include <stdio.h>
// Sign of (n2 - n1): the unit step that moves coordinate n1 toward n2
// (+1, -1, or 0 when already equal).
__device__ int calcStep(int n1, int n2){
int d = n2 - n1;
return (d > 0) - (d < 0);
}
// Walk from (x, y) toward (xBound, yBound) in a stair pattern, recording each
// visited coordinate into routesX/routesY over the index range [start, end).
// The preferred axis alternates with the parity of the route index; once an
// axis reaches its bound, the walk continues along the other axis.
__global__ void fillPaths(int start, int end, int x, int y, int xBound, int yBound, int* routesX, int* routesY){
const int dx = calcStep(x, xBound);
const int dy = calcStep(y, yBound);
for (int i = start; i < end; i++) {
routesX[i] = x;
routesY[i] = y;
const bool preferX = ((i % 2) == 0); // even index -> try x first
if (preferX) {
if (x != xBound)      x += dx;
else if (y != yBound) y += dy;
} else {
if (y != yBound)      y += dy;
else if (x != xBound) x += dx;
}
}
}
/*getPaths
 * Each thread fills a portion of routesX and routesY: it begins at
 * (srcsX[tid], srcsY[tid]) and moves along the coordinate grid in a stair
 * pattern (right-up-right etc.) until it reaches (destsX[tid], destsY[tid]).
 * sizes[] holds the cumulative (prefix-sum) route lengths, so thread tid owns
 * the index range [sizes[tid-1], sizes[tid]) of the route buffers.
 */
__global__ void getPaths(int totalSize, int* routesX, int* routesY, int* srcsX, int* srcsY, int* destsX, int* destsY, int* sizes){
int tid = threadIdx.x + blockIdx.x*blockDim.x; //Num threads path_sz - 1
// Bounds guard: 'totalSize' was previously unused, so any thread beyond the
// work list read past the end of srcsX/sizes. (Assumes totalSize bounds the
// valid tid range -- confirm against the launch site.)
if (tid >= totalSize) return;
int start = (tid == 0) ? 0 : sizes[tid - 1];
int end = sizes[tid];
int x = srcsX[tid];
int y = srcsY[tid];
int xBound = destsX[tid];
int yBound = destsY[tid];
// Child kernel launch (dynamic parallelism): one serial walker per path.
fillPaths<<<1, 1>>> (start, end, x, y, xBound, yBound, routesX, routesY);
cudaDeviceSynchronize(); // NOTE(review): device-side cudaDeviceSynchronize is deprecated in recent CUDA toolkits
}
|
26326bbceadd838cf352bf47bfd2d35c0332d7bd.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2011, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted for academic, research, experimental, or personal use provided
that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry
Relations at Texas State University-San Marcos <http://www.txstate.edu/ocir/>.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Martin Burtscher and Molly A. O'Neil
*/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7fffffff
#define CIPOW2 128 /* power of 2 and as large as the largest possible input */
#define TOURS110 (65536*2)
__device__ volatile unsigned int gblkcnt, gcurr;
__device__ volatile unsigned long long best;
/******************************************************************************/
/* Kernels to initialize best tour tracking and GPU state. */
/******************************************************************************/
// Reinitializes the global best-tour record before a new search round.
// A single thread suffices: lane 0 of the grid writes the sentinel.
__global__ void ResetKernel()
{
const int gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid == 0) {
best = 0xffffffffffffffffULL; // "infinite" cost: any evaluated tour beats it
}
}
// Resets the global work-distribution state for the 110-city solver:
// clears the finished-block counter and sets the next-climber cursor just
// past the climbers handed out in the initial wave (one per thread).
__global__ void Reset110Kernel(int blocksTimesThreads)
{
gblkcnt = 0;
gcurr = blocksTimesThreads;
}
/******************************************************************************/
/* The GPU TSP kernel: for each thread, initializes a first climber and */
/* randomizes a starting tour, then evaluates IHC steps until a local minimum */
/* is found, at which point a new climber is obtained from the worklist. */
/******************************************************************************/
/*
 * TSP110Kernel: one climber per thread performs iterated hill climbing with
 * 2-opt moves on a private tour.  When a thread reaches a local minimum it
 * packs the tour cost and climber id into a 64-bit record (cost in the upper
 * 32 bits, id in the lower 32), merges it atomically into `best`, and pulls
 * a fresh climber index from the global work list `gcurr`.
 *
 *   gdist      - cities*cities distance matrix in global memory (staged to shared)
 *   gresult    - output: [0]=best cost, [1]=0, [2]=winning climber id, [3..]=tour
 *   touroffset - climber-id offset of this launch (also used as the RNG seed base)
 *   cities     - number of cities (must be <= 110)
 *   tours      - total number of climbers to evaluate
 *   gtours     - per-climber tour storage, citiespad ints per climber
 *
 * Launched as <<<SMs, 1024>>>; needs 110*110 ints of static shared memory.
 */
__global__
__launch_bounds__(1024, 1)
void TSP110Kernel(int *gdist, int *gresult, int touroffset, int cities, int tours, int *gtours)
{
  register int i, j, change, mini, minj, minchange, randx, citiesm1, citiespad;
  register int ti, tj, tiplus1, tjplus1, dist_i_iplus1, ti_p1, mytour, from, to, cost;
  register int *sdist_ti, *sdist_tiplus1, *ltour;
  register unsigned long long mycost, current;
  unsigned char tour[110]; // local memory for coalesced accesses, shifted by one entry relative to gresult
  __shared__ int sdist[110 * 110];
  // Stage the distance matrix into shared memory (cooperative strided copy).
  for (i = threadIdx.x; i < cities * cities; i += blockDim.x) {
    sdist[i] = gdist[i];
  }
  __syncthreads();
  citiesm1 = cities - 1;
  citiespad = (citiesm1 + 33) & (~31); // pad per-climber storage to a multiple of 32 ints
  mytour = threadIdx.x + blockIdx.x * blockDim.x; // global thread id doubles as first climber id
  // Threads whose initial id exceeds the climber budget do no climbing at all.
  if (mytour < tours) {
    // Identity tour: city 0 is implicit at the front, so store 1..cities-1, 0.
    for (i = 0; i < citiesm1; i++) {
      tour[i] = i + 1;
    }
    tour[citiesm1] = 0;
    // Randomize the initial tour with an LCG seeded by the climber's global id.
    randx = mytour + touroffset;
    for (i = 0; i < citiesm1; i++) {
      randx = (MULT * randx + ADD) & MASK;
      j = randx % citiesm1;
      to = tour[i];
      tour[i] = tour[j];
      tour[j] = to;
    }
    // Main climber loop: keep applying the best 2-opt move until none improves.
    do {
      minchange = 0;
      ti = 0;            // row offset for tour[-1] (city 0)
      ti_p1 = tour[0];
      // Evaluate 2-opt moves: remove edges (i,i+1) and (j,j+1), add (i,j) and
      // (i+1,j+1).  Mirrored and adjacent pairs are skipped since they cannot
      // improve the tour.
      for (i = 2; i < cities; i++) { // loop bias = 2, loop from i=1 to i=cities-2
        tiplus1 = ti_p1 * cities;
        dist_i_iplus1 = sdist[ti + ti_p1];
        tj = ti_p1 = tour[i-1];
        sdist_ti = &sdist[ti];           // row of distance matrix for city i-1
        sdist_tiplus1 = &sdist[tiplus1]; // row for city i
#pragma unroll 8
        for (j = i; j < cities; j++) {
          tjplus1 = tour[j];
          // Tour-length delta of the move: added edges minus removed edges.
          change = sdist_ti[tj] + sdist_tiplus1[tjplus1] - dist_i_iplus1 - sdist[tj * cities + tjplus1];
          tj = tjplus1;
          // Track the best (most negative) move seen so far.
          if ((minchange > change) && (j < cities)) {
            minchange = change;
            mini = i;
            minj = j;
          }
        }
        ti = tiplus1;
      }
      if (minchange < 0) {
        // Improving move found: new tour is 0..mini, minj downto mini+1, minj+1..
        // i.e. reverse the segment between the two cut edges, then iterate again.
        i = mini - 2;
        j = minj - 1;
        while (i < j) {
          to = tour[j];
          tour[j] = tour[i];
          tour[i] = to;
          i++;
          j--;
        }
      }
      // Otherwise this climber is at a local minimum: compute the tour cost,
      // record it if best so far, and fetch a new climber from the worklist.
      else {
        cost = 0;
        from = 0;
        for (i = 0; i < citiesm1; i++) {
          to = tour[i];
          cost += sdist[from * cities + to];
          from = to;
        }
        mycost = cost + sdist[from];                    // close the tour back to city 0 (matrix is symmetric)
        mycost = (mycost << 32) + mytour + touroffset;  // pack cost|climber-id into one 64-bit record
        current = best;
        // Lock-free minimum update of the packed best record.
        while (mycost < current) {
          atomicCAS((unsigned long long *)&best, current, mycost);
          current = best;
        }
        if (mycost == current) {
          // We own the current best record: publish the winning tour.
          // (fixed: "ltour = >ours[...]" was a corrupted "&gtours[...]")
          ltour = &gtours[mytour * citiespad];
          for (i = 0; i < citiesm1; i++) {
            ltour[i] = tour[i];
          }
        }
        // atomicAdd returns gcurr's value before the increment, i.e. our new id.
        mytour = atomicAdd((int *)&gcurr, 1);
        if (mytour < tours) {
          // Restarts remain: rebuild and re-randomize a tour for the new climber.
          for (i = 0; i < citiesm1; i++) {
            tour[i] = i + 1;
          }
          tour[citiesm1] = 0;
          randx = mytour + touroffset;
          for (i = 0; i < citiesm1; i++) {
            randx = (MULT * randx + ADD) & MASK;
            j = randx % (citiesm1);
            to = tour[i];
            tour[i] = tour[j];
            tour[j] = to;
          }
        }
      }
    } while (mytour < tours);
  }
  __syncthreads();
  // The last block to finish (elected via atomicInc wrap-around) writes the
  // winning cost, climber id, and tour into gresult.
  if (threadIdx.x == 0) {
    to = gridDim.x - 1;
    if (to == atomicInc((unsigned int *)&gblkcnt, to)) {
      mytour = best & 0xffffffff;
      gresult[0] = best >> 32;
      gresult[1] = 0;
      gresult[2] = mytour;
      mytour %= TOURS110;
      // (fixed: same ">ours" corruption as above)
      ltour = &gtours[mytour * citiespad];
      for (i = 0; i < citiesm1; i++) {
        gresult[i+3] = ltour[i];
      }
    }
  }
}
/******************************************************************************/
/* Function to read the TSP database input file and initialize the distance */
/* matrix. */
/******************************************************************************/
/* Parse a TSPLIB-style input file and build the distance matrix.
   Skips the three header lines (NAME/TYPE/COMMENT), reads the city count
   after the ':' of the DIMENSION line, then reads "id x y" records from the
   NODE_COORD_SECTION.  Fills `dist` (cities x cities) with symmetric rounded
   Euclidean distances, setting the diagonal to a huge value so a city never
   links to itself.  Returns the city count; exits on any format error. */
static int readFile(char *filename, int *dist)
{
  register int i, j, ch, cnt, cities;
  int i1;
  float i2, i3;
  register float *posx, *posy;
  register double dx, dy;
  register FILE *f;
  char str[256];
  f = fopen(filename, "r+t");
  if (f == NULL) {fprintf(stderr, "could not open file %s\n", filename); exit(-1);}
  // Skip three header lines, then scan forward to the ':' of DIMENSION.
  ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
  ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
  ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
  ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f);
  fscanf(f, "%s\n", str);
  cities = atoi(str);
  if (cities == 0) {
    fprintf(stderr, "%d cities\n", cities);
    exit(-1);
  }
  if (cities >= CIPOW2) {
    fprintf(stderr, "%d cities is too large\n", cities);
    exit(-1);
  }
  posx = (float *)malloc(sizeof(float) * cities);
  posy = (float *)malloc(sizeof(float) * cities);
  if ((posx == NULL) || (posy == NULL)) {
    fprintf(stderr, "out of memory\n");
    exit(-1);
  }
  ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
  fscanf(f, "%s\n", str);
  if (strcmp(str, "NODE_COORD_SECTION") != 0) {
    fprintf(stderr, "wrong file format\n");
    exit(-1);
  }
  cnt = 0;
  // Require all three fields to convert.  The previous `while (fscanf(...))`
  // also looped when fscanf returned EOF (-1, truthy), so a truncated file
  // could spin and read uninitialized values; `== 3` exits cleanly and the
  // cnt != cities check below reports the problem.
  while (fscanf(f, "%d %f %f\n", &i1, &i2, &i3) == 3) {
    posx[cnt] = i2;
    posy[cnt] = i3;
    cnt++;
    if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);}
    if (cnt != i1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, i1); exit(-1);}
  }
  if (cnt != cities) {
    fprintf(stderr, "read %d instead of %d cities\n", cnt, cities);
    exit(-1);
  }
  fscanf(f, "%s", str);
  if (strcmp(str, "EOF") != 0) {
    fprintf(stderr, "didn't see 'EOF' at end of file\n");
    exit(-1);
  }
  fclose(f);
  //This for loop creates a distance matrix, unfortunately this method means
  //we lose city ordering, but we will retain a minimum cost for metrics.
  for (i = 0; i < cities; i++) {
    for (j = 0; j < cities; j++) {
      dx = posx[i] - posx[j];
      dy = posy[i] - posy[j];
      dist[j * cities + i] = dist[i * cities + j] = (int)(sqrt(dx * dx + dy * dy) + 0.5);
    }
    dist[i * cities + i] = 0x3fffffff; // half of maxint
  }
  free(posx);
  free(posy);
  return cities;
}
/******************************************************************************/
/* Functions to synchronize GPU threads and check for error status, as well */
/* as to ascertain number of SMs in device and proper architecture version. */
/******************************************************************************/
// Synchronize with the device and abort with `msg` if any prior HIP API
// call or kernel launch left a pending error status.
static void CudaTest(char *msg)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
exit(-1);
}
}
// Select the GPU to run on: first find the highest SM major version present,
// then among devices of that architecture pick the one with the highest
// estimated throughput (cores-per-MP * MP count * clock).  Stores the chosen
// device's multiprocessor count in *SMs and returns its device index.
// Exits if no device with compute capability >= 2.0 is found.
static int VerifySystemParameters(int *SMs)
{
int deviceCount, currentDevice = 0, bestSMArch = 0;
int maxComputePerf = 0, maxPerfDevice = 0, SMPerMP;
int archCoresSM[3] = { 1, 8, 32 }; // cores per multiprocessor, indexed by SM major (capped at 2.x)
hipDeviceProp_t deviceProp;
hipGetDeviceCount(&deviceCount);
if(deviceCount <= 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
// Find the best SM architecture device
while(currentDevice < deviceCount) {
hipGetDeviceProperties(&deviceProp, currentDevice);
if(deviceProp.major > 0 && deviceProp.major < 9999) {
bestSMArch = max(bestSMArch, deviceProp.major);
}
currentDevice++;
}
// Find the best GPU device
currentDevice = 0;
while(currentDevice < deviceCount) {
hipGetDeviceProperties(&deviceProp, currentDevice);
if(deviceProp.major == 9999 && deviceProp.minor == 9999) {
SMPerMP = 1; // 9999.9999 marks a device emulation target
} else if (deviceProp.major <= 2) {
SMPerMP = archCoresSM[deviceProp.major];
} else { // SM major > 2
SMPerMP = archCoresSM[2];
}
int computePerf = deviceProp.multiProcessorCount * SMPerMP * deviceProp.clockRate;
if((deviceProp.major == bestSMArch) && (computePerf > maxComputePerf)) {
maxComputePerf = computePerf;
maxPerfDevice = currentDevice;
}
currentDevice++;
}
hipGetDeviceProperties(&deviceProp, maxPerfDevice);
if(deviceProp.major < 2) {
fprintf(stderr, "No device found with compute capability 2.0 or above\n");
exit(-1);
}
*SMs = deviceProp.multiProcessorCount;
return maxPerfDevice;
}
/******************************************************************************/
/* Run function reads input database and launches the GPU kernels. */
/* Prints to std out: GPU best tour and tour cost, as well as error from */
/* optimal tour read from input file. */
/******************************************************************************/
// Host driver: read the distance matrix from `filename`, allocate and fill
// device buffers, then launch the TSP kernel in batches of up to TOURS110
// climbers until `tours` climbers have been evaluated, tracking the best
// tour cost seen across batches and printing the final result.
void run(char *filename, int tours, int SMs)
{
int *lgdist, tour, blocks, best;
int *lgresult, *lgtours, *lscratch;
int dist[CIPOW2 * CIPOW2];
int result[3 + CIPOW2];
int cities;
cities = readFile(filename, dist);
printf("%s: %d tours with %d cities each\n", filename, tours, cities);
if (tours < 1) {
fprintf(stderr, "tour count must be positive\n");
exit(-1);
}
// Per-climber tour rows are padded to a multiple of 32 ints, matching the
// citiespad computation inside TSP110Kernel.
if (hipSuccess != hipMalloc((void **)&lgtours, ((cities + 32) & (~31)) * min(TOURS110, tours) * sizeof(int))) fprintf(stderr, "could not allocate gtours\n"); CudaTest("couldn't allocate gtours");
if (hipSuccess != hipMalloc((void **)&lscratch, ((cities + 32) & (~31)) * min(TOURS110, tours) * sizeof(int))) fprintf(stderr, "could not allocate scratch\n"); CudaTest("couldn't allocate scratch");
if (hipSuccess != hipMalloc((void **)&lgresult, sizeof(int) * (cities + 3))) fprintf(stderr, "could not allocate gresult\n"); CudaTest("couldn't allocate gresult");
if (hipSuccess != hipMalloc((void **)&lgdist, sizeof(int) * cities * cities)) fprintf(stderr, "could not allocate gdist\n"); CudaTest("couldn't allocate gdist");
if (hipSuccess != hipMemcpy(lgdist, dist, sizeof(int) * cities * cities, hipMemcpyHostToDevice)) fprintf(stderr, "copying of dist to device failed\n"); CudaTest("dist copy to device failed");
//Above if statement copies the dist matrix to the GPU, we will just pass the matrix directly for single threaded
hipLaunchKernelGGL(( ResetKernel), dim3(SMs*3), dim3(512), 0, 0, );
best = 0x7fffffff;
tour = 0;
if (cities <= 110) {
blocks = min(tours, TOURS110); //Selects the minimum between the number of climbers given in cmd input, and TOURS110 (a huge number)
while (tours > tour) {//While our tour number is less than that huge number
hipLaunchKernelGGL(( Reset110Kernel), dim3(1), dim3(1), 0, 0, SMs*2*512);
hipLaunchKernelGGL(( TSP110Kernel), dim3(SMs), dim3(1024), 0, 0, lgdist, lgresult, tour, cities, blocks, lgtours);
// First copy only the cost (result[0..1]); the full tour is fetched
// only when this batch improved on the best cost so far.
if (hipSuccess != hipMemcpy(result, lgresult, sizeof(int) * 2, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of result from device failed\n"); CudaTest("result copy from device failed");
if (best > result[0]) {
best = result[0];
if (hipSuccess != hipMemcpy(result, lgresult, sizeof(int) * (cities + 3), hipMemcpyDeviceToHost)) fprintf(stderr, "copying of result from device failed\n"); CudaTest("result copy from device failed");
}
tour += blocks;
blocks = min(tours-tour, TOURS110);
}
}
else {
fprintf(stderr, "city count must be <= 110\n");
exit(-1);
}
printf("GPU min cost = %d\n", best);
// NOTE(review): result[2] is only filled by the full (cities + 3) copy
// above, which assumes at least one batch improves on the initial best --
// verify this holds for all inputs.
printf("GPU min tour = %d\n", result[2]);
hipFree(lgtours);
hipFree(lscratch);
hipFree(lgresult);
hipFree(lgdist);
}
/******************************************************************************/
/* MAIN */
/* Usage: ./TSP_GPU <path to input database> <number of climbers> */
/******************************************************************************/
// Entry point: validates the command line (<input database> <climbers>),
// selects the best GPU, prefers shared memory for the TSP kernel's cache
// configuration, and hands off to run().
int main(int argc, char *argv[])
{
register int climbers, SMs, deviceID;
if(argc != 3) {
fprintf(stderr, "usage: %s <path to input database> <number of climbers>\n", argv[0]);
exit(-1);
}
printf("\nTSP_GPU v1.0 Copyright (c) 2011 Texas State University-San Marcos\n");
deviceID = VerifySystemParameters(&SMs);
hipSetDevice(deviceID);
CudaTest("initialization");
hipFuncSetCacheConfig(TSP110Kernel, hipFuncCachePreferShared);
climbers = atoi(argv[2]);
run(argv[1], climbers, SMs);
return 0;
}
| 26326bbceadd838cf352bf47bfd2d35c0332d7bd.cu | /*
Copyright (c) 2011, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted for academic, research, experimental, or personal use provided
that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
For all other uses, please contact the Office for Commercialization and Industry
Relations at Texas State University-San Marcos <http://www.txstate.edu/ocir/>.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Martin Burtscher and Molly A. O'Neil
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7fffffff
#define CIPOW2 128 /* power of 2 and as large as the largest possible input */
#define TOURS110 (65536*2)
__device__ volatile unsigned int gblkcnt, gcurr;
__device__ volatile unsigned long long best;
/******************************************************************************/
/* Kernels to initialize best tour tracking and GPU state. */
/******************************************************************************/
// Reset the global best-tour record: thread 0 of the grid sets `best` to
// the maximum unsigned 64-bit value so any real packed tour cost beats it.
__global__ void ResetKernel()
{
register int k;
k = threadIdx.x + blockIdx.x * blockDim.x;
if (k == 0) {
best = 0xffffffffffffffffULL;
}
}
// Reset the per-launch work-list state.  gblkcnt counts finished blocks
// (used to elect the block that writes the final result); gcurr is the next
// climber index to hand out, seeded just past the initial batch of
// blocksTimesThreads climbers that the TSP kernel threads start themselves.
__global__ void Reset110Kernel(int blocksTimesThreads)
{
gblkcnt = 0;
gcurr = blocksTimesThreads;
}
/******************************************************************************/
/* The GPU TSP kernel: for each thread, initializes a first climber and */
/* randomizes a starting tour, then evaluates IHC steps until a local minimum */
/* is found, at which point a new climber is obtained from the worklist. */
/******************************************************************************/
/*
 * TSP110Kernel: one climber per thread performs iterated hill climbing with
 * 2-opt moves on a private tour.  When a thread reaches a local minimum it
 * packs the tour cost and climber id into a 64-bit record (cost in the upper
 * 32 bits, id in the lower 32), merges it atomically into `best`, and pulls
 * a fresh climber index from the global work list `gcurr`.
 *
 *   gdist      - cities*cities distance matrix in global memory (staged to shared)
 *   gresult    - output: [0]=best cost, [1]=0, [2]=winning climber id, [3..]=tour
 *   touroffset - climber-id offset of this launch (also used as the RNG seed base)
 *   cities     - number of cities (must be <= 110)
 *   tours      - total number of climbers to evaluate
 *   gtours     - per-climber tour storage, citiespad ints per climber
 *
 * Launched as <<<SMs, 1024>>>; needs 110*110 ints of static shared memory.
 */
__global__
__launch_bounds__(1024, 1)
void TSP110Kernel(int *gdist, int *gresult, int touroffset, int cities, int tours, int *gtours)
{
  register int i, j, change, mini, minj, minchange, randx, citiesm1, citiespad;
  register int ti, tj, tiplus1, tjplus1, dist_i_iplus1, ti_p1, mytour, from, to, cost;
  register int *sdist_ti, *sdist_tiplus1, *ltour;
  register unsigned long long mycost, current;
  unsigned char tour[110]; // local memory for coalesced accesses, shifted by one entry relative to gresult
  __shared__ int sdist[110 * 110];
  // Stage the distance matrix into shared memory (cooperative strided copy).
  for (i = threadIdx.x; i < cities * cities; i += blockDim.x) {
    sdist[i] = gdist[i];
  }
  __syncthreads();
  citiesm1 = cities - 1;
  citiespad = (citiesm1 + 33) & (~31); // pad per-climber storage to a multiple of 32 ints
  mytour = threadIdx.x + blockIdx.x * blockDim.x; // global thread id doubles as first climber id
  // Threads whose initial id exceeds the climber budget do no climbing at all.
  if (mytour < tours) {
    // Identity tour: city 0 is implicit at the front, so store 1..cities-1, 0.
    for (i = 0; i < citiesm1; i++) {
      tour[i] = i + 1;
    }
    tour[citiesm1] = 0;
    // Randomize the initial tour with an LCG seeded by the climber's global id.
    randx = mytour + touroffset;
    for (i = 0; i < citiesm1; i++) {
      randx = (MULT * randx + ADD) & MASK;
      j = randx % citiesm1;
      to = tour[i];
      tour[i] = tour[j];
      tour[j] = to;
    }
    // Main climber loop: keep applying the best 2-opt move until none improves.
    do {
      minchange = 0;
      ti = 0;            // row offset for tour[-1] (city 0)
      ti_p1 = tour[0];
      // Evaluate 2-opt moves: remove edges (i,i+1) and (j,j+1), add (i,j) and
      // (i+1,j+1).  Mirrored and adjacent pairs are skipped since they cannot
      // improve the tour.
      for (i = 2; i < cities; i++) { // loop bias = 2, loop from i=1 to i=cities-2
        tiplus1 = ti_p1 * cities;
        dist_i_iplus1 = sdist[ti + ti_p1];
        tj = ti_p1 = tour[i-1];
        sdist_ti = &sdist[ti];           // row of distance matrix for city i-1
        sdist_tiplus1 = &sdist[tiplus1]; // row for city i
#pragma unroll 8
        for (j = i; j < cities; j++) {
          tjplus1 = tour[j];
          // Tour-length delta of the move: added edges minus removed edges.
          change = sdist_ti[tj] + sdist_tiplus1[tjplus1] - dist_i_iplus1 - sdist[tj * cities + tjplus1];
          tj = tjplus1;
          // Track the best (most negative) move seen so far.
          if ((minchange > change) && (j < cities)) {
            minchange = change;
            mini = i;
            minj = j;
          }
        }
        ti = tiplus1;
      }
      if (minchange < 0) {
        // Improving move found: new tour is 0..mini, minj downto mini+1, minj+1..
        // i.e. reverse the segment between the two cut edges, then iterate again.
        i = mini - 2;
        j = minj - 1;
        while (i < j) {
          to = tour[j];
          tour[j] = tour[i];
          tour[i] = to;
          i++;
          j--;
        }
      }
      // Otherwise this climber is at a local minimum: compute the tour cost,
      // record it if best so far, and fetch a new climber from the worklist.
      else {
        cost = 0;
        from = 0;
        for (i = 0; i < citiesm1; i++) {
          to = tour[i];
          cost += sdist[from * cities + to];
          from = to;
        }
        mycost = cost + sdist[from];                    // close the tour back to city 0 (matrix is symmetric)
        mycost = (mycost << 32) + mytour + touroffset;  // pack cost|climber-id into one 64-bit record
        current = best;
        // Lock-free minimum update of the packed best record.
        while (mycost < current) {
          atomicCAS((unsigned long long *)&best, current, mycost);
          current = best;
        }
        if (mycost == current) {
          // We own the current best record: publish the winning tour.
          // (fixed: "ltour = >ours[...]" was a corrupted "&gtours[...]")
          ltour = &gtours[mytour * citiespad];
          for (i = 0; i < citiesm1; i++) {
            ltour[i] = tour[i];
          }
        }
        // atomicAdd returns gcurr's value before the increment, i.e. our new id.
        mytour = atomicAdd((int *)&gcurr, 1);
        if (mytour < tours) {
          // Restarts remain: rebuild and re-randomize a tour for the new climber.
          for (i = 0; i < citiesm1; i++) {
            tour[i] = i + 1;
          }
          tour[citiesm1] = 0;
          randx = mytour + touroffset;
          for (i = 0; i < citiesm1; i++) {
            randx = (MULT * randx + ADD) & MASK;
            j = randx % (citiesm1);
            to = tour[i];
            tour[i] = tour[j];
            tour[j] = to;
          }
        }
      }
    } while (mytour < tours);
  }
  __syncthreads();
  // The last block to finish (elected via atomicInc wrap-around) writes the
  // winning cost, climber id, and tour into gresult.
  if (threadIdx.x == 0) {
    to = gridDim.x - 1;
    if (to == atomicInc((unsigned int *)&gblkcnt, to)) {
      mytour = best & 0xffffffff;
      gresult[0] = best >> 32;
      gresult[1] = 0;
      gresult[2] = mytour;
      mytour %= TOURS110;
      // (fixed: same ">ours" corruption as above)
      ltour = &gtours[mytour * citiespad];
      for (i = 0; i < citiesm1; i++) {
        gresult[i+3] = ltour[i];
      }
    }
  }
}
/******************************************************************************/
/* Function to read the TSP database input file and initialize the distance */
/* matrix. */
/******************************************************************************/
/* Parse a TSPLIB-style input file and build the distance matrix.
   Skips the three header lines (NAME/TYPE/COMMENT), reads the city count
   after the ':' of the DIMENSION line, then reads "id x y" records from the
   NODE_COORD_SECTION.  Fills `dist` (cities x cities) with symmetric rounded
   Euclidean distances, setting the diagonal to a huge value so a city never
   links to itself.  Returns the city count; exits on any format error. */
static int readFile(char *filename, int *dist)
{
  register int i, j, ch, cnt, cities;
  int i1;
  float i2, i3;
  register float *posx, *posy;
  register double dx, dy;
  register FILE *f;
  char str[256];
  f = fopen(filename, "r+t");
  if (f == NULL) {fprintf(stderr, "could not open file %s\n", filename); exit(-1);}
  // Skip three header lines, then scan forward to the ':' of DIMENSION.
  ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
  ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
  ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
  ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f);
  fscanf(f, "%s\n", str);
  cities = atoi(str);
  if (cities == 0) {
    fprintf(stderr, "%d cities\n", cities);
    exit(-1);
  }
  if (cities >= CIPOW2) {
    fprintf(stderr, "%d cities is too large\n", cities);
    exit(-1);
  }
  posx = (float *)malloc(sizeof(float) * cities);
  posy = (float *)malloc(sizeof(float) * cities);
  if ((posx == NULL) || (posy == NULL)) {
    fprintf(stderr, "out of memory\n");
    exit(-1);
  }
  ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
  fscanf(f, "%s\n", str);
  if (strcmp(str, "NODE_COORD_SECTION") != 0) {
    fprintf(stderr, "wrong file format\n");
    exit(-1);
  }
  cnt = 0;
  // Require all three fields to convert.  The previous `while (fscanf(...))`
  // also looped when fscanf returned EOF (-1, truthy), so a truncated file
  // could spin and read uninitialized values; `== 3` exits cleanly and the
  // cnt != cities check below reports the problem.
  while (fscanf(f, "%d %f %f\n", &i1, &i2, &i3) == 3) {
    posx[cnt] = i2;
    posy[cnt] = i3;
    cnt++;
    if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);}
    if (cnt != i1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, i1); exit(-1);}
  }
  if (cnt != cities) {
    fprintf(stderr, "read %d instead of %d cities\n", cnt, cities);
    exit(-1);
  }
  fscanf(f, "%s", str);
  if (strcmp(str, "EOF") != 0) {
    fprintf(stderr, "didn't see 'EOF' at end of file\n");
    exit(-1);
  }
  fclose(f);
  //This for loop creates a distance matrix, unfortunately this method means
  //we lose city ordering, but we will retain a minimum cost for metrics.
  for (i = 0; i < cities; i++) {
    for (j = 0; j < cities; j++) {
      dx = posx[i] - posx[j];
      dy = posy[i] - posy[j];
      dist[j * cities + i] = dist[i * cities + j] = (int)(sqrt(dx * dx + dy * dy) + 0.5);
    }
    dist[i * cities + i] = 0x3fffffff; // half of maxint
  }
  free(posx);
  free(posy);
  return cities;
}
/******************************************************************************/
/* Functions to synchronize GPU threads and check for error status, as well */
/* as to ascertain number of SMs in device and proper architecture version. */
/******************************************************************************/
// Synchronize with the device and abort with `msg` if any prior CUDA API
// call or kernel launch left a pending error status.
static void CudaTest(char *msg)
{
  cudaError_t e;
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // documented drop-in replacement with identical semantics.
  cudaDeviceSynchronize();
  if (cudaSuccess != (e = cudaGetLastError())) {
    fprintf(stderr, "%s: %d\n", msg, e);
    fprintf(stderr, "%s\n", cudaGetErrorString(e));
    exit(-1);
  }
}
// Select the GPU to run on: first find the highest SM major version present,
// then among devices of that architecture pick the one with the highest
// estimated throughput (cores-per-MP * MP count * clock).  Stores the chosen
// device's multiprocessor count in *SMs and returns its device index.
// Exits if no device with compute capability >= 2.0 is found.
static int VerifySystemParameters(int *SMs)
{
int deviceCount, currentDevice = 0, bestSMArch = 0;
int maxComputePerf = 0, maxPerfDevice = 0, SMPerMP;
int archCoresSM[3] = { 1, 8, 32 }; // cores per multiprocessor, indexed by SM major (capped at 2.x)
cudaDeviceProp deviceProp;
cudaGetDeviceCount(&deviceCount);
if(deviceCount <= 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
// Find the best SM architecture device
while(currentDevice < deviceCount) {
cudaGetDeviceProperties(&deviceProp, currentDevice);
if(deviceProp.major > 0 && deviceProp.major < 9999) {
bestSMArch = max(bestSMArch, deviceProp.major);
}
currentDevice++;
}
// Find the best GPU device
currentDevice = 0;
while(currentDevice < deviceCount) {
cudaGetDeviceProperties(&deviceProp, currentDevice);
if(deviceProp.major == 9999 && deviceProp.minor == 9999) {
SMPerMP = 1; // 9999.9999 marks a device emulation target
} else if (deviceProp.major <= 2) {
SMPerMP = archCoresSM[deviceProp.major];
} else { // SM major > 2
SMPerMP = archCoresSM[2];
}
int computePerf = deviceProp.multiProcessorCount * SMPerMP * deviceProp.clockRate;
if((deviceProp.major == bestSMArch) && (computePerf > maxComputePerf)) {
maxComputePerf = computePerf;
maxPerfDevice = currentDevice;
}
currentDevice++;
}
cudaGetDeviceProperties(&deviceProp, maxPerfDevice);
if(deviceProp.major < 2) {
fprintf(stderr, "No device found with compute capability 2.0 or above\n");
exit(-1);
}
*SMs = deviceProp.multiProcessorCount;
return maxPerfDevice;
}
/******************************************************************************/
/* Run function reads input database and launches the GPU kernels. */
/* Prints to std out: GPU best tour and tour cost, as well as error from */
/* optimal tour read from input file. */
/******************************************************************************/
// Host driver: read the distance matrix from `filename`, allocate and fill
// device buffers, then launch the TSP kernel in batches of up to TOURS110
// climbers until `tours` climbers have been evaluated, tracking the best
// tour cost seen across batches and printing the final result.
void run(char *filename, int tours, int SMs)
{
int *lgdist, tour, blocks, best;
int *lgresult, *lgtours, *lscratch;
int dist[CIPOW2 * CIPOW2];
int result[3 + CIPOW2];
int cities;
cities = readFile(filename, dist);
printf("%s: %d tours with %d cities each\n", filename, tours, cities);
if (tours < 1) {
fprintf(stderr, "tour count must be positive\n");
exit(-1);
}
// Per-climber tour rows are padded to a multiple of 32 ints, matching the
// citiespad computation inside TSP110Kernel.
if (cudaSuccess != cudaMalloc((void **)&lgtours, ((cities + 32) & (~31)) * min(TOURS110, tours) * sizeof(int))) fprintf(stderr, "could not allocate gtours\n"); CudaTest("couldn't allocate gtours");
if (cudaSuccess != cudaMalloc((void **)&lscratch, ((cities + 32) & (~31)) * min(TOURS110, tours) * sizeof(int))) fprintf(stderr, "could not allocate scratch\n"); CudaTest("couldn't allocate scratch");
if (cudaSuccess != cudaMalloc((void **)&lgresult, sizeof(int) * (cities + 3))) fprintf(stderr, "could not allocate gresult\n"); CudaTest("couldn't allocate gresult");
if (cudaSuccess != cudaMalloc((void **)&lgdist, sizeof(int) * cities * cities)) fprintf(stderr, "could not allocate gdist\n"); CudaTest("couldn't allocate gdist");
if (cudaSuccess != cudaMemcpy(lgdist, dist, sizeof(int) * cities * cities, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of dist to device failed\n"); CudaTest("dist copy to device failed");
//Above if statement copies the dist matrix to the GPU, we will just pass the matrix directly for single threaded
ResetKernel<<<SMs*3, 512>>>();
best = 0x7fffffff;
tour = 0;
if (cities <= 110) {
blocks = min(tours, TOURS110); //Selects the minimum between the number of climbers given in cmd input, and TOURS110 (a huge number)
while (tours > tour) {//While our tour number is less than that huge number
Reset110Kernel<<<1, 1>>>(SMs*2*512);
TSP110Kernel<<<SMs, 1024>>>(lgdist, lgresult, tour, cities, blocks, lgtours);
// First copy only the cost (result[0..1]); the full tour is fetched
// only when this batch improved on the best cost so far.
if (cudaSuccess != cudaMemcpy(result, lgresult, sizeof(int) * 2, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of result from device failed\n"); CudaTest("result copy from device failed");
if (best > result[0]) {
best = result[0];
if (cudaSuccess != cudaMemcpy(result, lgresult, sizeof(int) * (cities + 3), cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of result from device failed\n"); CudaTest("result copy from device failed");
}
tour += blocks;
blocks = min(tours-tour, TOURS110);
}
}
else {
fprintf(stderr, "city count must be <= 110\n");
exit(-1);
}
printf("GPU min cost = %d\n", best);
// NOTE(review): result[2] is only filled by the full (cities + 3) copy
// above, which assumes at least one batch improves on the initial best --
// verify this holds for all inputs.
printf("GPU min tour = %d\n", result[2]);
cudaFree(lgtours);
cudaFree(lscratch);
cudaFree(lgresult);
cudaFree(lgdist);
}
/******************************************************************************/
/* MAIN */
/* Usage: ./TSP_GPU <path to input database> <number of climbers> */
/******************************************************************************/
// Entry point: validates the command line (<input database> <climbers>),
// selects the best GPU, prefers shared memory for the TSP kernel's cache
// configuration, and hands off to run().
int main(int argc, char *argv[])
{
register int climbers, SMs, deviceID;
if(argc != 3) {
fprintf(stderr, "usage: %s <path to input database> <number of climbers>\n", argv[0]);
exit(-1);
}
printf("\nTSP_GPU v1.0 Copyright (c) 2011 Texas State University-San Marcos\n");
deviceID = VerifySystemParameters(&SMs);
cudaSetDevice(deviceID);
CudaTest("initialization");
cudaFuncSetCacheConfig(TSP110Kernel, cudaFuncCachePreferShared);
climbers = atoi(argv[2]);
run(argv[1], climbers, SMs);
return 0;
}
|
4924eb354240e77fc5bb8a5b8b43a56a2ab61662.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "datadef.h"
#include "LCRNG.cuh"
__global__ void pop_secondaries_kernel(unsigned N, unsigned RNUM_PER_THREAD, unsigned* completed, unsigned* scanned, unsigned* yield, unsigned* done, unsigned* index, unsigned* rxn, source_point* space, float* E , unsigned* rn_bank, float** energydata){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid >= N){return;}
if (yield[tid]==0){return;}
//if(done[tid]){return;}
// external data
unsigned position = scanned[tid];
unsigned this_yield = yield[tid];
unsigned dex = index[tid];
float this_E = E[tid];
//unsigned this_rxn = rxn[tid];
float * this_array = energydata[dex];
unsigned data_dex = 0;
source_point this_space = space[tid];
unsigned rn = rn_bank[tid];
// internal data
float Emin=1e-11;
float Emax=20.0;
unsigned k, n, offset, vlen, next_vlen, law;
float sampled_E, phi, mu, rn1, rn2, last_E, next_E, e_start, E0, E1, Ek, next_e_start, next_e_end, last_e_start, last_e_end, diff;;
float cdf0, cdf1, e0, e1, m, pdf0, pdf1, arg;
const float pi = 3.14159265359 ;
// sample spectrum, set data.
// reset self then write elsewhere
//read in values
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
offset = 5;
//printf("rxn %u eptr %p\n",this_rxn,this_array);
memcpy(&last_E, &this_array[0], sizeof(float));
memcpy(&next_E, &this_array[1], sizeof(float));
memcpy(&vlen, &this_array[2], sizeof(float));
memcpy(&next_vlen, &this_array[3], sizeof(float));
memcpy(&law, &this_array[4], sizeof(float));
float r = (this_E-last_E)/(next_E-last_E);
last_e_start = this_array[ offset ];
last_e_end = this_array[ offset + vlen - 1 ];
next_e_start = this_array[ offset + 3*vlen ];
next_e_end = this_array[ offset + 3*vlen + next_vlen - 1];
//printf("rxn=%u law=%u vlen/next= %u %u, E-last/this/next= %6.4E %6.4E %6.4E\n",this_rxn,law,vlen,next_vlen,last_E,this_E,next_E);
//sample energy dist
sampled_E = 0.0;
if( rn2 >= r ){ //sample last E
diff = next_e_end - next_e_start;
e_start = next_e_start;
for ( n=0 ; n<vlen-1 ; n++ ){
cdf0 = this_array[ (offset + vlen ) + n+0];
cdf1 = this_array[ (offset + vlen ) + n+1];
pdf0 = this_array[ (offset + 2*vlen ) + n+0];
pdf1 = this_array[ (offset + 2*vlen ) + n+1];
e0 = this_array[ (offset ) + n+0];
e1 = this_array[ (offset ) + n+1];
if( rn1 >= cdf0 & rn1 < cdf1 ){
break;
}
}
}
else{
diff = next_e_end - next_e_start;
e_start = next_e_start;
for ( n=0 ; n<next_vlen-1 ; n++ ){
cdf0 = this_array[ (offset + 3*vlen + next_vlen ) + n+0];
cdf1 = this_array[ (offset + 3*vlen + next_vlen ) + n+1];
pdf0 = this_array[ (offset + 3*vlen + 2*next_vlen ) + n+0];
pdf1 = this_array[ (offset + 3*vlen + 2*next_vlen ) + n+1];
e0 = this_array[ (offset + 3*vlen ) + n+0];
e1 = this_array[ (offset + 3*vlen ) + n+1];
if( rn1 >= cdf0 & rn1 < cdf1 ){
break;
}
}
}
// interpolate the values
m = (pdf1 - pdf0)/(e1-e0);
arg = pdf0*pdf0 + 2.0 * m * (rn1-cdf0);
if(arg<0){arg=0.0;}
E0 = e0 + ( sqrtf( arg ) - pdf0) / m ;
//sampled_E = e0 + (rn1-cdf0)/pdf0;
//printf("%u %u %u %u %u %p %6.4E %u %u %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E\n",tid,tid*RNUM_PER_THREAD + 12,fork,n,dex,this_array,rn1,next_vlen,vlen,this_E,e0,e1,cdf0,cdf1,pdf0,pdf1,m,sampled_E);
// scale it
E1 = last_e_start + r*( next_e_start - last_e_start );
Ek = last_e_end + r*( next_e_end - last_e_end );
sampled_E = E1 +(E0-e_start)*(Ek-E1)/diff;
//sample isotropic directions
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
mu = 2.0*rn1-1.0;
phi = 2.0*pi*rn2;
//check limits
if (sampled_E >= Emax){sampled_E = Emax * 0.9;}//printf("enforcing limits in pop data_dex=%u, sampled_E = %6.4E\n",data_dex,sampled_E);}
if (sampled_E <= Emin){sampled_E = Emin * 1.1;}//printf("enforcing limits in pop data_dex=%u, sampled_E = %6.4E\n",data_dex,sampled_E);}
// sync before writes
__syncthreads();
// set self data
E [ tid ] = sampled_E;
space[ tid ].xhat = sqrtf(1.0-(mu*mu))*cosf(phi);
space[ tid ].yhat = sqrtf(1.0-(mu*mu))*sinf(phi);
space[ tid ].zhat = mu;
done [ tid ] = 0;
yield[ tid ] = 0;
rxn [ tid ] = 0;//this_rxn;
//printf("popped - dex %u rxn %u ptr %p sampled_E %6.4E\n",tid,this_rxn,this_array,sampled_E);
for(k=0 ; k < this_yield-1 ; k++ ){
//get proper data index
data_dex=completed[position+k];
//printf("tid %u position %u k %u data_dex %u done %u (xyz) % 6.4E % 6.4E % 6.4E\n",tid,position,k,data_dex,done[data_dex],this_space.x,this_space.y,this_space.z);
//make sure data is done
if(!done[data_dex]){printf("overwriting into active data!\n");}
//copy in values
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
//rn1 = rn_bank[ tid*RNUM_PER_THREAD + 11 + (k+1)*4];
//rn2 = rn_bank[ tid*RNUM_PER_THREAD + 12 + (k+1)*4];
//sample energy dist
sampled_E = 0.0;
if( rn2 >= r ){ //sample last E
diff = next_e_end - next_e_start;
e_start = next_e_start;
for ( n=0 ; n<vlen-1 ; n++ ){
cdf0 = this_array[ (offset + vlen ) + n+0];
cdf1 = this_array[ (offset + vlen ) + n+1];
pdf0 = this_array[ (offset + 2*vlen ) + n+0];
pdf1 = this_array[ (offset + 2*vlen ) + n+1];
e0 = this_array[ (offset ) + n+0];
e1 = this_array[ (offset ) + n+1];
if( rn1 >= cdf0 & rn1 < cdf1 ){
break;
}
}
}
else{
diff = next_e_end - next_e_start;
e_start = next_e_start;
for ( n=0 ; n<next_vlen-1 ; n++ ){
cdf0 = this_array[ (offset + 3*vlen + next_vlen ) + n+0];
cdf1 = this_array[ (offset + 3*vlen + next_vlen ) + n+1];
pdf0 = this_array[ (offset + 3*vlen + 2*next_vlen ) + n+0];
pdf1 = this_array[ (offset + 3*vlen + 2*next_vlen ) + n+1];
e0 = this_array[ (offset + 3*vlen ) + n+0];
e1 = this_array[ (offset + 3*vlen ) + n+1];
if( rn1 >= cdf0 & rn1 < cdf1 ){
break;
}
}
}
// interpolate the values
m = (pdf1 - pdf0)/(e1-e0);
arg = pdf0*pdf0 + 2.0 * m * (rn1-cdf0);
if(arg<0){arg=0.0;}
E0 = e0 + ( sqrtf( arg ) - pdf0) / m ;
//sampled_E = e0 + (rn1-cdf0)/pdf0;
//printf("%u %u %u %u %u %p %6.4E %u %u %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E\n",tid,tid*RNUM_PER_THREAD + 11 + (k+1)*3,fork,n,dex,this_array,rn1,next_vlen,vlen,this_E,e0,e1,cdf0,cdf1,pdf0,pdf1,m,sampled_E);
// scale it
E1 = last_e_start + r*( next_e_start - last_e_start );
Ek = last_e_end + r*( next_e_end - last_e_end );
sampled_E = E1 +(E0-e_start)*(Ek-E1)/diff;
//sample isotropic directions
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
mu = 2.0*rn1-1.0;
phi = 2.0*pi*rn2;
//printf("tid %u k %u mu % 6.4E phi % 6.4E rn1 % 6.4E rn2 % 6.4E compactpos %u realpos %u\n",tid,k,mu,phi,rn1,rn2, position, completed[k+position]);
//check data
//printf("done? %u\n",done[ data_dex ]);
//check limits
if (sampled_E >= Emax){sampled_E = Emax * 0.9;}//printf("enforcing limits in pop data_dex=%u, sampled_E = %6.4E\n",data_dex,sampled_E);}
if (sampled_E <= Emin){sampled_E = Emin * 1.1;}//printf("enforcing limits in pop data_dex=%u, sampled_E = %6.4E\n",data_dex,sampled_E);}
// sync before writes
__syncthreads();
// set data
E [ data_dex ] = sampled_E;
space[ data_dex ].x = this_space.x;
space[ data_dex ].y = this_space.y;
space[ data_dex ].z = this_space.z;
space[ data_dex ].xhat = sqrtf(1.0-(mu*mu))*cosf(phi);
space[ data_dex ].yhat = sqrtf(1.0-(mu*mu))*sinf(phi);
space[ data_dex ].zhat = mu;
done [ data_dex ] = 0;
yield[ data_dex ] = 0;
rxn [ data_dex ] = 0;//this_rxn;
//printf("popped - dex %u rxn %u ptr %p sampled_E %6.4E\n",data_dex,this_rxn,this_array,sampled_E);
}
rn_bank[tid] = rn;
}
void pop_secondaries( unsigned NUM_THREADS, unsigned N, unsigned RNUM_PER_THREAD, unsigned* d_completed, unsigned* d_scanned, unsigned* d_yield, unsigned* d_done, unsigned* d_index, unsigned* d_rxn, source_point* d_space, float* d_E , unsigned* d_rn_bank, float ** energydata){
	// Host-side launcher: one thread per particle history.  The grid is
	// rounded up so all N histories are covered; the kernel bounds-checks
	// the tail threads itself.
	unsigned grid_size = N / NUM_THREADS + ( N % NUM_THREADS ? 1u : 0u );
	hipLaunchKernelGGL(( pop_secondaries_kernel) , dim3(grid_size), dim3(NUM_THREADS) , 0, 0,  N, RNUM_PER_THREAD, d_completed, d_scanned, d_yield, d_done, d_index, d_rxn, d_space, d_E , d_rn_bank, energydata);
	// Block until the kernel finishes so callers may read device buffers.
	hipDeviceSynchronize();
}
| 4924eb354240e77fc5bb8a5b8b43a56a2ab61662.cu | #include <cuda.h>
#include <stdio.h>
#include "datadef.h"
#include "LCRNG.cuh"
// Refill particle slots with fission secondaries after a transport step.
// One thread per source history: threads with yield[tid]==0 return at once;
// a surviving thread resamples its own slot (tid) and then fills this_yield-1
// additional slots taken from the compacted `completed` list starting at
// scanned[tid].  energydata[dex] is a flat per-reaction table:
//   [0]=last_E  [1]=next_E  [2]=vlen  [3]=next_vlen  [4]=law
// (the counts are raw-bit-copied out of float storage via memcpy), followed by
// (energy grid | cdf | pdf) with vlen points each for the lower incident
// energy, then the same three arrays with next_vlen points for the upper one.
// The outgoing energy is sampled by inverting the tabulated cdf (pdf linear on
// each grid cell), rescaled between the two bracketing grids, and the flight
// direction is sampled isotropically.
__global__ void pop_secondaries_kernel(unsigned N, unsigned RNUM_PER_THREAD, unsigned* completed, unsigned* scanned, unsigned* yield, unsigned* done, unsigned* index, unsigned* rxn, source_point* space, float* E , unsigned* rn_bank, float** energydata){
	int tid = threadIdx.x+blockIdx.x*blockDim.x;
	// NOTE(review): threads that return here never reach the __syncthreads()
	// calls further down, leaving those barriers in divergent control flow
	// (undefined behavior in CUDA) — confirm this is intentional.
	if (tid >= N){return;}
	if (yield[tid]==0){return;}
	//if(done[tid]){return;}
	// external data
	unsigned position = scanned[tid];
	unsigned this_yield = yield[tid];
	unsigned dex = index[tid];
	float this_E = E[tid];
	//unsigned this_rxn = rxn[tid];
	float * this_array = energydata[dex];
	unsigned data_dex = 0;
	source_point this_space = space[tid];
	unsigned rn = rn_bank[tid];
	// internal data
	float Emin=1e-11;
	float Emax=20.0;
	unsigned k, n, offset, vlen, next_vlen, law;
	float sampled_E, phi, mu, rn1, rn2, last_E, next_E, e_start, E0, E1, Ek, next_e_start, next_e_end, last_e_start, last_e_end, diff;;
	float cdf0, cdf1, e0, e1, m, pdf0, pdf1, arg;
	const float pi = 3.14159265359 ;
	// sample spectrum, set data.
	// reset self then write elsewhere
	//read in values
	rn1 = get_rand(&rn);
	rn2 = get_rand(&rn);
	offset = 5;
	//printf("rxn %u eptr %p\n",this_rxn,this_array);
	// header fields: the counts are stored as raw bits inside the float
	// array, hence memcpy rather than a converting cast
	memcpy(&last_E, &this_array[0], sizeof(float));
	memcpy(&next_E, &this_array[1], sizeof(float));
	memcpy(&vlen, &this_array[2], sizeof(float));
	memcpy(&next_vlen, &this_array[3], sizeof(float));
	memcpy(&law, &this_array[4], sizeof(float));
	// interpolation fraction between the two bracketing incident energies
	float r = (this_E-last_E)/(next_E-last_E);
	last_e_start = this_array[ offset ];
	last_e_end = this_array[ offset + vlen - 1 ];
	next_e_start = this_array[ offset + 3*vlen ];
	next_e_end = this_array[ offset + 3*vlen + next_vlen - 1];
	//printf("rxn=%u law=%u vlen/next= %u %u, E-last/this/next= %6.4E %6.4E %6.4E\n",this_rxn,law,vlen,next_vlen,last_E,this_E,next_E);
	//sample energy dist
	sampled_E = 0.0;
	// NOTE(review): this branch walks the lower-energy (vlen) table, yet
	// diff/e_start are taken from the *next* grid edges exactly as in the
	// else branch — verify against the upstream source whether last_e_*
	// was intended here.
	if( rn2 >= r ){ //sample last E
		diff = next_e_end - next_e_start;
		e_start = next_e_start;
		// locate the cdf cell that brackets rn1
		for ( n=0 ; n<vlen-1 ; n++ ){
			cdf0 = this_array[ (offset + vlen ) + n+0];
			cdf1 = this_array[ (offset + vlen ) + n+1];
			pdf0 = this_array[ (offset + 2*vlen ) + n+0];
			pdf1 = this_array[ (offset + 2*vlen ) + n+1];
			e0 = this_array[ (offset ) + n+0];
			e1 = this_array[ (offset ) + n+1];
			// bitwise & of two bool comparisons — same result as && here
			if( rn1 >= cdf0 & rn1 < cdf1 ){
				break;
			}
		}
	}
	else{
		diff = next_e_end - next_e_start;
		e_start = next_e_start;
		for ( n=0 ; n<next_vlen-1 ; n++ ){
			cdf0 = this_array[ (offset + 3*vlen + next_vlen ) + n+0];
			cdf1 = this_array[ (offset + 3*vlen + next_vlen ) + n+1];
			pdf0 = this_array[ (offset + 3*vlen + 2*next_vlen ) + n+0];
			pdf1 = this_array[ (offset + 3*vlen + 2*next_vlen ) + n+1];
			e0 = this_array[ (offset + 3*vlen ) + n+0];
			e1 = this_array[ (offset + 3*vlen ) + n+1];
			if( rn1 >= cdf0 & rn1 < cdf1 ){
				break;
			}
		}
	}
	// interpolate the values
	// invert the quadratic cdf of the linearly-interpolated pdf.
	// NOTE(review): m == 0 (flat pdf segment) divides by zero — unguarded.
	m = (pdf1 - pdf0)/(e1-e0);
	arg = pdf0*pdf0 + 2.0 * m * (rn1-cdf0);
	if(arg<0){arg=0.0;}
	E0 = e0 + ( sqrtf( arg ) - pdf0) / m ;
	//sampled_E = e0 + (rn1-cdf0)/pdf0;
	//printf("%u %u %u %u %u %p %6.4E %u %u %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E\n",tid,tid*RNUM_PER_THREAD + 12,fork,n,dex,this_array,rn1,next_vlen,vlen,this_E,e0,e1,cdf0,cdf1,pdf0,pdf1,m,sampled_E);
	// scale it
	E1 = last_e_start + r*( next_e_start - last_e_start );
	Ek = last_e_end + r*( next_e_end - last_e_end );
	sampled_E = E1 +(E0-e_start)*(Ek-E1)/diff;
	//sample isotropic directions
	rn1 = get_rand(&rn);
	rn2 = get_rand(&rn);
	mu = 2.0*rn1-1.0;
	phi = 2.0*pi*rn2;
	//check limits
	if (sampled_E >= Emax){sampled_E = Emax * 0.9;}//printf("enforcing limits in pop data_dex=%u, sampled_E = %6.4E\n",data_dex,sampled_E);}
	if (sampled_E <= Emin){sampled_E = Emin * 1.1;}//printf("enforcing limits in pop data_dex=%u, sampled_E = %6.4E\n",data_dex,sampled_E);}
	// sync before writes
	__syncthreads();
	// set self data
	E [ tid ] = sampled_E;
	space[ tid ].xhat = sqrtf(1.0-(mu*mu))*cosf(phi);
	space[ tid ].yhat = sqrtf(1.0-(mu*mu))*sinf(phi);
	space[ tid ].zhat = mu;
	done [ tid ] = 0;
	yield[ tid ] = 0;
	rxn [ tid ] = 0;//this_rxn;
	//printf("popped - dex %u rxn %u ptr %p sampled_E %6.4E\n",tid,this_rxn,this_array,sampled_E);
	// fill the remaining this_yield-1 secondary slots from the compacted
	// list of completed (dead) histories; same sampling as above
	for(k=0 ; k < this_yield-1 ; k++ ){
		//get proper data index
		data_dex=completed[position+k];
		//printf("tid %u position %u k %u data_dex %u done %u (xyz) % 6.4E % 6.4E % 6.4E\n",tid,position,k,data_dex,done[data_dex],this_space.x,this_space.y,this_space.z);
		//make sure data is done
		if(!done[data_dex]){printf("overwriting into active data!\n");}
		//copy in values
		rn1 = get_rand(&rn);
		rn2 = get_rand(&rn);
		//rn1 = rn_bank[ tid*RNUM_PER_THREAD + 11 + (k+1)*4];
		//rn2 = rn_bank[ tid*RNUM_PER_THREAD + 12 + (k+1)*4];
		//sample energy dist
		sampled_E = 0.0;
		if( rn2 >= r ){ //sample last E
			diff = next_e_end - next_e_start;
			e_start = next_e_start;
			for ( n=0 ; n<vlen-1 ; n++ ){
				cdf0 = this_array[ (offset + vlen ) + n+0];
				cdf1 = this_array[ (offset + vlen ) + n+1];
				pdf0 = this_array[ (offset + 2*vlen ) + n+0];
				pdf1 = this_array[ (offset + 2*vlen ) + n+1];
				e0 = this_array[ (offset ) + n+0];
				e1 = this_array[ (offset ) + n+1];
				if( rn1 >= cdf0 & rn1 < cdf1 ){
					break;
				}
			}
		}
		else{
			diff = next_e_end - next_e_start;
			e_start = next_e_start;
			for ( n=0 ; n<next_vlen-1 ; n++ ){
				cdf0 = this_array[ (offset + 3*vlen + next_vlen ) + n+0];
				cdf1 = this_array[ (offset + 3*vlen + next_vlen ) + n+1];
				pdf0 = this_array[ (offset + 3*vlen + 2*next_vlen ) + n+0];
				pdf1 = this_array[ (offset + 3*vlen + 2*next_vlen ) + n+1];
				e0 = this_array[ (offset + 3*vlen ) + n+0];
				e1 = this_array[ (offset + 3*vlen ) + n+1];
				if( rn1 >= cdf0 & rn1 < cdf1 ){
					break;
				}
			}
		}
		// interpolate the values
		m = (pdf1 - pdf0)/(e1-e0);
		arg = pdf0*pdf0 + 2.0 * m * (rn1-cdf0);
		if(arg<0){arg=0.0;}
		E0 = e0 + ( sqrtf( arg ) - pdf0) / m ;
		//sampled_E = e0 + (rn1-cdf0)/pdf0;
		//printf("%u %u %u %u %u %p %6.4E %u %u %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E %6.4E\n",tid,tid*RNUM_PER_THREAD + 11 + (k+1)*3,fork,n,dex,this_array,rn1,next_vlen,vlen,this_E,e0,e1,cdf0,cdf1,pdf0,pdf1,m,sampled_E);
		// scale it
		E1 = last_e_start + r*( next_e_start - last_e_start );
		Ek = last_e_end + r*( next_e_end - last_e_end );
		sampled_E = E1 +(E0-e_start)*(Ek-E1)/diff;
		//sample isotropic directions
		rn1 = get_rand(&rn);
		rn2 = get_rand(&rn);
		mu = 2.0*rn1-1.0;
		phi = 2.0*pi*rn2;
		//printf("tid %u k %u mu % 6.4E phi % 6.4E rn1 % 6.4E rn2 % 6.4E compactpos %u realpos %u\n",tid,k,mu,phi,rn1,rn2, position, completed[k+position]);
		//check data
		//printf("done? %u\n",done[ data_dex ]);
		//check limits
		if (sampled_E >= Emax){sampled_E = Emax * 0.9;}//printf("enforcing limits in pop data_dex=%u, sampled_E = %6.4E\n",data_dex,sampled_E);}
		if (sampled_E <= Emin){sampled_E = Emin * 1.1;}//printf("enforcing limits in pop data_dex=%u, sampled_E = %6.4E\n",data_dex,sampled_E);}
		// sync before writes
		// NOTE(review): this barrier is inside a loop whose trip count
		// (this_yield-1) differs per thread — divergent barrier; confirm.
		__syncthreads();
		// set data: secondaries inherit the parent position, new E/direction
		E [ data_dex ] = sampled_E;
		space[ data_dex ].x = this_space.x;
		space[ data_dex ].y = this_space.y;
		space[ data_dex ].z = this_space.z;
		space[ data_dex ].xhat = sqrtf(1.0-(mu*mu))*cosf(phi);
		space[ data_dex ].yhat = sqrtf(1.0-(mu*mu))*sinf(phi);
		space[ data_dex ].zhat = mu;
		done [ data_dex ] = 0;
		yield[ data_dex ] = 0;
		rxn [ data_dex ] = 0;//this_rxn;
		//printf("popped - dex %u rxn %u ptr %p sampled_E %6.4E\n",data_dex,this_rxn,this_array,sampled_E);
	}
	// persist the advanced RNG state for this thread
	rn_bank[tid] = rn;
}
void pop_secondaries( unsigned NUM_THREADS, unsigned N, unsigned RNUM_PER_THREAD, unsigned* d_completed, unsigned* d_scanned, unsigned* d_yield, unsigned* d_done, unsigned* d_index, unsigned* d_rxn, source_point* d_space, float* d_E , unsigned* d_rn_bank, float ** energydata){
	// Host-side launcher: one thread per particle history.  The grid is
	// rounded up so all N histories are covered; the kernel bounds-checks
	// the tail threads itself.
	unsigned grid_size = N / NUM_THREADS + ( N % NUM_THREADS ? 1u : 0u );
	pop_secondaries_kernel <<< grid_size, NUM_THREADS >>> (  N, RNUM_PER_THREAD, d_completed, d_scanned, d_yield, d_done, d_index, d_rxn, d_space, d_E , d_rn_bank, energydata);
	// Block until the kernel finishes so callers may read device buffers.
	cudaThreadSynchronize();
}
|
4eae07ad07f249c33a1b0ce7f5eed1562e65c7c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <thrust/device_vector.h>
#include "include/nvgraph_error.hxx"
#include "include/nvgraph_vector_kernels.hxx"
#include "include/pagerank_kernels.hxx"
namespace nvgraph
{
template <typename ValueType_>
__global__ void update_dn_kernel(int num_vertices, ValueType_* aa, ValueType_ beta)
{
	// Grid-stride sweep over the vector a = alpha*a + (1-alpha)e:
	// entries that are exactly zero belong to dangling nodes and receive
	// beta = 1-alpha, since alpha*0 + (1-alpha)*1 = (1-alpha).
	int first  = blockDim.x * blockIdx.x + threadIdx.x;
	int stride = blockDim.x * gridDim.x;
	for (int r = first; r < num_vertices; r += stride)
	{
		if (aa[r] == 0.0)
			aa[r] = beta;
	}
}
template <typename ValueType_>
void update_dangling_nodes(int num_vertices, ValueType_* dangling_nodes, ValueType_ damping_factor, hipStream_t stream)
{
	// 256-thread blocks, grid capped at 4096 blocks; the kernel's
	// grid-stride loop covers any vertices beyond the cap.
	const int block_dim = 256;
	const int grid_cap  = 4096;
	int grid_dim = ::min(grid_cap, num_vertices/block_dim + 1);
	ValueType_ beta = 1.0-damping_factor; // (1 - alpha), written into dangling entries
	hipLaunchKernelGGL(( update_dn_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, num_vertices, dangling_nodes,beta);
	cudaCheckError();
}
//Explicit
template void update_dangling_nodes<double> (int num_vertices, double* dangling_nodes, double damping_factor, hipStream_t stream);
template void update_dangling_nodes<float> (int num_vertices, float* dangling_nodes, float damping_factor, hipStream_t stream);
} // end namespace nvgraph
| 4eae07ad07f249c33a1b0ce7f5eed1562e65c7c8.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <thrust/device_vector.h>
#include "include/nvgraph_error.hxx"
#include "include/nvgraph_vector_kernels.hxx"
#include "include/pagerank_kernels.hxx"
namespace nvgraph
{
template <typename ValueType_>
__global__ void update_dn_kernel(int num_vertices, ValueType_* aa, ValueType_ beta)
{
	// Grid-stride sweep over the vector a = alpha*a + (1-alpha)e:
	// entries that are exactly zero belong to dangling nodes and receive
	// beta = 1-alpha, since alpha*0 + (1-alpha)*1 = (1-alpha).
	int first  = blockDim.x * blockIdx.x + threadIdx.x;
	int stride = blockDim.x * gridDim.x;
	for (int r = first; r < num_vertices; r += stride)
	{
		if (aa[r] == 0.0)
			aa[r] = beta;
	}
}
template <typename ValueType_>
void update_dangling_nodes(int num_vertices, ValueType_* dangling_nodes, ValueType_ damping_factor, cudaStream_t stream)
{
	// 256-thread blocks, grid capped at 4096 blocks; the kernel's
	// grid-stride loop covers any vertices beyond the cap.
	const int block_dim = 256;
	const int grid_cap  = 4096;
	int grid_dim = std::min(grid_cap, num_vertices/block_dim + 1);
	ValueType_ beta = 1.0-damping_factor; // (1 - alpha), written into dangling entries
	update_dn_kernel<<<grid_dim, block_dim, 0, stream>>>(num_vertices, dangling_nodes,beta);
	cudaCheckError();
}
//Explicit
template void update_dangling_nodes<double> (int num_vertices, double* dangling_nodes, double damping_factor, cudaStream_t stream);
template void update_dangling_nodes<float> (int num_vertices, float* dangling_nodes, float damping_factor, cudaStream_t stream);
} // end namespace nvgraph
|
c1f718d7182dea8aa59fac803efe6b62582c90b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity HW5
Histogramming for Speed
The goal of this assignment is compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
__global__
void naiveHisto(
	const unsigned int* const values,
	unsigned int* const histogram,
	int numVals
){
	// One global atomic per input element; the bin index is the value itself.
	const int idx = threadIdx.x + blockDim.x*blockIdx.x;
	if (idx >= numVals) return;	// tail-block guard
	atomicAdd(&histogram[values[idx]], 1);
}
// Privatized histogram: each block accumulates into a shared-memory copy of
// the bins (dynamic shared size = numBins * sizeof(unsigned int), supplied at
// launch), then flushes its partial counts to the global histogram with one
// atomic per bin per block.
__global__
void fastHistogram(
	const unsigned int* const values,
	unsigned int* const histogram,
	int numVals,int numBins
){
	extern __shared__ unsigned int sharedHistogram[];
	// Cooperatively zero the shared bins (shared memory starts uninitialized).
	for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
		sharedHistogram[i] = 0;
	}
	__syncthreads();
	// BUG FIX: the original indexed values[] unconditionally, reading out of
	// bounds in the tail block whenever the grid overshoots numVals.  The
	// guard must NOT be an early return, because every thread still has to
	// reach the barriers above and below.
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	if (idx < numVals) {
		atomicAdd(&sharedHistogram[values[idx]], 1);
	}
	__syncthreads();
	// Merge this block's private counts into the global result.
	for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
		atomicAdd(&histogram[i], sharedHistogram[i]);
	}
}
// Histogram driver: values are already exact bin indices (bin = val).
// Launches one thread per element.  (Closing brace of this function is on the
// following line and is unchanged.)
void computeHistogram(const unsigned int* const d_vals, //INPUT
	unsigned int* const d_histo, //OUTPUT
	const unsigned int numBins,
	const unsigned int numElems)
{
	const int NUM_THREADS = 1024;
	// BUG FIX: the original computed ceil(numElems / NUM_THREADS), but the
	// argument is an *integer* division that truncates before ceil() runs,
	// so the last (numElems % NUM_THREADS) values were never counted.
	// Use integer ceiling division instead.
	int numBlocks = (numElems + NUM_THREADS - 1) / NUM_THREADS;
	hipLaunchKernelGGL(( naiveHisto) , dim3(numBlocks), dim3(NUM_THREADS), 0, 0, d_vals, d_histo, numElems);
	//	fastHistogram <<<numBlocks, NUM_THREADS, sizeof(unsigned int)*numBins>>> (d_vals, d_histo, numElems, numBins);
	hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
} | c1f718d7182dea8aa59fac803efe6b62582c90b3.cu | /* Udacity HW5
Histogramming for Speed
The goal of this assignment is compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
__global__
void naiveHisto(
	const unsigned int* const values,
	unsigned int* const histogram,
	int numVals
){
	// One global atomic per input element; the bin index is the value itself.
	const int idx = threadIdx.x + blockDim.x*blockIdx.x;
	if (idx >= numVals) return;	// tail-block guard
	atomicAdd(&histogram[values[idx]], 1);
}
// Privatized histogram: each block accumulates into a shared-memory copy of
// the bins (dynamic shared size = numBins * sizeof(unsigned int), supplied at
// launch), then flushes its partial counts to the global histogram with one
// atomic per bin per block.
__global__
void fastHistogram(
	const unsigned int* const values,
	unsigned int* const histogram,
	int numVals,int numBins
){
	extern __shared__ unsigned int sharedHistogram[];
	// Cooperatively zero the shared bins (shared memory starts uninitialized).
	for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
		sharedHistogram[i] = 0;
	}
	__syncthreads();
	// BUG FIX: the original indexed values[] unconditionally, reading out of
	// bounds in the tail block whenever the grid overshoots numVals.  The
	// guard must NOT be an early return, because every thread still has to
	// reach the barriers above and below.
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	if (idx < numVals) {
		atomicAdd(&sharedHistogram[values[idx]], 1);
	}
	__syncthreads();
	// Merge this block's private counts into the global result.
	for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
		atomicAdd(&histogram[i], sharedHistogram[i]);
	}
}
// Histogram driver: values are already exact bin indices (bin = val).
// Launches one thread per element.  (Closing brace of this function is on the
// following line and is unchanged.)
void computeHistogram(const unsigned int* const d_vals, //INPUT
	unsigned int* const d_histo, //OUTPUT
	const unsigned int numBins,
	const unsigned int numElems)
{
	const int NUM_THREADS = 1024;
	// BUG FIX: the original computed ceil(numElems / NUM_THREADS), but the
	// argument is an *integer* division that truncates before ceil() runs,
	// so the last (numElems % NUM_THREADS) values were never counted.
	// Use integer ceiling division instead.
	int numBlocks = (numElems + NUM_THREADS - 1) / NUM_THREADS;
	naiveHisto <<<numBlocks, NUM_THREADS>>> (d_vals, d_histo, numElems);
	//	fastHistogram <<<numBlocks, NUM_THREADS, sizeof(unsigned int)*numBins>>> (d_vals, d_histo, numElems, numBins);
	cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
} |
ada69049de6cdd079cd955d976311419e6a428d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "SR_kernel_start.cu"
#include <stdio.h>
#include <time.h>
extern int *test;
texture<int, 1, hipReadModeElementType> TR;
texture<int, 1, hipReadModeElementType> TG;
texture<int, 1, hipReadModeElementType> TB;
texture<int ,1, hipReadModeElementType> TansR;
texture<int ,1, hipReadModeElementType> TansG;
texture<int ,1, hipReadModeElementType> TansB;
//extern __shared__ int row[];
__constant__ float d_u0[5];
__constant__ float d_u1[5];
// Upload the two 5-tap polyphase filter kernels into device constant memory
// (d_u0/d_u1), where the convolution routines read them.  Each host array
// must hold at least 5 floats.  NOTE(review): return codes are not checked.
extern "C" void set_filter_up(float *u0, float *u1){
	hipMemcpyToSymbol(d_u0, u0, 5 * sizeof(float));
	hipMemcpyToSymbol(d_u1, u1, 5 * sizeof(float));
}
// Polyphase 5-tap column convolution for the 1.5x upscale.  Writes output
// column `index` of an hh x ww image (`ans`, row-major with row stride ww):
// the two zero-stuffed phase columns row0/row1 (hh samples each, filled by
// the caller) are filtered with the constant-memory taps d_u0/d_u1; samples
// i = 0, 1, hh-2, hh-1 use filters truncated at the borders.  Returns the
// sum of the produced column, which the caller uses as a brightness weight
// for renormalization.
__device__ int convolusion_col(int index, int ww, int hh, int *ans, int *row0, int *row1){
	int e_aft=0;
	int temp[2];
	// i==0 : top border, taps 2..4 over samples 0..2
	temp[0]=(int)(d_u0[2]*row0[0]+d_u0[3]*row0[1]+d_u0[4]*row0[2]);
	temp[1]=(int)(d_u1[2]*row1[0]+d_u1[3]*row1[1]+d_u1[4]*row1[2]);
	ans[index]=temp[0]+temp[1];
	e_aft+=(temp[0]+temp[1]);
	// i==1 : taps 1..4 over samples 0..3
	temp[0]=(int)(d_u0[1]*row0[0]+d_u0[2]*row0[1]+d_u0[3]*row0[2]+d_u0[4]*row0[3]);
	temp[1]=(int)(d_u1[1]*row1[0]+d_u1[2]*row1[1]+d_u1[3]*row1[2]+d_u1[4]*row1[3]);
	ans[ww +index]=temp[0]+temp[1];
	e_aft+=(temp[0]+temp[1]);
	// i==hh-2 : taps 0..3 over samples hh-4..hh-1
	temp[0]=(int)(d_u0[0]*row0[hh-4]+d_u0[1]*row0[hh-3]+d_u0[2]*row0[hh-2]+d_u0[3]*row0[hh-1]);
	temp[1]=(int)(d_u1[0]*row1[hh-4]+d_u1[1]*row1[hh-3]+d_u1[2]*row1[hh-2]+d_u1[3]*row1[hh-1]);
	ans[(hh-2)*ww +index]=temp[0]+temp[1];
	e_aft+=(temp[0]+temp[1]);
	// i==hh-1 : bottom border, taps 0..2 over samples hh-3..hh-1
	temp[0]=(int)(d_u0[0]*row0[hh-3]+d_u0[1]*row0[hh-2]+d_u0[2]*row0[hh-1]);
	temp[1]=(int)(d_u1[0]*row1[hh-3]+d_u1[1]*row1[hh-2]+d_u1[2]*row1[hh-1]);
	ans[(hh-1)*ww +index]=temp[0]+temp[1];
	e_aft+=(temp[0]+temp[1]);
	// interior samples: full 5-tap filter
	#pragma unroll
	for(int i=2; i<hh-2; ++i){
		temp[0]=(int)(d_u0[0]*row0[i-2]+d_u0[1]*row0[i-1]+d_u0[2]*row0[i]+d_u0[3]*row0[i+1]+d_u0[4]*row0[i+2]);
		temp[1]=(int)(d_u1[0]*row1[i-2]+d_u1[1]*row1[i-1]+d_u1[2]*row1[i]+d_u1[3]*row1[i+1]+d_u1[4]*row1[i+2]);
		ans[i*ww +index]=temp[0]+temp[1];
		e_aft+=(temp[0]+temp[1]);
	}
	return e_aft;
}
// Polyphase 5-tap row convolution for the 1.5x upscale.  Writes output row
// a_index (width ww, row-major `ans`): the two zero-stuffed phase rows
// row0/row1 are filtered with the constant-memory taps d_u0/d_u1; samples
// i = 0, 1, ww-2, ww-1 use filters truncated at the borders.  Returns the sum
// of the produced row, which the caller uses as a brightness weight for
// renormalization.
// NOTE: parameters `w` and `index` are unused; kept for interface
// compatibility with existing callers.
__device__ int convolusion_row(int a_index, int w, int ww, int *ans, int index, int *row0, int *row1){
	int e_aft=0;
	int temp[2];
	// i==0 : left border, taps 2..4 over samples 0..2
	temp[0]=(int)(d_u0[2]*row0[0]+d_u0[3]*row0[1]+d_u0[4]*row0[2]);
	temp[1]=(int)(d_u1[2]*row1[0]+d_u1[3]*row1[1]+d_u1[4]*row1[2]);
	ans[a_index*ww]=temp[0]+temp[1];
	e_aft+=(temp[0]+temp[1]);
	// i==1 : taps 1..4 over samples 0..3.
	// BUG FIX: the original duplicated the i==ww-2 right-edge expression here
	// (d_u0[0]*row0[ww-4]+...), writing the right-edge value into column 1.
	// The column-direction twin (convolusion_col) shows the intended form.
	temp[0]=(int)(d_u0[1]*row0[0]+d_u0[2]*row0[1]+d_u0[3]*row0[2]+d_u0[4]*row0[3]);
	temp[1]=(int)(d_u1[1]*row1[0]+d_u1[2]*row1[1]+d_u1[3]*row1[2]+d_u1[4]*row1[3]);
	ans[a_index*ww +1]=temp[0]+temp[1];
	e_aft+=(temp[0]+temp[1]);
	// i==ww-2 : taps 0..3 over samples ww-4..ww-1
	temp[0]=(int)(d_u0[0]*row0[ww-4]+d_u0[1]*row0[ww-3]+d_u0[2]*row0[ww-2]+d_u0[3]*row0[ww-1]);
	temp[1]=(int)(d_u1[0]*row1[ww-4]+d_u1[1]*row1[ww-3]+d_u1[2]*row1[ww-2]+d_u1[3]*row1[ww-1]);
	ans[a_index*ww +ww-2]=temp[0]+temp[1];
	e_aft+=(temp[0]+temp[1]);
	// i==ww-1 : right border, taps 0..2 over samples ww-3..ww-1
	temp[0]=(int)(d_u0[0]*row0[ww-3]+d_u0[1]*row0[ww-2]+d_u0[2]*row0[ww-1]);
	temp[1]=(int)(d_u1[0]*row1[ww-3]+d_u1[1]*row1[ww-2]+d_u1[2]*row1[ww-1]);
	ans[a_index*ww +ww-1]=temp[0]+temp[1];
	e_aft+=(temp[0]+temp[1]);
	// interior samples: full 5-tap filter
	#pragma unroll
	for(int i=2; i<ww-2; ++i){
		temp[0]=(int)(d_u0[0]*row0[i-2]+d_u0[1]*row0[i-1]+d_u0[2]*row0[i]+d_u0[3]*row0[i+1]+d_u0[4]*row0[i+2]);
		temp[1]=(int)(d_u1[0]*row1[i-2]+d_u1[1]*row1[i-1]+d_u1[2]*row1[i]+d_u1[3]*row1[i+1]+d_u1[4]*row1[i+2]);
		ans[a_index*ww +i]=temp[0]+temp[1];
		e_aft+=(temp[0]+temp[1]);
	}
	return e_aft;
}
// Vertical (column) pass of the 1.5x upscale.  Each thread handles one output
// column (round+tid < ww): it reads that column from the transposed
// intermediate written by the row pass (bound to textures TansR/G/B with
// stride h), zero-stuffs it into two phase arrays, filters them with
// convolusion_col, and renormalizes so the column keeps the brightness of its
// source (ratio of summed intensities, per channel; 3/2 accounts for the
// length change).  `round` offsets the column index so the host can cover
// ww columns with repeated fixed-size launches.
// NOTE(review): row0/row1 are fixed 1080-entry per-thread arrays — hh > 1080
// would overflow them; confirm callers guarantee this bound.
__global__ void run_cuda_col(int round, int *ans_R, int *ans_G, int *ans_B, int w, int h, int ww, int hh){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(round+tid<ww){
		int R_ori=0, G_ori=0, B_ori=0;	// store weight of original img
		int e_aft;
		float R_rate, G_rate, B_rate;
		int index=(round+tid)*h;
		#pragma unroll
		for(int i=0; i<h; ++i){	// compute weight
			R_ori+=tex1Dfetch(TansR, index +i);
			G_ori+=tex1Dfetch(TansG, index +i);
			B_ori+=tex1Dfetch(TansB, index +i);
			/*
			R_ori+=ans_R[i*ww +index];
			G_ori+=ans_G[i*ww +index];
			B_ori+=ans_B[i*ww +index];
			*/
		}
		int row0[1080];
		int row1[1080];
		// red: build the two zero-stuffed phase columns, then filter
		#pragma unroll
		for(int i=0; i<hh; ++i){
			if(i%3==0)	row0[i]=tex1Dfetch(TansR, index +i*2/3);
			else	row0[i]=0;
			if(i%3==2)	row1[i]=tex1Dfetch(TansR, index +(i-2)*2/3+1);
			else	row1[i]=0;
			/*
			if(i%3==0)	row0[i]=ans_R[(i*2/3)*ww +index];
			else	row0[i]=0;
			if(i%3==2)	row1[i]=ans_R[((i-2)*2/3+1)*ww +index];
			else	row1[i]=0;
			*/
		}
		e_aft=convolusion_col(round+tid, ww, hh, ans_R, row0, row1);
		R_rate=(float)e_aft/(float)(R_ori*3/2);
		// green
		#pragma unroll
		for(int i=0; i<hh; ++i){
			if(i%3==0)	row0[i]=tex1Dfetch(TansG, index +i*2/3);
			else	row0[i]=0;
			if(i%3==2)	row1[i]=tex1Dfetch(TansG, index +(i-2)*2/3+1);
			else	row1[i]=0;
			/*
			if(i%3==0)	row0[i]=ans_G[(i*2/3)*ww +index];
			else	row0[i]=0;
			if(i%3==2)	row1[i]=ans_G[((i-2)*2/3+1)*ww +index];
			else	row1[i]=0;
			*/
		}
		e_aft=convolusion_col(round+tid, ww, hh, ans_G, row0, row1);
		G_rate=(float)e_aft/(float)(G_ori*3/2);
		// blue
		#pragma unroll
		for(int i=0; i<hh; ++i){
			if(i%3==0)	row0[i]=tex1Dfetch(TansB, index +i*2/3);
			else	row0[i]=0;
			if(i%3==2)	row1[i]=tex1Dfetch(TansB, index +(i-2)*2/3+1);
			else	row1[i]=0;
			/*
			if(i%3==0)	row0[i]=ans_B[(i*2/3)*ww +index];
			else	row0[i]=0;
			if(i%3==2)	row1[i]=ans_B[((i-2)*2/3+1)*ww +index];
			else	row1[i]=0;
			*/
		}
		e_aft=convolusion_col(round+tid, ww, hh, ans_B, row0, row1);
		B_rate=(float)e_aft/(float)(B_ori*3/2);
		// renormalize the freshly written column in place
		index=round+tid;
		#pragma unroll
		for(int i=0; i<hh; ++i){
			ans_R[i*ww +index]=(int)((float)ans_R[i*ww +index]/R_rate);
			ans_G[i*ww +index]=(int)((float)ans_G[i*ww +index]/G_rate);
			ans_B[i*ww +index]=(int)((float)ans_B[i*ww +index]/B_rate);
			/*
			if(ans_R[i*ww +round+tid]>255)	ans_R[i*ww +round+tid]=255;
			else if(ans_R[i*ww +round+tid]<0)	ans_R[i*ww +round+tid]=0;
			if(ans_G[i*ww +round+tid]>255)	ans_G[i*ww +round+tid]=255;
			else if(ans_G[i*ww +round+tid]<0)	ans_G[i*ww +round+tid]=0;
			if(ans_B[i*ww +round+tid]>255)	ans_B[i*ww +round+tid]=255;
			else if(ans_B[i*ww +round+tid]<0)	ans_B[i*ww +round+tid]=0;
			*/
		}
	}
}
// Horizontal (row) pass of the 1.5x upscale.  Each thread expands one source
// row (round+tid < h) of width w into an output row of width ww: the source
// row (textures TR/TG/TB) is zero-stuffed into two phase arrays, filtered by
// convolusion_row, and renormalized so the filtered row keeps the brightness
// of the source row (ratio of summed intensities, per channel; 3/2 accounts
// for the length change).  Results are written both to ans_* (row-major,
// stride ww) and, transposed (stride h), into temp_* for the column pass.
// NOTE(review): row0/row1 are fixed 1920-entry per-thread arrays — ww > 1920
// would overflow them; confirm callers guarantee this bound.
__global__ void run_cuda_row(int round, int *ans_R, int *ans_G, int *ans_B, int w, int h, int ww, int hh, int *temp_R, int *temp_G, int *temp_B){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	//__shared__ int row[540*2*8];
	if(round+tid<h){
		//test[0]=1139;
		int R_ori=0, G_ori=0, B_ori=0;	// store weight of original img
		int e_aft;
		float R_rate, G_rate, B_rate;
		int index=(round+tid)*w;
		#pragma unroll
		for(int i=0; i<w; ++i){	// compute weight
			R_ori+=tex1Dfetch(TR, index +i);
			G_ori+=tex1Dfetch(TG, index +i);
			B_ori+=tex1Dfetch(TB, index +i);
		}
		int row0[1920];
		int row1[1920];
		// red: build the two zero-stuffed phase rows, then filter
		#pragma unroll
		for(int i=0; i<ww; ++i){	// setup row
			/*
			if(i%3==0)	row[threadIdx.x*ww*2 +i]=tex1Dfetch(TR, index +i*2/3);
			else	row[threadIdx.x*ww*2 +i]=0;
			if(i%3==2)	row[threadIdx.x*ww*2 +ww+i]=tex1Dfetch(TR, index +(i-2)*2/3+1);
			else	row[threadIdx.x*ww*2 +ww+i]=0;
			*/
			if(i%3==0)	row0[i]=tex1Dfetch(TR, index +i*2/3);
			else	row0[i]=0;
			if(i%3==2)	row1[i]=tex1Dfetch(TR, index +(i-2)*2/3+1);
			else	row1[i]=0;
		}
		e_aft=convolusion_row(round+tid, w, ww, ans_R, threadIdx.x*ww*2, row0, row1);
		R_rate=(float)e_aft/(float)(R_ori*3/2);
		// green
		#pragma unroll
		for(int i=0; i<ww; ++i){	// setup row
			/*
			if(i%3==0)	row[threadIdx.x*ww*2 +i]=tex1Dfetch(TG, index +i*2/3);
			else	row[threadIdx.x*ww*2 +i]=0;
			if(i%3==2)	row[threadIdx.x*ww*2 +ww+i]=tex1Dfetch(TG, index +(i-2)*2/3+1);
			else	row[threadIdx.x*ww*2 +ww+i]=0;
			*/
			if(i%3==0)	row0[i]=tex1Dfetch(TG, index +i*2/3);
			else	row0[i]=0;
			if(i%3==2)	row1[i]=tex1Dfetch(TG, index +(i-2)*2/3+1);
			else	row1[i]=0;
		}
		e_aft=convolusion_row(round+tid, w, ww, ans_G, threadIdx.x*ww*2, row0, row1);
		G_rate=(float)e_aft/(float)(G_ori*3/2);
		// blue
		#pragma unroll
		for(int i=0; i<ww; ++i){	// setup row
			/*
			if(i%3==0)	row[threadIdx.x*ww*2 +i]=tex1Dfetch(TB, index +i*2/3);
			else	row[threadIdx.x*ww*2 +i]=0;
			if(i%3==2)	row[threadIdx.x*ww*2 +ww+i]=tex1Dfetch(TB, index +(i-2)*2/3+1);
			else	row[threadIdx.x*ww*2 +ww+i]=0;
			*/
			if(i%3==0)	row0[i]=tex1Dfetch(TB, index +i*2/3);
			else	row0[i]=0;
			if(i%3==2)	row1[i]=tex1Dfetch(TB, index +(i-2)*2/3+1);
			else	row1[i]=0;
		}
		e_aft=convolusion_row(round+tid, w, ww, ans_B, threadIdx.x*ww*2, row0, row1);
		B_rate=(float)e_aft/(float)(B_ori*3/2);
		// renormalize in place and mirror into the transposed intermediate
		index=(round+tid)*ww;
		#pragma unroll
		for(int i=0; i<ww; ++i){
			temp_R[i*h +round+tid]=ans_R[index +i]=(int)((float)ans_R[index +i]/R_rate);
			temp_G[i*h +round+tid]=ans_G[index +i]=(int)((float)ans_G[index +i]/G_rate);
			temp_B[i*h +round+tid]=ans_B[index +i]=(int)((float)ans_B[index +i]/B_rate);
			/*
			if(ans_R[(round+tid)*ww +i]>255)	ans_R[(round+tid)*ww +i]=255;
			else if(ans_R[(round+tid)*ww +i]<0)	ans_R[(round+tid)*ww +i]=0;
			if(ans_G[(round+tid)*ww +i]>255)	ans_G[(round+tid)*ww +i]=255;
			else if(ans_G[(round+tid)*ww +i]<0)	ans_G[(round+tid)*ww +i]=0;
			if(ans_B[(round+tid)*ww +i]>255)	ans_B[(round+tid)*ww +i]=255;
			else if(ans_B[(round+tid)*ww +i]<0)	ans_B[(round+tid)*ww +i]=0;
			*/
		}
	}
	__syncthreads();
}
// Host driver for the 1.5x RGB upscale (w x h source -> ww x hh result; the
// commented-out lines below suggest callers pass ww = w*3/2, hh = h*3/2).
// Uploads the 5-tap filters, copies the per-channel source planes to the
// device, runs the row pass (which also writes a transposed intermediate into
// temp_*), rebinds the Tans* textures to that intermediate, runs the column
// pass, and copies the finished planes back to the host.
// NOTE(review): none of the hipMalloc/hipMemcpy/hipBindTexture return codes
// are checked, and no explicit synchronization precedes the device-to-host
// copies (hipMemcpy's blocking behavior is being relied upon).
void SR_kernel_up(int *ori_R, int *ori_G, int *ori_B, int *aft_R, int *aft_G, int *aft_B, int w, int h, int ww, int hh){
	// polyphase filter taps; u1 is u0 mirrored
	float u0[5]={-0.047, 0.6, 0.927, 0.119, -0.1};
	float u1[5]={-0.1, 0.119, 0.927, 0.6, -0.047};
	int *d_ori_R, *d_ori_G, *d_ori_B;	// device copies of the source planes
	int *d_ans_R, *d_ans_G, *d_ans_B;	// final ww x hh result planes
	int *temp_R, *temp_G, *temp_B;		// transposed intermediate after the row pass
	//int ww=w*3/2;
	//int hh=h*3/2;
	hipMalloc((void**)&d_ori_R, w*h*sizeof(int));
	hipMalloc((void**)&d_ori_G, w*h*sizeof(int));
	hipMalloc((void**)&d_ori_B, w*h*sizeof(int));
	hipMalloc((void**)&temp_R, ww*h*sizeof(int));
	hipMalloc((void**)&temp_G, ww*h*sizeof(int));
	hipMalloc((void**)&temp_B, ww*h*sizeof(int));
	hipMalloc((void**)&d_ans_R, ww*hh*sizeof(int));
	hipMalloc((void**)&d_ans_G, ww*hh*sizeof(int));
	hipMalloc((void**)&d_ans_B, ww*hh*sizeof(int));
	hipMemcpy(d_ori_R, ori_R, w*h*sizeof(int), hipMemcpyHostToDevice);
	hipMemcpy(d_ori_G, ori_G, w*h*sizeof(int), hipMemcpyHostToDevice);
	hipMemcpy(d_ori_B, ori_B, w*h*sizeof(int), hipMemcpyHostToDevice);
	// source planes are read through 1D textures in the row pass
	hipBindTexture(0, TR, d_ori_R);
	hipBindTexture(0, TG, d_ori_G);
	hipBindTexture(0, TB, d_ori_B);
	set_filter_up(u0, u1);
	int threads=64;
	int blocks=64;
	// repeated fixed-size launches until all h rows are covered
	for(int i=0; i<(h-1)/(threads*blocks) +1; ++i)	// a thread do a row
		hipLaunchKernelGGL(( run_cuda_row), dim3(blocks), dim3(threads), 0, 0, i*threads*blocks, d_ans_R, d_ans_G, d_ans_B, w, h, ww, hh, temp_R, temp_G, temp_B);
		//run_cuda_row<<<blocks, threads, threads*sizeof(int)*ww*2>>>(i*threads*blocks, d_ans_R, d_ans_G, d_ans_B, w, h, ww, hh, temp_R, temp_G, temp_B);
	// column pass reads the transposed intermediate through the Tans* textures
	hipBindTexture(0, TansR, temp_R);
	hipBindTexture(0, TansG, temp_G);
	hipBindTexture(0, TansB, temp_B);
	for(int i=0; i<(ww-1)/(threads*blocks) +1; ++i)	// a thread do a column
		hipLaunchKernelGGL(( run_cuda_col), dim3(blocks), dim3(threads), 0, 0, i*threads*blocks, d_ans_R, d_ans_G, d_ans_B, w, h, ww, hh);
	hipMemcpy(aft_R, d_ans_R, ww*hh*sizeof(int), hipMemcpyDeviceToHost);
	hipMemcpy(aft_G, d_ans_G, ww*hh*sizeof(int), hipMemcpyDeviceToHost);
	hipMemcpy(aft_B, d_ans_B, ww*hh*sizeof(int), hipMemcpyDeviceToHost);
	hipUnbindTexture(TR);
	hipUnbindTexture(TG);
	hipUnbindTexture(TB);
	hipUnbindTexture(TansR);
	hipUnbindTexture(TansG);
	hipUnbindTexture(TansB);
	hipFree(d_ori_R);
	hipFree(d_ori_G);
	hipFree(d_ori_B);
	hipFree(d_ans_R);
	hipFree(d_ans_G);
	hipFree(d_ans_B);
	hipFree(temp_R);
	hipFree(temp_G);
	hipFree(temp_B);
}
| ada69049de6cdd079cd955d976311419e6a428d4.cu | //#include "SR_kernel_start.cu"
#include <stdio.h>
#include <time.h>
extern int *test;
texture<int, 1, cudaReadModeElementType> TR;
texture<int, 1, cudaReadModeElementType> TG;
texture<int, 1, cudaReadModeElementType> TB;
texture<int ,1, cudaReadModeElementType> TansR;
texture<int ,1, cudaReadModeElementType> TansG;
texture<int ,1, cudaReadModeElementType> TansB;
//extern __shared__ int row[];
__constant__ float d_u0[5];
__constant__ float d_u1[5];
extern "C" void set_filter_up(float *u0, float *u1){
cudaMemcpyToSymbol(d_u0, u0, 5 * sizeof(float));
cudaMemcpyToSymbol(d_u1, u1, 5 * sizeof(float));
}
// Device helper: applies the two 5-tap constant filters (d_u0/d_u1) down one
// zero-stuffed column of length hh held in the per-thread arrays row0/row1,
// writing results to ans[i*ww + index] for i in [0, hh).
// The first two (i==0, i==1) and last two (i==hh-2, i==hh-1) samples are
// handled with shortened boundary stencils; the interior uses the full 5 taps.
// Returns the sum of all written samples (used by the caller to renormalize).
// NOTE: assumes hh >= 4 so the boundary cases do not overlap — TODO confirm.
__device__ int convolusion_col(int index, int ww, int hh, int *ans, int *row0, int *row1){
int e_aft=0;
int temp[2];
// i==0
temp[0]=(int)(d_u0[2]*row0[0]+d_u0[3]*row0[1]+d_u0[4]*row0[2]);
temp[1]=(int)(d_u1[2]*row1[0]+d_u1[3]*row1[1]+d_u1[4]*row1[2]);
ans[index]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
// i==1
temp[0]=(int)(d_u0[1]*row0[0]+d_u0[2]*row0[1]+d_u0[3]*row0[2]+d_u0[4]*row0[3]);
temp[1]=(int)(d_u1[1]*row1[0]+d_u1[2]*row1[1]+d_u1[3]*row1[2]+d_u1[4]*row1[3]);
ans[ww +index]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
// i==hh-2
temp[0]=(int)(d_u0[0]*row0[hh-4]+d_u0[1]*row0[hh-3]+d_u0[2]*row0[hh-2]+d_u0[3]*row0[hh-1]);
temp[1]=(int)(d_u1[0]*row1[hh-4]+d_u1[1]*row1[hh-3]+d_u1[2]*row1[hh-2]+d_u1[3]*row1[hh-1]);
ans[(hh-2)*ww +index]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
// i==hh-1
temp[0]=(int)(d_u0[0]*row0[hh-3]+d_u0[1]*row0[hh-2]+d_u0[2]*row0[hh-1]);
temp[1]=(int)(d_u1[0]*row1[hh-3]+d_u1[1]*row1[hh-2]+d_u1[2]*row1[hh-1]);
ans[(hh-1)*ww +index]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
// Interior samples: full 5-tap stencil for both filters.
#pragma unroll
for(int i=2; i<hh-2; ++i){
temp[0]=(int)(d_u0[0]*row0[i-2]+d_u0[1]*row0[i-1]+d_u0[2]*row0[i]+d_u0[3]*row0[i+1]+d_u0[4]*row0[i+2]);
temp[1]=(int)(d_u1[0]*row1[i-2]+d_u1[1]*row1[i-1]+d_u1[2]*row1[i]+d_u1[3]*row1[i+1]+d_u1[4]*row1[i+2]);
ans[i*ww +index]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
}
return e_aft;
}
// Device helper: applies the two 5-tap constant filters (d_u0/d_u1) along one
// zero-stuffed row of length ww held in the per-thread arrays row0/row1,
// writing results to ans[a_index*ww + i] for i in [0, ww).
// Mirrors convolusion_col: shortened boundary stencils at i==0, i==1,
// i==ww-2, i==ww-1, full 5-tap stencil in the interior.
// Returns the sum of all written samples (used by the caller to renormalize).
// Parameters w and index are unused but kept for caller compatibility.
// NOTE: assumes ww >= 4 so the boundary cases do not overlap — TODO confirm.
__device__ int convolusion_row(int a_index, int w, int ww, int *ans, int index, int *row0, int *row1){
int e_aft=0;
int temp[2];
// i==0
temp[0]=(int)(d_u0[2]*row0[0]+d_u0[3]*row0[1]+d_u0[4]*row0[2]);
temp[1]=(int)(d_u1[2]*row1[0]+d_u1[3]*row1[1]+d_u1[4]*row1[2]);
ans[a_index*ww]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
// i==1
// FIX: this case previously duplicated the i==ww-2 stencil (reading
// row0[ww-4..ww-1]), writing tail data into the second output sample.
// It now uses the head of the row with shifted taps, matching the
// i==1 case of convolusion_col.
temp[0]=(int)(d_u0[1]*row0[0]+d_u0[2]*row0[1]+d_u0[3]*row0[2]+d_u0[4]*row0[3]);
temp[1]=(int)(d_u1[1]*row1[0]+d_u1[2]*row1[1]+d_u1[3]*row1[2]+d_u1[4]*row1[3]);
ans[a_index*ww +1]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
// i==ww-2
temp[0]=(int)(d_u0[0]*row0[ww-4]+d_u0[1]*row0[ww-3]+d_u0[2]*row0[ww-2]+d_u0[3]*row0[ww-1]);
temp[1]=(int)(d_u1[0]*row1[ww-4]+d_u1[1]*row1[ww-3]+d_u1[2]*row1[ww-2]+d_u1[3]*row1[ww-1]);
ans[a_index*ww +ww-2]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
// i==ww-1
temp[0]=(int)(d_u0[0]*row0[ww-3]+d_u0[1]*row0[ww-2]+d_u0[2]*row0[ww-1]);
temp[1]=(int)(d_u1[0]*row1[ww-3]+d_u1[1]*row1[ww-2]+d_u1[2]*row1[ww-1]);
ans[a_index*ww +ww-1]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
// Interior samples: full 5-tap stencil for both filters.
#pragma unroll
for(int i=2; i<ww-2; ++i){
temp[0]=(int)(d_u0[0]*row0[i-2]+d_u0[1]*row0[i-1]+d_u0[2]*row0[i]+d_u0[3]*row0[i+1]+d_u0[4]*row0[i+2]);
temp[1]=(int)(d_u1[0]*row1[i-2]+d_u1[1]*row1[i-1]+d_u1[2]*row1[i]+d_u1[3]*row1[i+1]+d_u1[4]*row1[i+2]);
ans[a_index*ww +i]=temp[0]+temp[1];
e_aft+=(temp[0]+temp[1]);
}
return e_aft;
}
// Second (vertical) pass of the 3/2 upscaler. One thread handles one output
// column (round+tid); the caller loops `round` over grid-sized chunks.
// Reads the intermediate buffers via textures TansR/TansG/TansB, which
// run_cuda_row wrote in column-major order (element [x*h + y]), so
// index = (round+tid)*h addresses this thread's whole input column.
// Each column is zero-stuffed 2->3 into row0/row1 (even-phase and odd-phase
// samples), convolved by convolusion_col, then rescaled so the output
// column's total intensity matches the input column's (times 3/2).
// NOTE: fixed-size local arrays below cap the output height at hh <= 1080.
__global__ void run_cuda_col(int round, int *ans_R, int *ans_G, int *ans_B, int w, int h, int ww, int hh){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(round+tid<ww){
int R_ori=0, G_ori=0, B_ori=0; // store weight of original img
int e_aft;
float R_rate, G_rate, B_rate;
int index=(round+tid)*h;
#pragma unroll
for(int i=0; i<h; ++i){ // compute weight
R_ori+=tex1Dfetch(TansR, index +i);
G_ori+=tex1Dfetch(TansG, index +i);
B_ori+=tex1Dfetch(TansB, index +i);
/*
R_ori+=ans_R[i*ww +index];
G_ori+=ans_G[i*ww +index];
B_ori+=ans_B[i*ww +index];
*/
}
// Per-thread scratch columns: row0 holds samples at output phase i%3==0,
// row1 at phase i%3==2; all other slots are zero (zero-stuffed upsampling).
int row0[1080];
int row1[1080];
// red
#pragma unroll
for(int i=0; i<hh; ++i){
if(i%3==0) row0[i]=tex1Dfetch(TansR, index +i*2/3);
else row0[i]=0;
if(i%3==2) row1[i]=tex1Dfetch(TansR, index +(i-2)*2/3+1);
else row1[i]=0;
/*
if(i%3==0) row0[i]=ans_R[(i*2/3)*ww +index];
else row0[i]=0;
if(i%3==2) row1[i]=ans_R[((i-2)*2/3+1)*ww +index];
else row1[i]=0;
*/
}
e_aft=convolusion_col(round+tid, ww, hh, ans_R, row0, row1);
// Ratio of output to input energy; used below for brightness preservation.
R_rate=(float)e_aft/(float)(R_ori*3/2);
// green
#pragma unroll
for(int i=0; i<hh; ++i){
if(i%3==0) row0[i]=tex1Dfetch(TansG, index +i*2/3);
else row0[i]=0;
if(i%3==2) row1[i]=tex1Dfetch(TansG, index +(i-2)*2/3+1);
else row1[i]=0;
/*
if(i%3==0) row0[i]=ans_G[(i*2/3)*ww +index];
else row0[i]=0;
if(i%3==2) row1[i]=ans_G[((i-2)*2/3+1)*ww +index];
else row1[i]=0;
*/
}
e_aft=convolusion_col(round+tid, ww, hh, ans_G, row0, row1);
G_rate=(float)e_aft/(float)(G_ori*3/2);
// blue
#pragma unroll
for(int i=0; i<hh; ++i){
if(i%3==0) row0[i]=tex1Dfetch(TansB, index +i*2/3);
else row0[i]=0;
if(i%3==2) row1[i]=tex1Dfetch(TansB, index +(i-2)*2/3+1);
else row1[i]=0;
/*
if(i%3==0) row0[i]=ans_B[(i*2/3)*ww +index];
else row0[i]=0;
if(i%3==2) row1[i]=ans_B[((i-2)*2/3+1)*ww +index];
else row1[i]=0;
*/
}
e_aft=convolusion_col(round+tid, ww, hh, ans_B, row0, row1);
B_rate=(float)e_aft/(float)(B_ori*3/2);
// Normalize the freshly written column in place so total intensity is kept.
index=round+tid;
#pragma unroll
for(int i=0; i<hh; ++i){
ans_R[i*ww +index]=(int)((float)ans_R[i*ww +index]/R_rate);
ans_G[i*ww +index]=(int)((float)ans_G[i*ww +index]/G_rate);
ans_B[i*ww +index]=(int)((float)ans_B[i*ww +index]/B_rate);
/*
if(ans_R[i*ww +round+tid]>255) ans_R[i*ww +round+tid]=255;
else if(ans_R[i*ww +round+tid]<0) ans_R[i*ww +round+tid]=0;
if(ans_G[i*ww +round+tid]>255) ans_G[i*ww +round+tid]=255;
else if(ans_G[i*ww +round+tid]<0) ans_G[i*ww +round+tid]=0;
if(ans_B[i*ww +round+tid]>255) ans_B[i*ww +round+tid]=255;
else if(ans_B[i*ww +round+tid]<0) ans_B[i*ww +round+tid]=0;
*/
}
}
}
// First (horizontal) pass of the 3/2 upscaler. One thread handles one input
// row (round+tid); the caller loops `round` over grid-sized chunks.
// Reads the original image via textures TR/TG/TB (row-major, width w),
// zero-stuffs each row 2->3 into row0/row1, convolves with convolusion_row,
// rescales for brightness preservation, and writes the result both to
// ans_* (row-major, width ww) and to temp_* in transposed, column-major
// order (element [x*h + y]) for the vertical pass to read through textures.
// NOTE: fixed-size local arrays below cap the output width at ww <= 1920.
__global__ void run_cuda_row(int round, int *ans_R, int *ans_G, int *ans_B, int w, int h, int ww, int hh, int *temp_R, int *temp_G, int *temp_B){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//__shared__ int row[540*2*8];
if(round+tid<h){
//test[0]=1139;
int R_ori=0, G_ori=0, B_ori=0; // store weight of original img
int e_aft;
float R_rate, G_rate, B_rate;
int index=(round+tid)*w;
#pragma unroll
for(int i=0; i<w; ++i){ // compute weight
R_ori+=tex1Dfetch(TR, index +i);
G_ori+=tex1Dfetch(TG, index +i);
B_ori+=tex1Dfetch(TB, index +i);
}
// Per-thread scratch rows: row0 holds samples at output phase i%3==0,
// row1 at phase i%3==2; all other slots are zero (zero-stuffed upsampling).
int row0[1920];
int row1[1920];
// red
#pragma unroll
for(int i=0; i<ww; ++i){ // setup row
/*
if(i%3==0) row[threadIdx.x*ww*2 +i]=tex1Dfetch(TR, index +i*2/3);
else row[threadIdx.x*ww*2 +i]=0;
if(i%3==2) row[threadIdx.x*ww*2 +ww+i]=tex1Dfetch(TR, index +(i-2)*2/3+1);
else row[threadIdx.x*ww*2 +ww+i]=0;
*/
if(i%3==0) row0[i]=tex1Dfetch(TR, index +i*2/3);
else row0[i]=0;
if(i%3==2) row1[i]=tex1Dfetch(TR, index +(i-2)*2/3+1);
else row1[i]=0;
}
e_aft=convolusion_row(round+tid, w, ww, ans_R, threadIdx.x*ww*2, row0, row1);
// Ratio of output to input energy; used below for brightness preservation.
R_rate=(float)e_aft/(float)(R_ori*3/2);
// green
#pragma unroll
for(int i=0; i<ww; ++i){ // setup row
/*
if(i%3==0) row[threadIdx.x*ww*2 +i]=tex1Dfetch(TG, index +i*2/3);
else row[threadIdx.x*ww*2 +i]=0;
if(i%3==2) row[threadIdx.x*ww*2 +ww+i]=tex1Dfetch(TG, index +(i-2)*2/3+1);
else row[threadIdx.x*ww*2 +ww+i]=0;
*/
if(i%3==0) row0[i]=tex1Dfetch(TG, index +i*2/3);
else row0[i]=0;
if(i%3==2) row1[i]=tex1Dfetch(TG, index +(i-2)*2/3+1);
else row1[i]=0;
}
e_aft=convolusion_row(round+tid, w, ww, ans_G, threadIdx.x*ww*2, row0, row1);
G_rate=(float)e_aft/(float)(G_ori*3/2);
// blue
#pragma unroll
for(int i=0; i<ww; ++i){ // setup row
/*
if(i%3==0) row[threadIdx.x*ww*2 +i]=tex1Dfetch(TB, index +i*2/3);
else row[threadIdx.x*ww*2 +i]=0;
if(i%3==2) row[threadIdx.x*ww*2 +ww+i]=tex1Dfetch(TB, index +(i-2)*2/3+1);
else row[threadIdx.x*ww*2 +ww+i]=0;
*/
if(i%3==0) row0[i]=tex1Dfetch(TB, index +i*2/3);
else row0[i]=0;
if(i%3==2) row1[i]=tex1Dfetch(TB, index +(i-2)*2/3+1);
else row1[i]=0;
}
e_aft=convolusion_row(round+tid, w, ww, ans_B, threadIdx.x*ww*2, row0, row1);
B_rate=(float)e_aft/(float)(B_ori*3/2);
// Normalize in place and mirror into the transposed temp_* buffers.
index=(round+tid)*ww;
#pragma unroll
for(int i=0; i<ww; ++i){
temp_R[i*h +round+tid]=ans_R[index +i]=(int)((float)ans_R[index +i]/R_rate);
temp_G[i*h +round+tid]=ans_G[index +i]=(int)((float)ans_G[index +i]/G_rate);
temp_B[i*h +round+tid]=ans_B[index +i]=(int)((float)ans_B[index +i]/B_rate);
/*
if(ans_R[(round+tid)*ww +i]>255) ans_R[(round+tid)*ww +i]=255;
else if(ans_R[(round+tid)*ww +i]<0) ans_R[(round+tid)*ww +i]=0;
if(ans_G[(round+tid)*ww +i]>255) ans_G[(round+tid)*ww +i]=255;
else if(ans_G[(round+tid)*ww +i]<0) ans_G[(round+tid)*ww +i]=0;
if(ans_B[(round+tid)*ww +i]>255) ans_B[(round+tid)*ww +i]=255;
else if(ans_B[(round+tid)*ww +i]<0) ans_B[(round+tid)*ww +i]=0;
*/
}
}
__syncthreads();
}
// Host driver for the two-pass 3/2 super-resolution upscale.
// Inputs: per-channel w*h images ori_R/G/B; outputs: per-channel ww*hh
// images aft_R/G/B (caller supplies ww, hh — the commented lines below show
// they were originally derived as w*3/2 and h*3/2).
// Pipeline: upload originals -> bind TR/TG/TB -> horizontal pass
// (run_cuda_row, also fills transposed temp_* buffers) -> bind TansR/G/B to
// temp_* -> vertical pass (run_cuda_col) -> download results -> cleanup.
// NOTE(review): no cudaError_t checks on any call, and no synchronization
// before cudaBindTexture(TansR..) — correctness here relies on the implicit
// ordering of default-stream operations.
void SR_kernel_up(int *ori_R, int *ori_G, int *ori_B, int *aft_R, int *aft_G, int *aft_B, int w, int h, int ww, int hh){
// 5-tap filter pairs for the two interpolation phases (copied to constant mem).
float u0[5]={-0.047, 0.6, 0.927, 0.119, -0.1};
float u1[5]={-0.1, 0.119, 0.927, 0.6, -0.047};
int *d_ori_R, *d_ori_G, *d_ori_B;
int *d_ans_R, *d_ans_G, *d_ans_B;
int *temp_R, *temp_G, *temp_B;
//int ww=w*3/2;
//int hh=h*3/2;
cudaMalloc((void**)&d_ori_R, w*h*sizeof(int));
cudaMalloc((void**)&d_ori_G, w*h*sizeof(int));
cudaMalloc((void**)&d_ori_B, w*h*sizeof(int));
cudaMalloc((void**)&temp_R, ww*h*sizeof(int));
cudaMalloc((void**)&temp_G, ww*h*sizeof(int));
cudaMalloc((void**)&temp_B, ww*h*sizeof(int));
cudaMalloc((void**)&d_ans_R, ww*hh*sizeof(int));
cudaMalloc((void**)&d_ans_G, ww*hh*sizeof(int));
cudaMalloc((void**)&d_ans_B, ww*hh*sizeof(int));
cudaMemcpy(d_ori_R, ori_R, w*h*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_ori_G, ori_G, w*h*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_ori_B, ori_B, w*h*sizeof(int), cudaMemcpyHostToDevice);
cudaBindTexture(0, TR, d_ori_R);
cudaBindTexture(0, TG, d_ori_G);
cudaBindTexture(0, TB, d_ori_B);
set_filter_up(u0, u1);
int threads=64;
int blocks=64;
for(int i=0; i<(h-1)/(threads*blocks) +1; ++i) // a thread do a row
run_cuda_row<<<blocks, threads>>>(i*threads*blocks, d_ans_R, d_ans_G, d_ans_B, w, h, ww, hh, temp_R, temp_G, temp_B);
//run_cuda_row<<<blocks, threads, threads*sizeof(int)*ww*2>>>(i*threads*blocks, d_ans_R, d_ans_G, d_ans_B, w, h, ww, hh, temp_R, temp_G, temp_B);
cudaBindTexture(0, TansR, temp_R);
cudaBindTexture(0, TansG, temp_G);
cudaBindTexture(0, TansB, temp_B);
for(int i=0; i<(ww-1)/(threads*blocks) +1; ++i) // a thread do a column
run_cuda_col<<<blocks, threads>>>(i*threads*blocks, d_ans_R, d_ans_G, d_ans_B, w, h, ww, hh);
cudaMemcpy(aft_R, d_ans_R, ww*hh*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(aft_G, d_ans_G, ww*hh*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(aft_B, d_ans_B, ww*hh*sizeof(int), cudaMemcpyDeviceToHost);
cudaUnbindTexture(TR);
cudaUnbindTexture(TG);
cudaUnbindTexture(TB);
cudaUnbindTexture(TansR);
cudaUnbindTexture(TansG);
cudaUnbindTexture(TansB);
cudaFree(d_ori_R);
cudaFree(d_ori_G);
cudaFree(d_ori_B);
cudaFree(d_ans_R);
cudaFree(d_ans_G);
cudaFree(d_ans_B);
cudaFree(temp_R);
cudaFree(temp_G);
cudaFree(temp_B);
}
|
f93f62a14d039efcf80c264a0d4ce991ef95f524.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Resamples a bounding-box region of a planar (per-channel) float image into
// a fixed output_w x output_h planar output using bilinear interpolation.
// One thread per output pixel (x,y). Each output pixel maps back to the
// fractional source coordinate r inside [bbox_x, bbox_x+bbox_w) x
// [bbox_y, bbox_y+bbox_h); the four surrounding source pixels are blended
// with weights s[]. The *_plane_{r,g,b} offsets select each channel plane
// in the flat input/output buffers.
// NOTE(review): `mean` is unused — mean subtraction is commented out below.
__global__ void kernel_extract_roi(float* input, float* output, char* mean, const int input_w, const int output_w, const int output_h, const int in_plane_r, const int in_plane_g, const int in_plane_b, const int out_plane_r, const int out_plane_g, const int out_plane_b, const int bbox_x, const int bbox_y, const int bbox_w, const int bbox_h)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if( x < output_w && y < output_h)
{
// Fractional source coordinate corresponding to this output pixel.
float r[2] = { float(x) * bbox_w / output_w + bbox_x,
float(y) * bbox_h / output_h + bbox_y };
// The four neighboring integer source positions (floor/ceil combinations).
int pos[4][2] = { { int(floor(r[0])), int(floor(r[1])) },
{ int( ceil(r[0])), int(floor(r[1])) },
{ int(floor(r[0])), int(ceil(r[1])) },
{ int( ceil(r[0])), int(ceil(r[1])) } };
float u = r[0]-floor(r[0]);
float v = r[1]-floor(r[1]);
// Bilinear weights for the four neighbors.
float s[4] = { (1-u)*(1-v), u*(1-v), (1-u)*v, u*v };
int map[4] = { pos[0][1]*input_w + pos[0][0], pos[1][1]*input_w + pos[1][0],
pos[2][1]*input_w + pos[2][0], pos[3][1]*input_w + pos[3][0]};
int idx = y * output_w + x;
output[idx+out_plane_r] = round( s[0]*input[map[0]+in_plane_r]
+ s[1]*input[map[1]+in_plane_r]
+ s[2]*input[map[2]+in_plane_r]
+ s[3]*input[map[3]+in_plane_r] );// float(mean[idx+out_plane_r]));
output[idx+out_plane_g] = round( s[0]*input[map[0]+in_plane_g]
+ s[1]*input[map[1]+in_plane_g]
+ s[2]*input[map[2]+in_plane_g]
+ s[3]*input[map[3]+in_plane_g] );//float(mean[idx+out_plane_g]));
output[idx+out_plane_b] = round( s[0]*input[map[0]+in_plane_b]
+ s[1]*input[map[1]+in_plane_b]
+ s[2]*input[map[2]+in_plane_b]
+ s[3]*input[map[3]+in_plane_b] );//float(mean[idx+out_plane_b]));
}
} | f93f62a14d039efcf80c264a0d4ce991ef95f524.cu | #include "includes.h"
__global__ void kernel_extract_roi(float* input, float* output, char* mean, const int input_w, const int output_w, const int output_h, const int in_plane_r, const int in_plane_g, const int in_plane_b, const int out_plane_r, const int out_plane_g, const int out_plane_b, const int bbox_x, const int bbox_y, const int bbox_w, const int bbox_h)
{
uint x = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
if( x < output_w && y < output_h)
{
float r[2] = { float(x) * bbox_w / output_w + bbox_x,
float(y) * bbox_h / output_h + bbox_y };
int pos[4][2] = { { int(floor(r[0])), int(floor(r[1])) },
{ int( ceil(r[0])), int(floor(r[1])) },
{ int(floor(r[0])), int(ceil(r[1])) },
{ int( ceil(r[0])), int(ceil(r[1])) } };
float u = r[0]-floor(r[0]);
float v = r[1]-floor(r[1]);
float s[4] = { (1-u)*(1-v), u*(1-v), (1-u)*v, u*v };
int map[4] = { pos[0][1]*input_w + pos[0][0], pos[1][1]*input_w + pos[1][0],
pos[2][1]*input_w + pos[2][0], pos[3][1]*input_w + pos[3][0]};
int idx = y * output_w + x;
output[idx+out_plane_r] = round( s[0]*input[map[0]+in_plane_r]
+ s[1]*input[map[1]+in_plane_r]
+ s[2]*input[map[2]+in_plane_r]
+ s[3]*input[map[3]+in_plane_r] );// float(mean[idx+out_plane_r]));
output[idx+out_plane_g] = round( s[0]*input[map[0]+in_plane_g]
+ s[1]*input[map[1]+in_plane_g]
+ s[2]*input[map[2]+in_plane_g]
+ s[3]*input[map[3]+in_plane_g] );//float(mean[idx+out_plane_g]));
output[idx+out_plane_b] = round( s[0]*input[map[0]+in_plane_b]
+ s[1]*input[map[1]+in_plane_b]
+ s[2]*input[map[2]+in_plane_b]
+ s[3]*input[map[3]+in_plane_b] );//float(mean[idx+out_plane_b]));
}
} |
ea4962537c9defd23fade64f459acc79d11555d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Element-wise update Z -= C*Y over an A x B grid; one thread per element.
// NOTE(review): Z(i,j)/Y(i,j) is function-call syntax applied to double** —
// presumably an indexing macro supplied by the including translation unit
// (this file will not compile standalone). TODO confirm the macro definition.
__global__ void delta(double** Z, double** Y, long A, long B, double C)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < A && j < B){
Z(i,j) -= C*Y(i,j);
}
}
| ea4962537c9defd23fade64f459acc79d11555d4.cu | __global__ void delta(double** Z, double** Y, long A, long B, double C)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < A && j < B){
Z(i,j) -= C*Y(i,j);
}
}
|
789304e7d08448385b8726ba39b7ac646cfd139d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "processBoxes.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated microbenchmark driver: sweeps the first argv[1] matrix
// sizes against 20 block configurations, timing 1000 launches of
// processBoxes per combination (after 1 cold + 10 warm-up launches) and
// printing "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per line.
// NOTE(review): hipMalloc is given XSIZE*YSIZE bytes for float buffers —
// missing *sizeof(float); the kernel may read/write past the allocation.
// NOTE(review): argv[1] is used unchecked and no HIP error codes are
// inspected; buffers are never freed (benchmark-only code).
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
const float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
const int stridex = 1;
const int stridey = 1;
// Round the problem size up to a multiple of the block dimensions.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// One cold launch to absorb context/module initialization cost.
hipFree(0);hipLaunchKernelGGL((
processBoxes), dim3(gridBlock),dim3(threadBlock), 0, 0, size,src,dst,stridex,stridey);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
processBoxes), dim3(gridBlock),dim3(threadBlock), 0, 0, size,src,dst,stridex,stridey);
}
// Timed region: 1000 asynchronous launches (no sync before `end`, so this
// measures launch throughput rather than kernel execution time).
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
processBoxes), dim3(gridBlock),dim3(threadBlock), 0, 0, size,src,dst,stridex,stridey);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 789304e7d08448385b8726ba39b7ac646cfd139d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "processBoxes.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
const float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
const int stridex = 1;
const int stridey = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
processBoxes<<<gridBlock,threadBlock>>>(size,src,dst,stridex,stridey);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
processBoxes<<<gridBlock,threadBlock>>>(size,src,dst,stridex,stridey);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
processBoxes<<<gridBlock,threadBlock>>>(size,src,dst,stridex,stridey);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
04c14def136dc7f024f8361abbf85c9b5caa3524.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2\opencv.hpp>
#include <cusolverDn.h>
#define BLOCKSIZE_X 32
#define BLOCKSIZE_Y 32
/******************/
/* ERROR CHECKING */
/******************/
#define cudaCHECK(ans) { checkAssert((ans), __FILE__, __LINE__); }
// Reports a failed HIP runtime call (used via the cudaCHECK macro above):
// prints the error string with the call-site file/line, then exits with the
// error code unless abort==false.
inline void checkAssert(hipError_t errorCode, const char *file, int line, bool abort = true)
{
if (errorCode != hipSuccess)
{
fprintf(stderr, "Check assert: %s %s %d\n", hipGetErrorString(errorCode), file, line);
if (abort) exit(errorCode);
}
}
/***************************/
/* cuSOLVER ERROR CHECKING */
/***************************/
// Maps a cusolverStatus_t code to a human-readable description.
// Returns "<unknown>" for any status not covered by the cases below.
static const char *_cuSolverReturnErrorString(cusolverStatus_t errorCode)
{
switch (errorCode) {
case CUSOLVER_STATUS_SUCCESS: return "cuSolver successful call";
case CUSOLVER_STATUS_NOT_INITIALIZED: return "cuSolver is not initialized";
case CUSOLVER_STATUS_ALLOC_FAILED: return "cuSolver internal resource allocation failed";
case CUSOLVER_STATUS_INVALID_VALUE: return "cuSolver function has an unsupported value or parameter";
case CUSOLVER_STATUS_ARCH_MISMATCH: return "cuSolver function requires an unsupported architecture feature";
case CUSOLVER_STATUS_EXECUTION_FAILED: return "cuSolver function failed to execute";
case CUSOLVER_STATUS_INTERNAL_ERROR: return "cuSolver internal operation failed";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "Matrix type not supported"; }
return "<unknown>";
}
// Aborts (via assert) on a failed cuSolver call, printing the status string
// together with the call-site location.
// FIX: previously printed __FILE__/__LINE__ (always this helper's own
// location) instead of the `file`/`line` parameters captured at the call
// site, and the message contained the sed-garbled word "errorCodeor".
// Stray line-continuation backslashes left over from the macro this was
// extracted from have also been removed.
inline void __cuSolverCHECK(cusolverStatus_t errorCode, const char *file, const int line)
{
if (CUSOLVER_STATUS_SUCCESS != errorCode) {
fprintf(stderr, "cuSolver was unsuccessful in file '%s', line %d; the reported error is: %s \nterminating!\n", file, line,
_cuSolverReturnErrorString(errorCode));
assert(0);
}
}
// Public wrapper: stamps the caller's file/line into the check.
// NOTE(review): as a plain function, __FILE__/__LINE__ expand here, not at
// the caller — a macro would be needed to report the true call site.
void cuSolverCHECK(cusolverStatus_t errorCode) { __cuSolverCHECK(errorCode, __FILE__, __LINE__); }
/**********************/
/* REMOVE MEAN KERNEL */
/**********************/
// Subtracts a single mean row (row 0 of srcPtr) from every row of dstPtr,
// in place: dst[row][col] -= src[0][col]. One thread per element of the
// Nrows x Ncols destination. srcStep/dstStep are the row pitches in BYTES
// (cv::cuda::GpuMat-style pitched storage), hence the char* arithmetic.
__global__ void removeMeanKernel(const float * __restrict__ srcPtr, float * __restrict__ dstPtr, const size_t srcStep, const size_t dstStep, const int Nrows, const int Ncols) {
int rowIdx = blockIdx.y * blockDim.y + threadIdx.y;
int colIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (rowIdx >= Nrows || colIdx >= Ncols) return;
// Always row 0 of the source: the mean is a 1-row matrix broadcast to all rows.
const float *rowSrcPtr = (const float *)(((char *)srcPtr) + 0 * srcStep);
float *rowDstPtr = ( float *)(((char *)dstPtr) + rowIdx * dstStep);
rowDstPtr[colIdx] = rowDstPtr[colIdx] - rowSrcPtr[colIdx];
}
/********/
/* MAIN */
/********/
// Demo: builds two 2x3 host matrices, flattens them into the two rows of a
// 2x6 GPU matrix, removes the column means (removeMeanKernel), forms the
// 2x2 covariance matrix A*A^T via cv::cuda::gemm, and computes its SVD with
// cuSolver (hipsolverDnSgesvd), printing every intermediate result.
// NOTE(review): host/device buffers (h_U/h_V/h_S, d_U/d_V/d_S, workArray,
// devInfo) and the cuSolver handle are never released — acceptable for a
// one-shot demo, leaks otherwise.
int main() {
// --- 2x3 float, one-channel matrix
cv::Mat h_A1(2, 3, CV_32FC1);
cv::Mat h_A2(2, 3, CV_32FC1);
// --- First row
h_A1.at<float>(0, 0) = 3.f;
h_A1.at<float>(0, 1) = 4.f;
h_A1.at<float>(0, 2) = 12.f;
// --- Second row
h_A1.at<float>(1, 0) = -1.f;
h_A1.at<float>(1, 1) = 0.3f;
h_A1.at<float>(1, 2) = -4.323f;
// --- First row
h_A2.at<float>(0, 0) = -1.f;
h_A2.at<float>(0, 1) = 2.f;
h_A2.at<float>(0, 2) = 0.34f;
// --- Second row
h_A2.at<float>(1, 0) = -2.32f;
h_A2.at<float>(1, 1) = 37.f;
h_A2.at<float>(1, 2) = 11.121f;
std::cout << "Matrix 1 = " << std::endl << " " << h_A1 << std::endl << std::endl;
std::cout << "Matrix 2 = " << std::endl << " " << h_A2 << std::endl << std::endl;
// --- Transform matrix A1 into row
h_A1 = h_A1.reshape(0, 1);
// --- Transform matrix A2 into row
h_A2 = h_A2.reshape(0, 1);
std::cout << "Matrix 1 = " << std::endl << " " << h_A1 << std::endl << std::endl;
std::cout << "Matrix 2 = " << std::endl << " " << h_A2 << std::endl << std::endl;
// --- GPU memory allocation
cv::cuda::GpuMat d_A(2, h_A1.total(), CV_32FC1);
// --- Copy first row (pitched 2D copy: GpuMat rows are padded to d_A.step)
float *rowPointer = d_A.ptr<float>(0);
cudaCHECK(hipMemcpy2D(rowPointer,
d_A.step * sizeof(float),
h_A1.ptr<float>(0),
h_A1.step * sizeof(float),
h_A1.cols * sizeof(float),
1,
hipMemcpyHostToDevice));
// --- Copy second row
rowPointer = d_A.ptr<float>(1);
cudaCHECK(hipMemcpy2D(rowPointer,
d_A.step * sizeof(float),
h_A2.ptr<float>(0),
h_A2.step * sizeof(float),
h_A2.cols * sizeof(float),
1,
hipMemcpyHostToDevice));
cv::Mat h_result(d_A);
std::cout << "CPU -> GPU memory movement: result matrix = " << std::endl << " " << h_result << std::endl << std::endl;
// --- Average over the two rows (reduce dim 0, CV_REDUCE_AVG == 1)
cv::cuda::GpuMat d_mean(1, h_A1.total(), CV_32FC1);
cv::cuda::reduce(d_A, d_mean, 0, 1);
cv::Mat h_mean(d_mean);
std::cout << "Average matrix over columns = " << std::endl << " " << h_mean << std::endl << std::endl;
// A single 32x32 block covers the whole 2x6 matrix.
dim3 blockDim(BLOCKSIZE_X, BLOCKSIZE_Y);
dim3 gridDim(1, 1);
removeMeanKernel << <gridDim, blockDim >> > ((float *)d_mean.data, (float *)d_A.data, d_mean.step, d_A.step, 2, h_A1.total());
cudaCHECK(hipPeekAtLastError());
cudaCHECK(hipDeviceSynchronize());
cv::Mat h_A(d_A);
std::cout << "Matrix with removed average = " << std::endl << " " << h_A << std::endl << std::endl;
// --- Compute covariance matrix (A * A^T via GEMM with transposed 2nd arg)
const int Nrows = 2;
const int Ncols = 2;
cv::cuda::GpuMat d_Cov(Nrows, Ncols, CV_32FC1);
cv::cuda::gemm(d_A, d_A, 1.f, d_Cov, 0.f, d_Cov, cv::GEMM_2_T);
cv::Mat h_Cov(d_Cov);
std::cout << "Covariance matrix = " << std::endl << " " << h_Cov << std::endl << std::endl;
// --- Compute SVD
hipsolverDnHandle_t cuSolverHandle;
cuSolverCHECK(hipsolverDnCreate(&cuSolverHandle));
int workSize = 0;
cuSolverCHECK(hipsolverDnSgesvd_bufferSize(cuSolverHandle, Nrows, Ncols, &workSize));
float *workArray; cudaCHECK(hipMalloc(&workArray, workSize * sizeof(float)));
// --- Allocating SVD space on the host
float *h_U = (float *)malloc(Nrows * Nrows * sizeof(float));
float *h_V = (float *)malloc(Ncols * Ncols * sizeof(float));
float *h_S = (float *)malloc(min(Nrows, Ncols) * sizeof(float));
// --- Allocating SVD space on the device
float *d_U; cudaCHECK(hipMalloc(&d_U, Nrows * Nrows * sizeof(float)));
float *d_V; cudaCHECK(hipMalloc(&d_V, Ncols * Ncols * sizeof(float)));
float *d_S; cudaCHECK(hipMalloc(&d_S, min(Nrows, Ncols) * sizeof(float)));
//float *rWork; cudaCHECK(hipMalloc(&rWork, 1 * sizeof(float)));
int *devInfo; cudaCHECK(hipMalloc(&devInfo, sizeof(int)));
// 'A','A': compute all columns of U and all rows of V^T.
// d_Cov.step1() supplies the leading dimension in elements (pitched storage).
cuSolverCHECK(hipsolverDnSgesvd(
cuSolverHandle,
'A',
'A',
Nrows,
Ncols,
(float *)d_Cov.data,
d_Cov.step1(),
d_S,
d_U,
Nrows,
d_V,
Ncols,
workArray,
workSize,
NULL,
//rWork
devInfo));
int devInfo_h = 0; cudaCHECK(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost));
if (devInfo_h != 0) std::cout << "Unsuccessful SVD execution\n\n";
// --- Moving the results from device to host
cudaCHECK(hipMemcpy(h_S, d_S, min(Nrows, Ncols) * sizeof(float), hipMemcpyDeviceToHost));
cudaCHECK(hipMemcpy(h_U, d_U, Nrows * Nrows * sizeof(float), hipMemcpyDeviceToHost));
cudaCHECK(hipMemcpy(h_V, d_V, Ncols * Ncols * sizeof(float), hipMemcpyDeviceToHost));
printf("\n\nSingular values = %f %f\n", h_S[0], h_S[1]);
printf("\n\nFirst column of U = %f %f\n", h_U[0], h_U[1]);
printf("\n\nSecond column of U = %f %f\n", h_U[2], h_U[3]);
printf("\n\nFirst column of V = %f %f\n", h_V[0], h_V[1]);
printf("\n\nSecond column of V = %f %f\n", h_V[2], h_V[3]);
// NOTE(review): d_Cov.step is size_t; printing it with %d is a format
// mismatch — should be %zu.
printf("%d\n", d_Cov.step);
return 0;
}
| 04c14def136dc7f024f8361abbf85c9b5caa3524.cu | #include <opencv2\opencv.hpp>
#include <cusolverDn.h>
#define BLOCKSIZE_X 32
#define BLOCKSIZE_Y 32
/******************/
/* ERROR CHECKING */
/******************/
#define cudaCHECK(ans) { checkAssert((ans), __FILE__, __LINE__); }
inline void checkAssert(cudaError_t errorCode, const char *file, int line, bool abort = true)
{
if (errorCode != cudaSuccess)
{
fprintf(stderr, "Check assert: %s %s %d\n", cudaGetErrorString(errorCode), file, line);
if (abort) exit(errorCode);
}
}
/***************************/
/* cuSOLVER ERROR CHECKING */
/***************************/
static const char *_cuSolverReturnErrorString(cusolverStatus_t errorCode)
{
switch (errorCode) {
case CUSOLVER_STATUS_SUCCESS: return "cuSolver successful call";
case CUSOLVER_STATUS_NOT_INITIALIZED: return "cuSolver is not initialized";
case CUSOLVER_STATUS_ALLOC_FAILED: return "cuSolver internal resource allocation failed";
case CUSOLVER_STATUS_INVALID_VALUE: return "cuSolver function has an unsupported value or parameter";
case CUSOLVER_STATUS_ARCH_MISMATCH: return "cuSolver function requires an unsupported architecture feature";
case CUSOLVER_STATUS_EXECUTION_FAILED: return "cuSolver function failed to execute";
case CUSOLVER_STATUS_INTERNAL_ERROR: return "cuSolver internal operation failed";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "Matrix type not supported"; }
return "<unknown>";
}
inline void __cuSolverCHECK(cusolverStatus_t errorCode, const char *file, const int line)
{
if (CUSOLVER_STATUS_SUCCESS != errorCode) {
fprintf(stderr, "cuSolver was unsuccessful in file '%s', line %d; the reported errorCodeor is: %s \nterminating!\n", __FILE__, __LINE__, \
_cuSolverReturnErrorString(errorCode)); \
assert(0); \
}
}
void cuSolverCHECK(cusolverStatus_t errorCode) { __cuSolverCHECK(errorCode, __FILE__, __LINE__); }
/**********************/
/* REMOVE MEAN KERNEL */
/**********************/
__global__ void removeMeanKernel(const float * __restrict__ srcPtr, float * __restrict__ dstPtr, const size_t srcStep, const size_t dstStep, const int Nrows, const int Ncols) {
int rowIdx = blockIdx.y * blockDim.y + threadIdx.y;
int colIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (rowIdx >= Nrows || colIdx >= Ncols) return;
const float *rowSrcPtr = (const float *)(((char *)srcPtr) + 0 * srcStep);
float *rowDstPtr = ( float *)(((char *)dstPtr) + rowIdx * dstStep);
rowDstPtr[colIdx] = rowDstPtr[colIdx] - rowSrcPtr[colIdx];
}
/********/
/* MAIN */
/********/
int main() {
// --- 2x3 float, one-channel matrix
cv::Mat h_A1(2, 3, CV_32FC1);
cv::Mat h_A2(2, 3, CV_32FC1);
// --- First row
h_A1.at<float>(0, 0) = 3.f;
h_A1.at<float>(0, 1) = 4.f;
h_A1.at<float>(0, 2) = 12.f;
// --- Second row
h_A1.at<float>(1, 0) = -1.f;
h_A1.at<float>(1, 1) = 0.3f;
h_A1.at<float>(1, 2) = -4.323f;
// --- First row
h_A2.at<float>(0, 0) = -1.f;
h_A2.at<float>(0, 1) = 2.f;
h_A2.at<float>(0, 2) = 0.34f;
// --- Second row
h_A2.at<float>(1, 0) = -2.32f;
h_A2.at<float>(1, 1) = 37.f;
h_A2.at<float>(1, 2) = 11.121f;
std::cout << "Matrix 1 = " << std::endl << " " << h_A1 << std::endl << std::endl;
std::cout << "Matrix 2 = " << std::endl << " " << h_A2 << std::endl << std::endl;
// --- Transform matrix A1 into row
h_A1 = h_A1.reshape(0, 1);
// --- Transform matrix A2 into row
h_A2 = h_A2.reshape(0, 1);
std::cout << "Matrix 1 = " << std::endl << " " << h_A1 << std::endl << std::endl;
std::cout << "Matrix 2 = " << std::endl << " " << h_A2 << std::endl << std::endl;
// --- GPU memory allocation
cv::cuda::GpuMat d_A(2, h_A1.total(), CV_32FC1);
// --- Copy first row
float *rowPointer = d_A.ptr<float>(0);
cudaCHECK(cudaMemcpy2D(rowPointer,
d_A.step * sizeof(float),
h_A1.ptr<float>(0),
h_A1.step * sizeof(float),
h_A1.cols * sizeof(float),
1,
cudaMemcpyHostToDevice));
// --- Copy second row
rowPointer = d_A.ptr<float>(1);
cudaCHECK(cudaMemcpy2D(rowPointer,
d_A.step * sizeof(float),
h_A2.ptr<float>(0),
h_A2.step * sizeof(float),
h_A2.cols * sizeof(float),
1,
cudaMemcpyHostToDevice));
cv::Mat h_result(d_A);
std::cout << "CPU -> GPU memory movement: result matrix = " << std::endl << " " << h_result << std::endl << std::endl;
// --- Average
cv::cuda::GpuMat d_mean(1, h_A1.total(), CV_32FC1);
cv::cuda::reduce(d_A, d_mean, 0, 1);
cv::Mat h_mean(d_mean);
std::cout << "Average matrix over columns = " << std::endl << " " << h_mean << std::endl << std::endl;
dim3 blockDim(BLOCKSIZE_X, BLOCKSIZE_Y);
dim3 gridDim(1, 1);
removeMeanKernel << <gridDim, blockDim >> > ((float *)d_mean.data, (float *)d_A.data, d_mean.step, d_A.step, 2, h_A1.total());
cudaCHECK(cudaPeekAtLastError());
cudaCHECK(cudaDeviceSynchronize());
cv::Mat h_A(d_A);
std::cout << "Matrix with removed average = " << std::endl << " " << h_A << std::endl << std::endl;
// --- Compute covariance matrix
const int Nrows = 2;
const int Ncols = 2;
cv::cuda::GpuMat d_Cov(Nrows, Ncols, CV_32FC1);
cv::cuda::gemm(d_A, d_A, 1.f, d_Cov, 0.f, d_Cov, cv::GEMM_2_T);
cv::Mat h_Cov(d_Cov);
std::cout << "Covariance matrix = " << std::endl << " " << h_Cov << std::endl << std::endl;
// --- Compute SVD
cusolverDnHandle_t cuSolverHandle;
cuSolverCHECK(cusolverDnCreate(&cuSolverHandle));
int workSize = 0;
cuSolverCHECK(cusolverDnSgesvd_bufferSize(cuSolverHandle, Nrows, Ncols, &workSize));
float *workArray; cudaCHECK(cudaMalloc(&workArray, workSize * sizeof(float)));
// --- Allocating SVD space on the host
float *h_U = (float *)malloc(Nrows * Nrows * sizeof(float));
float *h_V = (float *)malloc(Ncols * Ncols * sizeof(float));
float *h_S = (float *)malloc(min(Nrows, Ncols) * sizeof(float));
// --- Allocating SVD space on the device
float *d_U; cudaCHECK(cudaMalloc(&d_U, Nrows * Nrows * sizeof(float)));
float *d_V; cudaCHECK(cudaMalloc(&d_V, Ncols * Ncols * sizeof(float)));
float *d_S; cudaCHECK(cudaMalloc(&d_S, min(Nrows, Ncols) * sizeof(float)));
//float *rWork; cudaCHECK(cudaMalloc(&rWork, 1 * sizeof(float)));
int *devInfo; cudaCHECK(cudaMalloc(&devInfo, sizeof(int)));
cuSolverCHECK(cusolverDnSgesvd(
cuSolverHandle,
'A',
'A',
Nrows,
Ncols,
(float *)d_Cov.data,
d_Cov.step1(),
d_S,
d_U,
Nrows,
d_V,
Ncols,
workArray,
workSize,
NULL,
//rWork
devInfo));
int devInfo_h = 0; cudaCHECK(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
if (devInfo_h != 0) std::cout << "Unsuccessful SVD execution\n\n";
// --- Moving the results from device to host
cudaCHECK(cudaMemcpy(h_S, d_S, min(Nrows, Ncols) * sizeof(float), cudaMemcpyDeviceToHost));
cudaCHECK(cudaMemcpy(h_U, d_U, Nrows * Nrows * sizeof(float), cudaMemcpyDeviceToHost));
cudaCHECK(cudaMemcpy(h_V, d_V, Ncols * Ncols * sizeof(float), cudaMemcpyDeviceToHost));
printf("\n\nSingular values = %f %f\n", h_S[0], h_S[1]);
printf("\n\nFirst column of U = %f %f\n", h_U[0], h_U[1]);
printf("\n\nSecond column of U = %f %f\n", h_U[2], h_U[3]);
printf("\n\nFirst column of V = %f %f\n", h_V[0], h_V[1]);
printf("\n\nSecond column of V = %f %f\n", h_V[2], h_V[3]);
printf("%d\n", d_Cov.step);
return 0;
}
|
1d697121f5f796a114817d1c7e7924ad33915cfd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
    // Copy the input to the device, scan it in place, then copy the result
    // back to the host.
    // Fix: the original also built a device_vector from odata, which uploaded
    // n uninitialized ints to the GPU only to immediately overwrite them; the
    // in-place exclusive_scan removes that wasted host-to-device transfer.
    thrust::device_vector<int> dvec(idata, idata + n);
    thrust::exclusive_scan(dvec.begin(), dvec.end(), dvec.begin());
    thrust::copy(dvec.begin(), dvec.end(), odata);
}
}
}
| 1d697121f5f796a114817d1c7e7924ad33915cfd.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
    // Copy the input to the device, scan it in place, then copy the result
    // back to the host.
    // Fix: the original also built a device_vector from odata, which uploaded
    // n uninitialized ints to the GPU only to immediately overwrite them; the
    // in-place exclusive_scan removes that wasted host-to-device transfer.
    thrust::device_vector<int> dvec(idata, idata + n);
    thrust::exclusive_scan(dvec.begin(), dvec.end(), dvec.begin());
    thrust::copy(dvec.begin(), dvec.end(), odata);
}
}
}
|
3a3d310a2eed5ba7a162502c73c24db08445c17f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Tue Aug 13 16:45:12 2013
*/
#include "common_magma.h"
#include <cblas.h>
#include "magma.h"
#define PRECISION_c
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
#else
#define BLOCK_SIZE 768
#endif
__global__ void magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv,
magmaFloatComplex *c, magmaFloatComplex *dwork,
magmaFloatComplex *tau);
/* --------------------------------------------------------------------------- */
/* Block-wide tree reduction: sums the n floats held in shared-memory array x,
   leaving the total in x[0].  n is the compile-time number of threads in the
   block; i is the calling thread's index.  Must be reached by every thread of
   the block, since it executes __syncthreads().
   NOTE(review): the in-line comment below about dropping barriers for < 16
   threads relies on implicit warp-level synchronization, which is not
   guaranteed on Volta+; the code correctly keeps every barrier. */
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
    __syncthreads();
    /* Each step folds the upper half of the remaining range onto the lower half. */
    // if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i] += x[i+  64]; } __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i] += x[i+  32]; } __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i] += x[i+  16]; } __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i] += x[i+   8]; } __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i] += x[i+   4]; } __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i] += x[i+   2]; } __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i] += x[i+   1]; } __syncthreads(); }
}
// end sum_reduce
/* --------------------------------------------------------------------------- */
#define BLOCK_SIZE1 192
/* Fused column swap + left-looking update, one thread per row:
   swaps b[row] and c[row]; the value stored into c additionally receives the
   pending reflector update  c[row] -= V(row,0:n-1) * conj(x(0:n-1))  for rows
   at or below the pivot row rk.
   Launch: ceil(m / BLOCK_SIZE1) blocks of BLOCK_SIZE1 threads. */
__global__ void
magma_cswap_gemv_kernel(int m, int rk, int n, const magmaFloatComplex * __restrict__ V, int ldv,
                        const magmaFloatComplex * __restrict__ x, int ldx, magmaFloatComplex *c, magmaFloatComplex *b)
{
    const int row = blockIdx.x * BLOCK_SIZE1 + threadIdx.x;
    if (row >= m)
        return;
    const magmaFloatComplex swapped = b[row];
    b[row] = c[row];
    magmaFloatComplex dot = MAGMA_C_ZERO;
    if (row >= rk) {
        const magmaFloatComplex *Vrow = V + row;
        for (int col = 0; col < n; ++col)
            dot += MAGMA_C_MUL( Vrow[col * ldv], MAGMA_C_CNJG( x[col * ldx] ) );
    }
    c[row] = swapped - dot;
}
/* Computes c = b - V*x for an m-by-n matrix V, one thread per row:
     c[row] = b[row] - sum_k V(row,k) * x[k].
   Launch: ceil(m / BLOCK_SIZE1) blocks of BLOCK_SIZE1 threads. */
__global__ void
magma_cgemv_kernel(int m, int n, const magmaFloatComplex * __restrict__ V, int ldv,
                   const magmaFloatComplex * __restrict__ x, magmaFloatComplex *b, magmaFloatComplex *c)
{
    const int row = blockIdx.x * BLOCK_SIZE1 + threadIdx.x;
    if (row >= m)
        return;
    magmaFloatComplex dot = MAGMA_C_ZERO;
    for (int col = 0; col < n; ++col)
        dot += MAGMA_C_MUL( V[row + col * ldv], x[col] );
    c[row] = b[row] - dot;
}
/* Generates an elementary Householder reflector from the n-vector dx0, using
   a single thread block of BLOCK_SIZE threads:
     1. computes ||dx0|| via a block reduction,
     2. thread 0 derives tau (-> *dtau) and beta (-> *dAkk),
     3. all threads rescale dx0 into the Householder vector,
     4. dx0[0] is temporarily set to one (the real diagonal value is in *dAkk).
   NOTE(review): the dxnorm argument is never read or written in this kernel —
   presumably kept for interface symmetry; confirm before removing.
   NOTE(review): when beta == 0 the shared `scale` is left uninitialized but is
   still used in the scaling loop below — verify callers never hit that path
   with a nonempty vector. */
__global__
void magma_cscale_kernel(int n, magmaFloatComplex* dx0,
                         magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex* dAkk)
{
    const int i = threadIdx.x;
    magmaFloatComplex tmp;
    __shared__ magmaFloatComplex scale;   // scaling factor, broadcast from thread 0
    /* === Compute the norm of dx0 === */
    magmaFloatComplex *dx = dx0;
    __shared__ float sum[ BLOCK_SIZE ];
    float re, lsum;
    lsum = 0;
    /* Each thread accumulates |dx[k]|^2 over a BLOCK_SIZE-strided slice. */
    for( int k = i; k < n; k += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
        re = dx[k];
        lsum += re*re;
#else
        re = MAGMA_C_REAL( dx[k] );
        float im = MAGMA_C_IMAG( dx[k] );
        lsum += re*re + im*im;
#endif
    }
    sum[i] = lsum;
    sum_reduce< BLOCK_SIZE >( i, sum );
    /* === Compute the scaling factor === */
    if (i==0){
        float beta = sqrt(sum[0]);
        if ( beta == 0 ) {
            *dtau = MAGMA_C_ZERO;
        }
        else {
            tmp = dx0[0];
#if (defined(PRECISION_s) || defined(PRECISION_d))
            beta  = -copysign( beta, tmp );
            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau = (beta - tmp) / beta;
            *dAkk = beta;
            scale = 1. / (tmp - beta);
#else
            float alphar = MAGMA_C_REAL(tmp), alphai = MAGMA_C_IMAG(tmp);
            beta  = -copysign( beta, alphar );
            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
            *dAkk = MAGMA_C_MAKE(beta, 0.);
            tmp   = MAGMA_C_MAKE( alphar - beta, alphai);
            scale = MAGMA_C_DIV( MAGMA_C_ONE, tmp);
#endif
        }
    }
    __syncthreads();   // publish `scale` to the whole block before use
    /* === Scale the vector === */
    for(int j=i; j<n; j+=BLOCK_SIZE)
        dx0[j] = MAGMA_C_MUL(dx0[j], scale);
    /* === Make temporary the first element to 1; value is stored in dAkk === */
    if (i==0)
        dx0[0] = MAGMA_C_ONE;
}
/* Complex-valued counterpart of sum_reduce: block-wide tree reduction of the
   n magmaFloatComplex values in shared array x, total left in x[0].
   n is the compile-time thread count; i is the caller's thread index.
   Must be reached by every thread of the block (executes __syncthreads()). */
template< int n >
__device__ void zsum_reduce( /*int n,*/ int i, magmaFloatComplex* x )
{
    __syncthreads();
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i] += x[i+  64]; } __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i] += x[i+  32]; } __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i] += x[i+  16]; } __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i] += x[i+   8]; } __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i] += x[i+   4]; } __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i] += x[i+   2]; } __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i] += x[i+   1]; } __syncthreads(); }
}
/* One thread block per column of V: computes one entry of tau * V' * c,
     dwork[blockIdx.x] = (*tau) * V(:,blockIdx.x)' * c,
   using a shared-memory block reduction of the per-thread partial dots. */
__global__ void
magma_cgemv_kernel1(int m, magmaFloatComplex *tau, const magmaFloatComplex * __restrict__ V, int ldv,
                    const magmaFloatComplex * __restrict__ c,
                    magmaFloatComplex *dwork)
{
    const int i = threadIdx.x;
    const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;   // this block's column
    __shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
    magmaFloatComplex lsum;
    /* lsum := v' * C  (each thread covers a BLOCK_SIZE-strided slice) */
    lsum = MAGMA_C_ZERO;
    for( int j = i; j < m; j += BLOCK_SIZE )
        lsum += MAGMA_C_MUL( MAGMA_C_CNJG( dV[j] ), c[j] );
    sum[i] = lsum;
    zsum_reduce< BLOCK_SIZE >( i, sum );
    __syncthreads();   // barrier before thread 0 reads the reduced value
    if (i==0)
        dwork [blockIdx.x] = (*tau)*sum[0];
}
#define BLOCK_SIZE2 192
#if (defined(PRECISION_z) || defined(PRECISION_d))
#define TOL 1.e-8
#else
#define TOL 1.e-4
#endif
/* One thread per column j of the trailing block:
     1. applies the accumulated reflector update to row entry C[j*lda],
        C[j*lda] -= conj(B(j, k-1)) + sum_{m<k-1} conj(B(j,m)) * A[m*lda],
     2. downdates the partial column norm xnorm[j],
     3. flags the column in lsticcs[j] (and the global flag *lsticc) when the
        downdated norm became numerically unreliable (ratio test vs. xnorm2),
   and finally restores the diagonal value *Akk into A[(k-1)*lda] (thread j==0). */
__global__ void
magma_cgemv_kernel_adjust(int n, int k, magmaFloatComplex * A, int lda,
                          magmaFloatComplex *B, int ldb, magmaFloatComplex *C,
                          float *xnorm, float *xnorm2, magmaFloatComplex *Akk, int *lsticc, int *lsticcs)
{
    const int i = threadIdx.x;
    const int j = i + BLOCK_SIZE2 * blockIdx.x;
    magmaFloatComplex sum;
    float temp, oldnorm;
    if (j<n) {
        B += j;
        /* The (k-1)-th term enters with coefficient 1 (dx0[0] was forced to 1). */
        sum = MAGMA_C_CNJG( B[(k-1)*ldb] );
        // sum = MAGMA_C_ZERO;
        for(int m=0; m<k-1; m++) {
            sum += MAGMA_C_MUL( MAGMA_C_CNJG( B[m*ldb] ), A[m*lda] );
        }
        C[j*lda] -= sum;
        /* Norm downdate: ||col||^2 shrinks by the eliminated component. */
        oldnorm = xnorm[j];
        temp = MAGMA_C_ABS( C[j*lda] ) / oldnorm;
        temp = (1.0 + temp) * (1.0 - temp);
        temp = oldnorm * sqrt(temp);
        xnorm[j] = temp;
        // Below 'j' was 'i'; was that a bug?
        float temp2 = xnorm[j] / xnorm2[j];
        temp2 = temp*(temp2 * temp2);
        if (temp2 <= TOL){
            *lsticc = 1;      // global "some norm needs recomputation" flag
            lsticcs[j] = 1;   // per-column flag, consumed by the recompute kernel
        }
    }
    if (j==0)
        A[(k-1)*lda] = *Akk;
/*
    __syncthreads();
    // Check if the norm has to be recomputed
    if (blockIdx.x==0) {
        //if (2.*temp < oldnorm) {
            //printf("recompute norm\n");
            magmaFloatComplex *dx = C+blockIdx.x*lda+1;
            __shared__ float sum[ BLOCK_SIZE2 ];
            float re, lsum;
            // get norm of dx
            lsum = 0;
            for( int k = i; k < n1; k += BLOCK_SIZE2 ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
                re = dx[k];
                lsum += re*re;
#else
                re = MAGMA_C_REAL( dx[k] );
                float im = MAGMA_C_IMAG( dx[k] );
                lsum += re*re + im*im;
#endif
            }
            sum[i] = lsum;
            sum_reduce< BLOCK_SIZE2 >( i, sum );
            if (i==0){
                printf("adjusted = %f recomputed = %f\n", xnorm[blockIdx.x], sqrt(sum[0]));
                xnorm[blockIdx.x] = sqrt(sum[0]);
            }
    }
    // }
*/
}
/* Recomputes flagged column norms: one thread block per column of da.
   Block b recomputes ||da(:,b)||_2 only when dlsticcs[b] != 0, clearing the
   flag; both dxnorm[b] and dxnorm2[b] receive the fresh norm.  Block 0 /
   thread 0 also resets the global flag *dlsticc.
   The early return below is uniform per block (condition depends only on
   blockIdx.x), so it cannot strand threads at the barriers in sum_reduce.
   NOTE(review): *dlsticc is cleared by block 0 while other blocks may still
   be running; confirm callers only read it after the kernel completes. */
__global__ void
magmablas_scnrm2_check_kernel(int m, magmaFloatComplex *da, int ldda,
                              float *dxnorm, float *dxnorm2,
                              int *dlsticc, int *dlsticcs)
{
    const int i = threadIdx.x;
    magmaFloatComplex *dx = da + blockIdx.x * ldda;
    __shared__ float sum[ BLOCK_SIZE ];
    float re, lsum;
    if (blockIdx.x == 0 && i==0)
        *dlsticc = 0;
    // get norm of dx only if lsticc[blockIdx] != 0
    if( dlsticcs[blockIdx.x] == 0 )
        return;
    else
        dlsticcs[blockIdx.x] = 0;
    lsum = 0;
    for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
        re = dx[j];
        lsum += re*re;
#else
        re = MAGMA_C_REAL( dx[j] );
        float im = MAGMA_C_IMAG( dx[j] );
        lsum += re*re + im*im;
#endif
    }
    sum[i] = lsum;
    sum_reduce< BLOCK_SIZE >( i, sum );
    if (i==0){
        dxnorm[blockIdx.x]  = sqrt(sum[0]);
        dxnorm2[blockIdx.x] = sqrt(sum[0]);
    }
}
/* --------------------------------------------------------------------------- */
extern "C" magma_int_t
magma_claqps3_gpu(magma_int_t m, magma_int_t n, magma_int_t offset,
                  magma_int_t nb, magma_int_t *kb,
                  magmaFloatComplex *A,  magma_int_t lda,
                  magma_int_t *jpvt, magmaFloatComplex *tau,
                  float *vn1, float *vn2,
                  magmaFloatComplex *auxv,
                  magmaFloatComplex *F,  magma_int_t ldf)
{
/*  -- MAGMA (version 1.4.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       August 2013
    Purpose
    =======
    CLAQPS computes a step of QR factorization with column pivoting
    of a complex M-by-N matrix A by using Blas-3.  It tries to factorize
    NB columns from A starting from the row OFFSET+1, and updates all
    of the matrix with Blas-3 xGEMM.
    In some cases, due to catastrophic cancellations, it cannot
    factorize NB columns.  Hence, the actual number of factorized
    columns is returned in KB.
    Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
    Arguments
    =========
    M       (input) INTEGER
            The number of rows of the matrix A. M >= 0.
    N       (input) INTEGER
            The number of columns of the matrix A. N >= 0
    OFFSET  (input) INTEGER
            The number of rows of A that have been factorized in
            previous steps.
    NB      (input) INTEGER
            The number of columns to factorize.
    KB      (output) INTEGER
            The number of columns actually factorized.
    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
            On entry, the M-by-N matrix A.
            On exit, block A(OFFSET+1:M,1:KB) is the triangular
            factor obtained and block A(1:OFFSET,1:N) has been
            accordingly pivoted, but no factorized.
            The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
            been updated.
    LDA     (input) INTEGER
            The leading dimension of the array A. LDA >= max(1,M).
    JPVT    (input/output) INTEGER array, dimension (N)
            JPVT(I) = K <==> Column K of the full matrix A has been
            permuted into position I in AP.
    TAU     (output) COMPLEX*16 array, dimension (KB)
            The scalar factors of the elementary reflectors.
    VN1     (input/output) DOUBLE PRECISION array, dimension (N)
            The vector with the partial column norms.
    VN2     (input/output) DOUBLE PRECISION array, dimension (N)
            The vector with the exact column norms.
    AUXV    (input/output) COMPLEX*16 array, dimension (NB)
            Auxiliar vector.
    F       (input/output) COMPLEX*16 array, dimension (LDF,NB)
            Matrix F' = L*Y'*A.
    LDF     (input) INTEGER
            The leading dimension of the array F. LDF >= max(1,N).
    =====================================================================    */
#define A(i, j) (A + (i) + (j)*(lda ))
#define F(i, j) (F + (i) + (j)*(ldf ))
    magmaFloatComplex c_zero    = MAGMA_C_MAKE( 0.,0.);
    magmaFloatComplex c_one     = MAGMA_C_MAKE( 1.,0.);
    magmaFloatComplex c_neg_one = MAGMA_C_MAKE(-1.,0.);
    magma_int_t ione = 1;
    magma_int_t i__1, i__2;
    magma_int_t k, rk;
    magmaFloatComplex tauk;
    magma_int_t pvt, itemp;
    /* First element of auxv is borrowed to stage the diagonal value A(rk,k). */
    magmaFloatComplex *dAkk = auxv;
    auxv+=1;
    /* Per-column "norm unreliable" flags, plus one global flag at index n. */
    int lsticc, *dlsticc, *dlsticcs;
    magma_malloc( (void**) &dlsticcs, (n+1)*sizeof(int) );  // NOTE(review): allocation status unchecked
    hipMemset( dlsticcs, 0, (n+1)*sizeof(int) );
    dlsticc = dlsticcs + n;
    // float tol3z = magma_ssqrt( lapackf77_slamch("Epsilon"));
    lsticc = 0;
    k = 0;
    /* Factor up to nb columns, stopping early if a norm downdate went bad. */
    while( k < nb && lsticc == 0 ) {
        rk = offset + k;
        /* Determine ith pivot column and swap if necessary */
        pvt = k - 1 + magma_isamax( n-k, &vn1[k], ione );   // isamax returns a 1-based index
        if (pvt != k) {
            magmablas_cswap( k, F(pvt,0), ldf, F(k,0), ldf);
            itemp     = jpvt[pvt];
            jpvt[pvt] = jpvt[k];
            jpvt[k]   = itemp;
#if (defined(PRECISION_d) || defined(PRECISION_z))
            //magma_dswap( 1, &vn1[pvt], 1, &vn1[k], 1 );
            //magma_dswap( 1, &vn2[pvt], 1, &vn2[k], 1 );
            magma_dswap( 2, &vn1[pvt], n+offset, &vn1[k], n+offset);
#else
            //magma_sswap( 1, &vn1[pvt], 1, &vn1[k], 1 );
            //magma_sswap( 1, &vn2[pvt], 1, &vn2[k], 1 );
            magma_sswap(2, &vn1[pvt], n+offset, &vn1[k], n+offset);
#endif
        }
        /* Apply previous Householder reflectors to column K:
           A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'
           (the kernel also swaps columns k and pvt of A in the same pass) */
        hipLaunchKernelGGL(( magma_cswap_gemv_kernel), dim3((m + BLOCK_SIZE1-1) / BLOCK_SIZE1), dim3(BLOCK_SIZE1), 0, magma_stream ,
                           m, rk, k, A(0, 0), lda, F(k, 0), ldf, A(0, k), A(0,pvt));
        /* Generate elementary reflector H(k). */
        hipLaunchKernelGGL(( magma_cscale_kernel), dim3(1), dim3(BLOCK_SIZE), 0, magma_stream ,
                           m-rk, A(rk, k), &tau[k], &vn1[k], dAkk);
        // printf("m-rk = %d\n", m-rk);
        /* Compute Kth column of F:
           Compute  F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
        if (k < n-1) {
            magma_cgetvector( 1, &tau[k], 1, &tauk, 1 );
            magmablas_cgemv( MagmaConjTrans, m-rk, n,
                             tauk,   A( rk, 0 ), lda,
                             A( rk, k ), 1,
                             c_zero, auxv, 1 );
            if (k==0)
                magmablas_clacpy(MagmaUpperLower, n-k-1, 1, auxv+k+1, n-k-1, F( k+1, k ), n-k-1);
        }
        /* Incremental updating of F:
           F(1:N,K) := F(1:N,K)                        - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
           F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
                    := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
           so, F is (updated A)*V */
        if (k > 0) {
            /* I think we only need stricly lower-triangular part */
            hipLaunchKernelGGL(( magma_cgemv_kernel), dim3((n-k-1 + BLOCK_SIZE1 -1)/BLOCK_SIZE1), dim3(BLOCK_SIZE1), 0, magma_stream ,
                               n-k-1, k, F(k+1,0), ldf, auxv, auxv+k+1, F(k+1,k));
        }
        /* Update the current row of A:
           A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
        if (k < n-1) {
            i__1 = n - k - 1;
            i__2 = k + 1;
            /* left-looking update of rows,                     *
             * since F=A'v with original A, so no right-looking */
            hipLaunchKernelGGL(( magma_cgemv_kernel_adjust), dim3((n-k-1 + BLOCK_SIZE2-1)/BLOCK_SIZE2), dim3(BLOCK_SIZE2), 0, magma_stream,
                               n-k-1, k+1, A(rk, 0 ), lda, F(k+1,0 ), ldf, A(rk, k+1),
                               &vn1[k+1], &vn2[k+1], dAkk, dlsticc, dlsticcs);
            /* Pull the global "norm unreliable" flag back to the host. */
            magma_getmatrix(1,1, sizeof(int), dlsticc, 1, &lsticc, 1);
            // TTT: force not to recompute; has to be finally commented
            if ( nb<3 )
                lsticc = 0;
            // printf("k=%d  n-k = %d\n", k, n-k);
            // forcing recompute works! - forcing it requires changing dlsticcs as well, e.g.,
            // can be done in the kernel directly (magmablas_scnrm2_check_kernel)
            // if (k==16) lsticc = 1;
        }
        /* Update partial column norms. */
        /*
        if (rk < min(m, n+offset)-1){
            magmablas_scnrm2_row_check_adjust(n-k-1, tol3z, &vn1[k+1],
                                              &vn2[k+1], A(rk,k+1), lda, lsticcs);
        }
#if defined(PRECISION_d) || defined(PRECISION_z)
        magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#else
        magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#endif
        */
        /* Last column: restore the staged diagonal value into A. */
        if (k>=n-1)
            magmablas_clacpy(MagmaUpperLower, 1, 1, dAkk, 1, A(rk, k), 1);
        ++k;
    }
    // leave k as the last column done
    --k;
    *kb = k + 1;
    rk = offset + *kb - 1;
    //printf("actually factored = %d",*kb);
    /* Apply the block reflector to the rest of the matrix:
       A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
                                  A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)'  */
    if (*kb < min(n, m - offset)-1) {
        i__1 = m - rk - 1;
        i__2 = n - *kb;
        magma_cgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
                     c_neg_one, A(rk+1, 0  ), lda,
                                F(*kb,  0  ), ldf,
                     c_one,     A(rk+1, *kb), lda );
    }
    /* Recomputation of difficult columns. */
    if( lsticc > 0 ) {
        printf( " -- recompute dnorms --\n" );
        //magmablas_scnrm2_check(m-rk-1, n-*kb, A(rk+1,rk+1), lda,
        //                       &vn1[rk+1], &vn2[rk+1], dlsticcs);
        // There is a bug when we get to recompute
        hipLaunchKernelGGL(( magmablas_scnrm2_check_kernel), dim3(n-*kb), dim3(BLOCK_SIZE) , 0, 0,
                           m-rk-1, A(rk+1,rk+1), lda, &vn1[rk+1], &vn2[rk+1], dlsticc, dlsticcs);
    }
    magma_free(dlsticcs);
    return MAGMA_SUCCESS;
} /* magma_claqps */
| 3a3d310a2eed5ba7a162502c73c24db08445c17f.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Tue Aug 13 16:45:12 2013
*/
#include "common_magma.h"
#include <cblas.h>
#include "magma.h"
#define PRECISION_c
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
#else
#define BLOCK_SIZE 768
#endif
__global__ void magma_cgemv_kernel3(int m, const magmaFloatComplex * __restrict__ V, int ldv,
magmaFloatComplex *c, magmaFloatComplex *dwork,
magmaFloatComplex *tau);
/* --------------------------------------------------------------------------- */
/* Block-wide tree reduction: sums the n floats held in shared-memory array x,
   leaving the total in x[0].  n is the compile-time number of threads in the
   block; i is the calling thread's index.  Must be reached by every thread of
   the block, since it executes __syncthreads().
   NOTE(review): the in-line comment below about dropping barriers for < 16
   threads relies on implicit warp-level synchronization, which is not
   guaranteed on Volta+; the code correctly keeps every barrier. */
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
    __syncthreads();
    /* Each step folds the upper half of the remaining range onto the lower half. */
    // if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i] += x[i+  64]; } __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i] += x[i+  32]; } __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i] += x[i+  16]; } __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i] += x[i+   8]; } __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i] += x[i+   4]; } __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i] += x[i+   2]; } __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i] += x[i+   1]; } __syncthreads(); }
}
// end sum_reduce
/* --------------------------------------------------------------------------- */
#define BLOCK_SIZE1 192
/* Fused column swap + left-looking update, one thread per row:
   swaps b[row] and c[row]; the value stored into c additionally receives the
   pending reflector update  c[row] -= V(row,0:n-1) * conj(x(0:n-1))  for rows
   at or below the pivot row rk.
   Launch: ceil(m / BLOCK_SIZE1) blocks of BLOCK_SIZE1 threads. */
__global__ void
magma_cswap_gemv_kernel(int m, int rk, int n, const magmaFloatComplex * __restrict__ V, int ldv,
                        const magmaFloatComplex * __restrict__ x, int ldx, magmaFloatComplex *c, magmaFloatComplex *b)
{
    const int row = blockIdx.x * BLOCK_SIZE1 + threadIdx.x;
    if (row >= m)
        return;
    const magmaFloatComplex swapped = b[row];
    b[row] = c[row];
    magmaFloatComplex dot = MAGMA_C_ZERO;
    if (row >= rk) {
        const magmaFloatComplex *Vrow = V + row;
        for (int col = 0; col < n; ++col)
            dot += MAGMA_C_MUL( Vrow[col * ldv], MAGMA_C_CNJG( x[col * ldx] ) );
    }
    c[row] = swapped - dot;
}
/* Computes c = b - V*x for an m-by-n matrix V, one thread per row:
     c[row] = b[row] - sum_k V(row,k) * x[k].
   Launch: ceil(m / BLOCK_SIZE1) blocks of BLOCK_SIZE1 threads. */
__global__ void
magma_cgemv_kernel(int m, int n, const magmaFloatComplex * __restrict__ V, int ldv,
                   const magmaFloatComplex * __restrict__ x, magmaFloatComplex *b, magmaFloatComplex *c)
{
    const int row = blockIdx.x * BLOCK_SIZE1 + threadIdx.x;
    if (row >= m)
        return;
    magmaFloatComplex dot = MAGMA_C_ZERO;
    for (int col = 0; col < n; ++col)
        dot += MAGMA_C_MUL( V[row + col * ldv], x[col] );
    c[row] = b[row] - dot;
}
/* Generates an elementary Householder reflector from the n-vector dx0, using
   a single thread block of BLOCK_SIZE threads:
     1. computes ||dx0|| via a block reduction,
     2. thread 0 derives tau (-> *dtau) and beta (-> *dAkk),
     3. all threads rescale dx0 into the Householder vector,
     4. dx0[0] is temporarily set to one (the real diagonal value is in *dAkk).
   NOTE(review): the dxnorm argument is never read or written in this kernel —
   presumably kept for interface symmetry; confirm before removing.
   NOTE(review): when beta == 0 the shared `scale` is left uninitialized but is
   still used in the scaling loop below — verify callers never hit that path
   with a nonempty vector. */
__global__
void magma_cscale_kernel(int n, magmaFloatComplex* dx0,
                         magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex* dAkk)
{
    const int i = threadIdx.x;
    magmaFloatComplex tmp;
    __shared__ magmaFloatComplex scale;   // scaling factor, broadcast from thread 0
    /* === Compute the norm of dx0 === */
    magmaFloatComplex *dx = dx0;
    __shared__ float sum[ BLOCK_SIZE ];
    float re, lsum;
    lsum = 0;
    /* Each thread accumulates |dx[k]|^2 over a BLOCK_SIZE-strided slice. */
    for( int k = i; k < n; k += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
        re = dx[k];
        lsum += re*re;
#else
        re = MAGMA_C_REAL( dx[k] );
        float im = MAGMA_C_IMAG( dx[k] );
        lsum += re*re + im*im;
#endif
    }
    sum[i] = lsum;
    sum_reduce< BLOCK_SIZE >( i, sum );
    /* === Compute the scaling factor === */
    if (i==0){
        float beta = sqrt(sum[0]);
        if ( beta == 0 ) {
            *dtau = MAGMA_C_ZERO;
        }
        else {
            tmp = dx0[0];
#if (defined(PRECISION_s) || defined(PRECISION_d))
            beta  = -copysign( beta, tmp );
            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau = (beta - tmp) / beta;
            *dAkk = beta;
            scale = 1. / (tmp - beta);
#else
            float alphar = MAGMA_C_REAL(tmp), alphai = MAGMA_C_IMAG(tmp);
            beta  = -copysign( beta, alphar );
            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
            *dAkk = MAGMA_C_MAKE(beta, 0.);
            tmp   = MAGMA_C_MAKE( alphar - beta, alphai);
            scale = MAGMA_C_DIV( MAGMA_C_ONE, tmp);
#endif
        }
    }
    __syncthreads();   // publish `scale` to the whole block before use
    /* === Scale the vector === */
    for(int j=i; j<n; j+=BLOCK_SIZE)
        dx0[j] = MAGMA_C_MUL(dx0[j], scale);
    /* === Make temporary the first element to 1; value is stored in dAkk === */
    if (i==0)
        dx0[0] = MAGMA_C_ONE;
}
/* Complex-valued counterpart of sum_reduce: block-wide tree reduction of the
   n magmaFloatComplex values in shared array x, total left in x[0].
   n is the compile-time thread count; i is the caller's thread index.
   Must be reached by every thread of the block (executes __syncthreads()). */
template< int n >
__device__ void zsum_reduce( /*int n,*/ int i, magmaFloatComplex* x )
{
    __syncthreads();
    if ( n >  512 ) { if ( i <  512 && i +  512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
    if ( n >  256 ) { if ( i <  256 && i +  256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
    if ( n >  128 ) { if ( i <  128 && i +  128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
    if ( n >   64 ) { if ( i <   64 && i +   64 < n ) { x[i] += x[i+  64]; } __syncthreads(); }
    if ( n >   32 ) { if ( i <   32 && i +   32 < n ) { x[i] += x[i+  32]; } __syncthreads(); }
    // probably don't need __syncthreads for < 16 threads
    // because of implicit warp level synchronization.
    if ( n >   16 ) { if ( i <   16 && i +   16 < n ) { x[i] += x[i+  16]; } __syncthreads(); }
    if ( n >    8 ) { if ( i <    8 && i +    8 < n ) { x[i] += x[i+   8]; } __syncthreads(); }
    if ( n >    4 ) { if ( i <    4 && i +    4 < n ) { x[i] += x[i+   4]; } __syncthreads(); }
    if ( n >    2 ) { if ( i <    2 && i +    2 < n ) { x[i] += x[i+   2]; } __syncthreads(); }
    if ( n >    1 ) { if ( i <    1 && i +    1 < n ) { x[i] += x[i+   1]; } __syncthreads(); }
}
/* One thread block per column of V: computes one entry of tau * V' * c,
     dwork[blockIdx.x] = (*tau) * V(:,blockIdx.x)' * c,
   using a shared-memory block reduction of the per-thread partial dots. */
__global__ void
magma_cgemv_kernel1(int m, magmaFloatComplex *tau, const magmaFloatComplex * __restrict__ V, int ldv,
                    const magmaFloatComplex * __restrict__ c,
                    magmaFloatComplex *dwork)
{
    const int i = threadIdx.x;
    const magmaFloatComplex *dV = V + (blockIdx.x) * ldv;   // this block's column
    __shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
    magmaFloatComplex lsum;
    /* lsum := v' * C  (each thread covers a BLOCK_SIZE-strided slice) */
    lsum = MAGMA_C_ZERO;
    for( int j = i; j < m; j += BLOCK_SIZE )
        lsum += MAGMA_C_MUL( MAGMA_C_CNJG( dV[j] ), c[j] );
    sum[i] = lsum;
    zsum_reduce< BLOCK_SIZE >( i, sum );
    __syncthreads();   // barrier before thread 0 reads the reduced value
    if (i==0)
        dwork [blockIdx.x] = (*tau)*sum[0];
}
#define BLOCK_SIZE2 192
#if (defined(PRECISION_z) || defined(PRECISION_d))
#define TOL 1.e-8
#else
#define TOL 1.e-4
#endif
/* One thread per column j of the trailing block:
     1. applies the accumulated reflector update to row entry C[j*lda],
        C[j*lda] -= conj(B(j, k-1)) + sum_{m<k-1} conj(B(j,m)) * A[m*lda],
     2. downdates the partial column norm xnorm[j],
     3. flags the column in lsticcs[j] (and the global flag *lsticc) when the
        downdated norm became numerically unreliable (ratio test vs. xnorm2),
   and finally restores the diagonal value *Akk into A[(k-1)*lda] (thread j==0). */
__global__ void
magma_cgemv_kernel_adjust(int n, int k, magmaFloatComplex * A, int lda,
                          magmaFloatComplex *B, int ldb, magmaFloatComplex *C,
                          float *xnorm, float *xnorm2, magmaFloatComplex *Akk, int *lsticc, int *lsticcs)
{
    const int i = threadIdx.x;
    const int j = i + BLOCK_SIZE2 * blockIdx.x;
    magmaFloatComplex sum;
    float temp, oldnorm;
    if (j<n) {
        B += j;
        /* The (k-1)-th term enters with coefficient 1 (dx0[0] was forced to 1). */
        sum = MAGMA_C_CNJG( B[(k-1)*ldb] );
        // sum = MAGMA_C_ZERO;
        for(int m=0; m<k-1; m++) {
            sum += MAGMA_C_MUL( MAGMA_C_CNJG( B[m*ldb] ), A[m*lda] );
        }
        C[j*lda] -= sum;
        /* Norm downdate: ||col||^2 shrinks by the eliminated component. */
        oldnorm = xnorm[j];
        temp = MAGMA_C_ABS( C[j*lda] ) / oldnorm;
        temp = (1.0 + temp) * (1.0 - temp);
        temp = oldnorm * sqrt(temp);
        xnorm[j] = temp;
        // Below 'j' was 'i'; was that a bug?
        float temp2 = xnorm[j] / xnorm2[j];
        temp2 = temp*(temp2 * temp2);
        if (temp2 <= TOL){
            *lsticc = 1;      // global "some norm needs recomputation" flag
            lsticcs[j] = 1;   // per-column flag, consumed by the recompute kernel
        }
    }
    if (j==0)
        A[(k-1)*lda] = *Akk;
/*
    __syncthreads();
    // Check if the norm has to be recomputed
    if (blockIdx.x==0) {
        //if (2.*temp < oldnorm) {
            //printf("recompute norm\n");
            magmaFloatComplex *dx = C+blockIdx.x*lda+1;
            __shared__ float sum[ BLOCK_SIZE2 ];
            float re, lsum;
            // get norm of dx
            lsum = 0;
            for( int k = i; k < n1; k += BLOCK_SIZE2 ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
                re = dx[k];
                lsum += re*re;
#else
                re = MAGMA_C_REAL( dx[k] );
                float im = MAGMA_C_IMAG( dx[k] );
                lsum += re*re + im*im;
#endif
            }
            sum[i] = lsum;
            sum_reduce< BLOCK_SIZE2 >( i, sum );
            if (i==0){
                printf("adjusted = %f recomputed = %f\n", xnorm[blockIdx.x], sqrt(sum[0]));
                xnorm[blockIdx.x] = sqrt(sum[0]);
            }
    }
    // }
*/
}
/* Recomputes flagged column norms: one thread block per column of da.
   Block b recomputes ||da(:,b)||_2 only when dlsticcs[b] != 0, clearing the
   flag; both dxnorm[b] and dxnorm2[b] receive the fresh norm.  Block 0 /
   thread 0 also resets the global flag *dlsticc.
   The early return below is uniform per block (condition depends only on
   blockIdx.x), so it cannot strand threads at the barriers in sum_reduce.
   NOTE(review): *dlsticc is cleared by block 0 while other blocks may still
   be running; confirm callers only read it after the kernel completes. */
__global__ void
magmablas_scnrm2_check_kernel(int m, magmaFloatComplex *da, int ldda,
                              float *dxnorm, float *dxnorm2,
                              int *dlsticc, int *dlsticcs)
{
    const int i = threadIdx.x;
    magmaFloatComplex *dx = da + blockIdx.x * ldda;
    __shared__ float sum[ BLOCK_SIZE ];
    float re, lsum;
    if (blockIdx.x == 0 && i==0)
        *dlsticc = 0;
    // get norm of dx only if lsticc[blockIdx] != 0
    if( dlsticcs[blockIdx.x] == 0 )
        return;
    else
        dlsticcs[blockIdx.x] = 0;
    lsum = 0;
    for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
        re = dx[j];
        lsum += re*re;
#else
        re = MAGMA_C_REAL( dx[j] );
        float im = MAGMA_C_IMAG( dx[j] );
        lsum += re*re + im*im;
#endif
    }
    sum[i] = lsum;
    sum_reduce< BLOCK_SIZE >( i, sum );
    if (i==0){
        dxnorm[blockIdx.x]  = sqrt(sum[0]);
        dxnorm2[blockIdx.x] = sqrt(sum[0]);
    }
}
/* --------------------------------------------------------------------------- */
extern "C" magma_int_t
magma_claqps3_gpu(magma_int_t m, magma_int_t n, magma_int_t offset,
                  magma_int_t nb, magma_int_t *kb,
                  magmaFloatComplex *A,  magma_int_t lda,
                  magma_int_t *jpvt, magmaFloatComplex *tau,
                  float *vn1, float *vn2,
                  magmaFloatComplex *auxv,
                  magmaFloatComplex *F,  magma_int_t ldf)
{
/*  -- MAGMA (version 1.4.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       August 2013
    Purpose
    =======
    CLAQPS computes a step of QR factorization with column pivoting
    of a complex M-by-N matrix A by using Blas-3.  It tries to factorize
    NB columns from A starting from the row OFFSET+1, and updates all
    of the matrix with Blas-3 xGEMM.
    In some cases, due to catastrophic cancellations, it cannot
    factorize NB columns.  Hence, the actual number of factorized
    columns is returned in KB.
    Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
    Arguments
    =========
    M       (input) INTEGER
            The number of rows of the matrix A. M >= 0.
    N       (input) INTEGER
            The number of columns of the matrix A. N >= 0
    OFFSET  (input) INTEGER
            The number of rows of A that have been factorized in
            previous steps.
    NB      (input) INTEGER
            The number of columns to factorize.
    KB      (output) INTEGER
            The number of columns actually factorized.
    A       (input/output) COMPLEX*16 array, dimension (LDA,N)
            On entry, the M-by-N matrix A.
            On exit, block A(OFFSET+1:M,1:KB) is the triangular
            factor obtained and block A(1:OFFSET,1:N) has been
            accordingly pivoted, but no factorized.
            The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
            been updated.
    LDA     (input) INTEGER
            The leading dimension of the array A. LDA >= max(1,M).
    JPVT    (input/output) INTEGER array, dimension (N)
            JPVT(I) = K <==> Column K of the full matrix A has been
            permuted into position I in AP.
    TAU     (output) COMPLEX*16 array, dimension (KB)
            The scalar factors of the elementary reflectors.
    VN1     (input/output) DOUBLE PRECISION array, dimension (N)
            The vector with the partial column norms.
    VN2     (input/output) DOUBLE PRECISION array, dimension (N)
            The vector with the exact column norms.
    AUXV    (input/output) COMPLEX*16 array, dimension (NB)
            Auxiliar vector.
    F       (input/output) COMPLEX*16 array, dimension (LDF,NB)
            Matrix F' = L*Y'*A.
    LDF     (input) INTEGER
            The leading dimension of the array F. LDF >= max(1,N).
    =====================================================================    */
#define A(i, j) (A + (i) + (j)*(lda ))
#define F(i, j) (F + (i) + (j)*(ldf ))
    magmaFloatComplex c_zero    = MAGMA_C_MAKE( 0.,0.);
    magmaFloatComplex c_one     = MAGMA_C_MAKE( 1.,0.);
    magmaFloatComplex c_neg_one = MAGMA_C_MAKE(-1.,0.);
    magma_int_t ione = 1;
    magma_int_t i__1, i__2;
    magma_int_t k, rk;
    magmaFloatComplex tauk;
    magma_int_t pvt, itemp;
    /* First element of auxv is borrowed to stage the diagonal value A(rk,k). */
    magmaFloatComplex *dAkk = auxv;
    auxv+=1;
    /* Per-column "norm unreliable" flags, plus one global flag at index n. */
    int lsticc, *dlsticc, *dlsticcs;
    magma_malloc( (void**) &dlsticcs, (n+1)*sizeof(int) );  // NOTE(review): allocation status unchecked
    cudaMemset( dlsticcs, 0, (n+1)*sizeof(int) );
    dlsticc = dlsticcs + n;
    // float tol3z = magma_ssqrt( lapackf77_slamch("Epsilon"));
    lsticc = 0;
    k = 0;
    /* Factor up to nb columns, stopping early if a norm downdate went bad. */
    while( k < nb && lsticc == 0 ) {
        rk = offset + k;
        /* Determine ith pivot column and swap if necessary */
        pvt = k - 1 + magma_isamax( n-k, &vn1[k], ione );   // isamax returns a 1-based index
        if (pvt != k) {
            magmablas_cswap( k, F(pvt,0), ldf, F(k,0), ldf);
            itemp     = jpvt[pvt];
            jpvt[pvt] = jpvt[k];
            jpvt[k]   = itemp;
#if (defined(PRECISION_d) || defined(PRECISION_z))
            //magma_dswap( 1, &vn1[pvt], 1, &vn1[k], 1 );
            //magma_dswap( 1, &vn2[pvt], 1, &vn2[k], 1 );
            magma_dswap( 2, &vn1[pvt], n+offset, &vn1[k], n+offset);
#else
            //magma_sswap( 1, &vn1[pvt], 1, &vn1[k], 1 );
            //magma_sswap( 1, &vn2[pvt], 1, &vn2[k], 1 );
            magma_sswap(2, &vn1[pvt], n+offset, &vn1[k], n+offset);
#endif
        }
        /* Apply previous Householder reflectors to column K:
           A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'
           (the kernel also swaps columns k and pvt of A in the same pass) */
        magma_cswap_gemv_kernel<<< (m + BLOCK_SIZE1-1) / BLOCK_SIZE1, BLOCK_SIZE1, 0, magma_stream >>>
                               ( m, rk, k, A(0, 0), lda, F(k, 0), ldf, A(0, k), A(0,pvt));
        /* Generate elementary reflector H(k). */
        magma_cscale_kernel<<< 1, BLOCK_SIZE, 0, magma_stream >>>
                           (m-rk, A(rk, k), &tau[k], &vn1[k], dAkk);
        // printf("m-rk = %d\n", m-rk);
        /* Compute Kth column of F:
           Compute  F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
        if (k < n-1) {
            magma_cgetvector( 1, &tau[k], 1, &tauk, 1 );
            magmablas_cgemv( MagmaConjTrans, m-rk, n,
                             tauk,   A( rk, 0 ), lda,
                             A( rk, k ), 1,
                             c_zero, auxv, 1 );
            if (k==0)
                magmablas_clacpy(MagmaUpperLower, n-k-1, 1, auxv+k+1, n-k-1, F( k+1, k ), n-k-1);
        }
        /* Incremental updating of F:
           F(1:N,K) := F(1:N,K)                        - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
           F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
                    := tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
           so, F is (updated A)*V */
        if (k > 0) {
            /* I think we only need stricly lower-triangular part */
            magma_cgemv_kernel<<< (n-k-1 + BLOCK_SIZE1 -1)/BLOCK_SIZE1, BLOCK_SIZE1, 0, magma_stream >>>
                              (n-k-1, k, F(k+1,0), ldf, auxv, auxv+k+1, F(k+1,k));
        }
        /* Update the current row of A:
           A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
        if (k < n-1) {
            i__1 = n - k - 1;
            i__2 = k + 1;
            /* left-looking update of rows,                     *
             * since F=A'v with original A, so no right-looking */
            magma_cgemv_kernel_adjust<<<(n-k-1 + BLOCK_SIZE2-1)/BLOCK_SIZE2, BLOCK_SIZE2, 0, magma_stream>>>
                                     (n-k-1, k+1, A(rk, 0 ), lda, F(k+1,0 ), ldf, A(rk, k+1),
                                      &vn1[k+1], &vn2[k+1], dAkk, dlsticc, dlsticcs);
            /* Pull the global "norm unreliable" flag back to the host. */
            magma_getmatrix(1,1, sizeof(int), dlsticc, 1, &lsticc, 1);
            // TTT: force not to recompute; has to be finally commented
            if ( nb<3 )
                lsticc = 0;
            // printf("k=%d  n-k = %d\n", k, n-k);
            // forcing recompute works! - forcing it requires changing dlsticcs as well, e.g.,
            // can be done in the kernel directly (magmablas_scnrm2_check_kernel)
            // if (k==16) lsticc = 1;
        }
        /* Update partial column norms. */
        /*
        if (rk < min(m, n+offset)-1){
            magmablas_scnrm2_row_check_adjust(n-k-1, tol3z, &vn1[k+1],
                                              &vn2[k+1], A(rk,k+1), lda, lsticcs);
        }
#if defined(PRECISION_d) || defined(PRECISION_z)
        magma_dgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#else
        magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#endif
        */
        /* Last column: restore the staged diagonal value into A. */
        if (k>=n-1)
            magmablas_clacpy(MagmaUpperLower, 1, 1, dAkk, 1, A(rk, k), 1);
        ++k;
    }
    // leave k as the last column done
    --k;
    *kb = k + 1;
    rk = offset + *kb - 1;
    //printf("actually factored = %d",*kb);
    /* Apply the block reflector to the rest of the matrix:
       A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
                                  A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)'  */
    if (*kb < min(n, m - offset)-1) {
        i__1 = m - rk - 1;
        i__2 = n - *kb;
        magma_cgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
                     c_neg_one, A(rk+1, 0  ), lda,
                                F(*kb,  0  ), ldf,
                     c_one,     A(rk+1, *kb), lda );
    }
    /* Recomputation of difficult columns. */
    if( lsticc > 0 ) {
        printf( " -- recompute dnorms --\n" );
        //magmablas_scnrm2_check(m-rk-1, n-*kb, A(rk+1,rk+1), lda,
        //                       &vn1[rk+1], &vn2[rk+1], dlsticcs);
        // There is a bug when we get to recompute
        magmablas_scnrm2_check_kernel<<< n-*kb, BLOCK_SIZE >>>
                                     ( m-rk-1, A(rk+1,rk+1), lda, &vn1[rk+1], &vn2[rk+1], dlsticc, dlsticcs);
    }
    magma_free(dlsticcs);
    return MAGMA_SUCCESS;
} /* magma_claqps */
|
a44932a5f8487c6957641861d499c05739d1df78.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. A 1D thread block and 1D grid are used. sumArraysOnHost sequentially
* iterates through vector elements on the host.
*/
// Fill ip[0..size) with pseudo-random values in [0.0, 25.5]
// (low byte of rand() scaled by 1/10).
void initialDataSingle(float *ip, const int size){
    for (int idx = 0; idx < size; idx++) {
        ip[idx] = (float)(rand() & 0xFF) / 10.0f;
    }
}
// Fill two arrays from one shared pseudo-random sequence so that
// ip2[i] == ip[i] for every element; values lie in [0.0, 25.5].
void initialDataDouble(float *ip, float *ip2, const int size){
    for (int idx = 0; idx < size; idx++) {
        float v = (float)(rand() & 0xFF) / 10.0f;
        ip[idx] = v;
        ip2[idx] = v;
    }
}
// Fill three arrays with identical pseudo-random contents:
// ip3[i] == ip2[i] == ip[i], each in [0.0, 25.5].
void initialDataTriple(float *ip, float *ip2, float *ip3, const int size){
    for (int idx = 0; idx < size; idx++) {
        float v = (float)(rand() & 0xFF) / 10.0f;
        ip[idx] = v;
        ip2[idx] = v;
        ip3[idx] = v;
    }
}
// Zero all three nx*ny buffers, then write identical pseudo-random values
// only on the boundary of the grid: the first row (i < nx), the last
// elements (i > nx*ny-nx), and the first/last column of every interior row.
// Interior cells stay 0. rand() consumption order matches the original.
void initialDataEdge(float *ip, float *ip2, float *ip3, int nx, int ny){
    size_t bytes = (size_t)nx * ny * sizeof(float);
    memset(ip, 0, bytes);
    memset(ip2, 0, bytes);
    memset(ip3, 0, bytes);
    int total = nx * ny;
    for (int idx = 0; idx < total; idx++) {
        if (idx < nx || idx > (total - nx)) {
            // top row and bottom boundary cells
            float v = (float)(rand() & 0xFF) / 10.0f;
            ip3[idx] = v; ip2[idx] = v; ip[idx] = v;
        } else if (idx % nx == 0) {
            // first column of this row ...
            float v = (float)(rand() & 0xFF) / 10.0f;
            ip3[idx] = v; ip2[idx] = v; ip[idx] = v;
            // ... and last column of the same row
            int last = idx + nx - 1;
            float w = (float)(rand() & 0xFF) / 10.0f;
            ip3[last] = w; ip2[last] = w; ip[last] = w;
        }
    }
}
// Print `size` floats with 4 decimals, tab-separated, starting a new line
// every nx values (so the flat array is rendered as a matrix) and ending
// with a trailing newline.
void imprime(float *ip, const int size, int nx){
    for (int idx = 0; idx < size; idx++) {
        if (idx % nx == 0)
            printf("\n");
        printf("%.4f\t", ip[idx]);
    }
    printf("\n");
}
// Sequential Jacobi reference on the host.
// `matriz` and `host_ref` are dim_x*dim_y row-major grids with row stride
// dim_y (dim_x rows of dim_y values). Each of the iter_max sweeps writes the
// averaged interior into `host_ref` and then copies the whole grid back into
// `matriz` so the next sweep reads the updated values. Boundary cells are
// never recomputed; the caller is expected to have initialised both buffers
// identically.
void jacobiOnHost(int iter_max, float *matriz, float* host_ref, int dim_x, int dim_y){
int iter = 0;  // NOTE(review): incremented but never read - leftover counter
float *ia = matriz;
float *Anew = host_ref;
for (int k = 0; k < iter_max; ++k) {
// Skip the first (boundary) row: start both cursors at row 1.
ia = matriz + dim_y;
Anew = host_ref + dim_y;
for( int i = 1; i < dim_x-1; i++) {
for(int j = 1; j < dim_y-1; j++) {
// Debug trace of the four stencil neighbours (left over from development).
printf("%.4f, %.4f, %.4f, %.4f\n",ia[j+1], ia[j-1], ia[j-dim_y], ia[j+dim_y]);
// 4-point Jacobi stencil: right/left neighbours (j+1, j-1) and the
// cells one row up/down (j-dim_y, j+dim_y).
Anew[j] = 0.25 * (ia[j+1] + ia[j-1] + ia[j-dim_y] + ia[j+dim_y]);
}
ia += dim_y;
Anew += dim_y;
}
// Copy the updated grid back so the next sweep reads the new values.
ia = matriz;
Anew = host_ref;
for(int i = 0; i < (dim_x * dim_y); i++){
ia[i] = Anew[i];
}
iter++;
}
return;
}
// grid 1D block 1D
// Naive, intentionally-sequential Jacobi sweep on the device (same layout
// and stencil as jacobiOnHost: dim_x rows, row stride dim_y).
// BUG FIX: the original body never used threadIdx/blockIdx, so every thread
// of the launch redundantly ran the entire computation and raced with the
// others on the MatA/MatC writes (no synchronization between the compute
// and copy-back phases). The result was nondeterministic. Restrict the work
// to a single thread so the output is well-defined for any launch config.
__global__ void jacobiOnGpuNaive(int iter_max, float *MatA, float *MatC, int dim_x, int dim_y){
    // All threads except the global first one exit immediately.
    if (blockIdx.x != 0 || threadIdx.x != 0) return;
    for (int k = 0; k < iter_max; ++k) {
        float *ia = MatA + dim_y;      // skip the top boundary row
        float *Anew = MatC + dim_y;
        for (int i = 1; i < dim_x-1; i++) {
            for (int j = 1; j < dim_y-1; j++) {
                // Debug trace matching the host reference's output.
                printf("%.4f, %.4f, %.4f, %.4f\n", ia[j+1], ia[j-1], ia[j-dim_y], ia[j+dim_y]);
                // 0.25 kept as a double literal so the arithmetic matches
                // the host reference bit-for-bit.
                Anew[j] = 0.25 * (ia[j+1] + ia[j-1] + ia[j-dim_y] + ia[j+dim_y]);
            }
            ia += dim_y;
            Anew += dim_y;
        }
        // Copy the updated grid back so the next iteration reads it.
        for (int i = 0; i < (dim_x * dim_y); i++) {
            MatA[i] = MatC[i];
        }
    }
}
// Element-wise comparison of the host reference against the GPU result;
// prints the first mismatching pair and a match/no-match summary.
// BUG FIX: the original tested `abs(hostRef[i] - gpuRef[i])`, and `abs`
// resolves to the integer overload here - any difference smaller than 1.0
// was truncated to 0, so real mismatches below 1.0 were silently accepted.
// Compute the absolute value in floating point instead (no new headers).
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < N; i++){
        double diff = (double)hostRef[i] - (double)gpuRef[i];
        if (diff < 0.0) diff = -diff;   // floating-point |diff|
        if (diff > epsilon){
            match = 0;
            printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
            break;
        }
    }
    if (match)
        printf("Arrays match.\n\n");
    else
        printf("Arrays do not match.\n\n");
}
// Driver: run the Jacobi sweep on host and device over identical data and
// compare the results.
int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);

    // set up device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));

    // Matrix layout: ny rows of nx values each, row-major with stride nx
    // (imprime and the jacobiOnHost call below both assume this).
    int iter_max = 1;
    int nx = 3;//1 << 14;
    int ny = 5;//1 << 14;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);
    printf("Matrix size: nx %d ny %d\n", nx, ny);

    // malloc host memory
    float *h_Matriz, *hostRef, *gpuRef;
    h_Matriz = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);

    // initialize data at host side: all three buffers get identical contents
    double iStart = seconds();
    //initialDataEdge(h_Matriz,hostRef,gpuRef, nx, ny);
    //imprime(h_Matriz, nxy,nx);
    initialDataTriple(h_Matriz, hostRef, gpuRef, nxy);
    double iElaps = seconds() - iStart;
    printf("initialize matrix elapsed %f sec\n", iElaps);
    //imprime(h_Matriz, nxy,nx);

    // Jacobi at host side for result checks: dim_x = ny rows, dim_y = nx stride
    iStart = seconds();
    jacobiOnHost(iter_max, h_Matriz, hostRef, ny, nx);
    iElaps = seconds() - iStart;
    printf("jacobiOnHost elapsed %f sec\n", iElaps);
    imprime(hostRef, nxy, nx);

    // malloc device global memory
    float *d_MatA, *d_MatC;
    CHECK(hipMalloc((void **)&d_MatA, nBytes));
    CHECK(hipMalloc((void **)&d_MatC, nBytes));

    // transfer data from host to device
    CHECK(hipMemcpy(d_MatA, gpuRef, nBytes, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(d_MatC, gpuRef, nBytes, hipMemcpyHostToDevice));

    // invoke kernel at host side
    int dimx = 32;//32
    dim3 block(dimx, 1);
    dim3 grid((nx + block.x - 1) / block.x, 1);
    iStart = seconds();
    // BUG FIX: pass (ny, nx) exactly like the jacobiOnHost call above.
    // The original passed (nx, ny), so the device sweep used a different
    // row stride than the host reference and checkResult always reported
    // a mismatch.
    hipLaunchKernelGGL(( jacobiOnGpuNaive), dim3(grid), dim3(block), 0, 0, iter_max, d_MatA, d_MatC, ny, nx);
    CHECK(hipDeviceSynchronize());
    iElaps = seconds() - iStart;
    printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
           grid.y,
           block.x, block.y, iElaps);
    // check kernel error (launch-config errors surface here)
    CHECK(hipGetLastError());

    // copy kernel result back to host side
    CHECK(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost));
    imprime(gpuRef, nxy, nx);

    // check device results
    checkResult(hostRef, gpuRef, nxy);

    // free device global memory
    CHECK(hipFree(d_MatA));
    CHECK(hipFree(d_MatC));

    // free host memory
    free(h_Matriz);
    free(hostRef);
    free(gpuRef);

    // reset device
    CHECK(hipDeviceReset());
    return (0);
}
| a44932a5f8487c6957641861d499c05739d1df78.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. A 1D thread block and 1D grid are used. sumArraysOnHost sequentially
* iterates through vector elements on the host.
*/
// Fill ip[0..size) with pseudo-random values in [0.0, 25.5]
// (low byte of rand() scaled by 1/10).
void initialDataSingle(float *ip, const int size){
    for (int idx = 0; idx < size; idx++) {
        ip[idx] = (float)(rand() & 0xFF) / 10.0f;
    }
}
// Fill two arrays from one shared pseudo-random sequence so that
// ip2[i] == ip[i] for every element; values lie in [0.0, 25.5].
void initialDataDouble(float *ip, float *ip2, const int size){
    for (int idx = 0; idx < size; idx++) {
        float v = (float)(rand() & 0xFF) / 10.0f;
        ip[idx] = v;
        ip2[idx] = v;
    }
}
// Fill three arrays with identical pseudo-random contents:
// ip3[i] == ip2[i] == ip[i], each in [0.0, 25.5].
void initialDataTriple(float *ip, float *ip2, float *ip3, const int size){
    for (int idx = 0; idx < size; idx++) {
        float v = (float)(rand() & 0xFF) / 10.0f;
        ip[idx] = v;
        ip2[idx] = v;
        ip3[idx] = v;
    }
}
// Zero all three nx*ny buffers, then write identical pseudo-random values
// only on the boundary of the grid: the first row (i < nx), the last
// elements (i > nx*ny-nx), and the first/last column of every interior row.
// Interior cells stay 0. rand() consumption order matches the original.
void initialDataEdge(float *ip, float *ip2, float *ip3, int nx, int ny){
    size_t bytes = (size_t)nx * ny * sizeof(float);
    memset(ip, 0, bytes);
    memset(ip2, 0, bytes);
    memset(ip3, 0, bytes);
    int total = nx * ny;
    for (int idx = 0; idx < total; idx++) {
        if (idx < nx || idx > (total - nx)) {
            // top row and bottom boundary cells
            float v = (float)(rand() & 0xFF) / 10.0f;
            ip3[idx] = v; ip2[idx] = v; ip[idx] = v;
        } else if (idx % nx == 0) {
            // first column of this row ...
            float v = (float)(rand() & 0xFF) / 10.0f;
            ip3[idx] = v; ip2[idx] = v; ip[idx] = v;
            // ... and last column of the same row
            int last = idx + nx - 1;
            float w = (float)(rand() & 0xFF) / 10.0f;
            ip3[last] = w; ip2[last] = w; ip[last] = w;
        }
    }
}
// Print `size` floats with 4 decimals, tab-separated, starting a new line
// every nx values (so the flat array is rendered as a matrix) and ending
// with a trailing newline.
void imprime(float *ip, const int size, int nx){
    for (int idx = 0; idx < size; idx++) {
        if (idx % nx == 0)
            printf("\n");
        printf("%.4f\t", ip[idx]);
    }
    printf("\n");
}
// Sequential Jacobi reference on the host.
// `matriz` and `host_ref` are dim_x*dim_y row-major grids with row stride
// dim_y (dim_x rows of dim_y values). Each of the iter_max sweeps writes the
// averaged interior into `host_ref` and then copies the whole grid back into
// `matriz` so the next sweep reads the updated values. Boundary cells are
// never recomputed; the caller is expected to have initialised both buffers
// identically.
void jacobiOnHost(int iter_max, float *matriz, float* host_ref, int dim_x, int dim_y){
int iter = 0;  // NOTE(review): incremented but never read - leftover counter
float *ia = matriz;
float *Anew = host_ref;
for (int k = 0; k < iter_max; ++k) {
// Skip the first (boundary) row: start both cursors at row 1.
ia = matriz + dim_y;
Anew = host_ref + dim_y;
for( int i = 1; i < dim_x-1; i++) {
for(int j = 1; j < dim_y-1; j++) {
// Debug trace of the four stencil neighbours (left over from development).
printf("%.4f, %.4f, %.4f, %.4f\n",ia[j+1], ia[j-1], ia[j-dim_y], ia[j+dim_y]);
// 4-point Jacobi stencil: right/left neighbours (j+1, j-1) and the
// cells one row up/down (j-dim_y, j+dim_y).
Anew[j] = 0.25 * (ia[j+1] + ia[j-1] + ia[j-dim_y] + ia[j+dim_y]);
}
ia += dim_y;
Anew += dim_y;
}
// Copy the updated grid back so the next sweep reads the new values.
ia = matriz;
Anew = host_ref;
for(int i = 0; i < (dim_x * dim_y); i++){
ia[i] = Anew[i];
}
iter++;
}
return;
}
// grid 1D block 1D
// Naive, intentionally-sequential Jacobi sweep on the device (same layout
// and stencil as jacobiOnHost: dim_x rows, row stride dim_y).
// BUG FIX: the original body never used threadIdx/blockIdx, so every thread
// of the launch redundantly ran the entire computation and raced with the
// others on the MatA/MatC writes (no synchronization between the compute
// and copy-back phases). The result was nondeterministic. Restrict the work
// to a single thread so the output is well-defined for any launch config.
__global__ void jacobiOnGpuNaive(int iter_max, float *MatA, float *MatC, int dim_x, int dim_y){
    // All threads except the global first one exit immediately.
    if (blockIdx.x != 0 || threadIdx.x != 0) return;
    for (int k = 0; k < iter_max; ++k) {
        float *ia = MatA + dim_y;      // skip the top boundary row
        float *Anew = MatC + dim_y;
        for (int i = 1; i < dim_x-1; i++) {
            for (int j = 1; j < dim_y-1; j++) {
                // Debug trace matching the host reference's output.
                printf("%.4f, %.4f, %.4f, %.4f\n", ia[j+1], ia[j-1], ia[j-dim_y], ia[j+dim_y]);
                // 0.25 kept as a double literal so the arithmetic matches
                // the host reference bit-for-bit.
                Anew[j] = 0.25 * (ia[j+1] + ia[j-1] + ia[j-dim_y] + ia[j+dim_y]);
            }
            ia += dim_y;
            Anew += dim_y;
        }
        // Copy the updated grid back so the next iteration reads it.
        for (int i = 0; i < (dim_x * dim_y); i++) {
            MatA[i] = MatC[i];
        }
    }
}
// Element-wise comparison of the host reference against the GPU result;
// prints the first mismatching pair and a match/no-match summary.
// BUG FIX: the original tested `abs(hostRef[i] - gpuRef[i])`, and `abs`
// resolves to the integer overload here - any difference smaller than 1.0
// was truncated to 0, so real mismatches below 1.0 were silently accepted.
// Compute the absolute value in floating point instead (no new headers).
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < N; i++){
        double diff = (double)hostRef[i] - (double)gpuRef[i];
        if (diff < 0.0) diff = -diff;   // floating-point |diff|
        if (diff > epsilon){
            match = 0;
            printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
            break;
        }
    }
    if (match)
        printf("Arrays match.\n\n");
    else
        printf("Arrays do not match.\n\n");
}
// Driver: run the Jacobi sweep on host and device over identical data and
// compare the results.
int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);

    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    // Matrix layout: ny rows of nx values each, row-major with stride nx
    // (imprime and the jacobiOnHost call below both assume this).
    int iter_max = 1;
    int nx = 3;//1 << 14;
    int ny = 5;//1 << 14;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);
    printf("Matrix size: nx %d ny %d\n", nx, ny);

    // malloc host memory
    float *h_Matriz, *hostRef, *gpuRef;
    h_Matriz = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);

    // initialize data at host side: all three buffers get identical contents
    double iStart = seconds();
    //initialDataEdge(h_Matriz,hostRef,gpuRef, nx, ny);
    //imprime(h_Matriz, nxy,nx);
    initialDataTriple(h_Matriz, hostRef, gpuRef, nxy);
    double iElaps = seconds() - iStart;
    printf("initialize matrix elapsed %f sec\n", iElaps);
    //imprime(h_Matriz, nxy,nx);

    // Jacobi at host side for result checks: dim_x = ny rows, dim_y = nx stride
    iStart = seconds();
    jacobiOnHost(iter_max, h_Matriz, hostRef, ny, nx);
    iElaps = seconds() - iStart;
    printf("jacobiOnHost elapsed %f sec\n", iElaps);
    imprime(hostRef, nxy, nx);

    // malloc device global memory
    float *d_MatA, *d_MatC;
    CHECK(cudaMalloc((void **)&d_MatA, nBytes));
    CHECK(cudaMalloc((void **)&d_MatC, nBytes));

    // transfer data from host to device
    CHECK(cudaMemcpy(d_MatA, gpuRef, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_MatC, gpuRef, nBytes, cudaMemcpyHostToDevice));

    // invoke kernel at host side
    int dimx = 32;//32
    dim3 block(dimx, 1);
    dim3 grid((nx + block.x - 1) / block.x, 1);
    iStart = seconds();
    // BUG FIX: pass (ny, nx) exactly like the jacobiOnHost call above.
    // The original passed (nx, ny), so the device sweep used a different
    // row stride than the host reference and checkResult always reported
    // a mismatch.
    jacobiOnGpuNaive<<<grid, block>>>(iter_max, d_MatA, d_MatC, ny, nx);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    printf("sumMatrixOnGPU1D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
           grid.y,
           block.x, block.y, iElaps);
    // check kernel error (launch-config errors surface here)
    CHECK(cudaGetLastError());

    // copy kernel result back to host side
    CHECK(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost));
    imprime(gpuRef, nxy, nx);

    // check device results
    checkResult(hostRef, gpuRef, nxy);

    // free device global memory
    CHECK(cudaFree(d_MatA));
    CHECK(cudaFree(d_MatC));

    // free host memory
    free(h_Matriz);
    free(hostRef);
    free(gpuRef);

    // reset device
    CHECK(cudaDeviceReset());
    return (0);
}
|
4244f06654bcba78b211c3b53e66b8c8bb70d9b5.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/fused/attn_feed_forward.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace framework = paddle::framework;
namespace platform = paddle::platform;
USE_OP(matmul);
USE_OP_ITSELF(elementwise_add);
PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(add_grad, GPU, ALL_LAYOUT);
#endif
// get paddle matmul op results as baseline
// Baseline forward pass: runs paddle's stock "matmul" operator on the GPU
// over host-side vectors and copies the product back into *out.
// x is a flattened tensor of shape x_dim (3-D) and y of shape y_dim (2-D);
// the output has shape [x_dim[0], x_dim[1], y_dim[0]], so *out must already
// be sized to that many elements by the caller.
template <typename T>
void GetLinearOp(const std::vector<T> &x, const std::vector<T> &y,
const framework::DDim &x_dim, const framework::DDim &y_dim,
const platform::CUDADeviceContext &ctx, bool transpose_a,
bool transpose_b, float alpha, std::vector<T> *out) {
// Build a throwaway scope holding the op's input/output variables.
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
// Resize before mutable_data so device allocations match the op's shapes.
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_out->Resize({x_dim[0], x_dim[1], y_dim[0]});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(phi::product(x_dim));
auto size_y = static_cast<size_t>(phi::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
hipMemcpy(x_ptr, x.data(), size_x * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(y_ptr, y.data(), size_y * sizeof(T), hipMemcpyHostToDevice);
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
auto op = framework::OpRegistry::CreateOp(
"matmul", {{"X", {"X"}}, {"Y", {"Y"}}}, {{"Out", {"Out"}}}, attrs);
op->Run(scope, ctx.GetPlace());
// NOTE(review): the D2H copy precedes ctx.Wait(); presumably safe because
// hipMemcpy synchronizes with the device - confirm against stream usage.
hipMemcpy(out->data(), z_ptr, size_z * sizeof(T), hipMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add op results as baseline
// Baseline bias-add: runs paddle's stock "elementwise_add" operator on the
// GPU. x is a [bsz_seq, output_size] matrix, y an [output_size] bias that is
// broadcast over rows; *out receives the [bsz_seq, output_size] sum and must
// already be sized accordingly by the caller.
template <typename T>
void GetElementwiseAddOp(const std::vector<T> &x, const std::vector<T> &y,
const int bsz_seq, const int output_size,
const platform::CUDADeviceContext &ctx,
std::vector<T> *out) {
// Throwaway scope with the op's input/output variables.
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_out = scope.Var("Out");
auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_out->Resize({bsz_seq, output_size});
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
auto size_x = bsz_seq * output_size;
auto size_y = output_size;
auto size_z = bsz_seq * output_size;
hipMemcpy(x_ptr, x.data(), size_x * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(y_ptr, y.data(), size_y * sizeof(T), hipMemcpyHostToDevice);
// Default attributes: axis defaults apply for the broadcast.
framework::AttributeMap attrs;
auto op = framework::OpRegistry::CreateOp("elementwise_add",
{{"X", {"X"}}, {"Y", {"Y"}}},
{{"Out", {"Out"}}}, attrs);
op->Run(scope, ctx.GetPlace());
hipMemcpy(out->data(), z_ptr, size_z * sizeof(T), hipMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle matmul_grad op results as baseline
// Baseline backward pass: runs paddle's stock "matmul_grad" operator to get
// dX and dY for out = alpha * op(x) * op(y), given the upstream gradient
// dout_vec of shape out_dim. dinput_vec (size of x_dim) and dweight_vec
// (size of y_dim) must be pre-sized by the caller.
template <typename T>
void GetLinearOpGrad(const std::vector<T> &x_vec, const std::vector<T> &y_vec,
const std::vector<T> &dout_vec,
const framework::DDim &x_dim, const framework::DDim &y_dim,
const framework::DDim &out_dim,
const platform::CUDADeviceContext &ctx, bool transpose_a,
bool transpose_b, float alpha, std::vector<T> *dinput_vec,
std::vector<T> *dweight_vec) {
// Scope holding forward inputs, upstream grad, and the two grad outputs.
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize(x_dim);
tensor_y->Resize(y_dim);
tensor_dout->Resize(out_dim);
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize(x_dim);
tensor_dy->Resize(y_dim);
auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto dinput_ptr = tensor_dx->mutable_data<T>(ctx.GetPlace());
auto dweight_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_x = static_cast<size_t>(phi::product(x_dim));
auto size_y = static_cast<size_t>(phi::product(y_dim));
auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
hipMemcpy(x_ptr, x_vec.data(), size_x * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(y_ptr, y_vec.data(), size_y * sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(dout_ptr, dout_vec.data(), size_z * sizeof(T),
hipMemcpyHostToDevice);
// matmul_grad expects the full attribute set of the fused matmul op, even
// though only transpose/alpha are meaningful here; the rest are defaults.
bool use_mkldnn = false;
std::vector<int> fused_reshape_X = {};
std::vector<int> fused_reshape_Y = {};
std::vector<int> fused_reshape_Out = {};
std::vector<int> fused_transpose_X = {};
std::vector<int> fused_transpose_Y = {};
std::vector<int> fused_transpose_Out = {};
bool use_quantizer = false, force_fp32_output = false;
std::string mkldnn_data_type = "float32";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"transpose_X", transpose_a});
attrs.insert({"transpose_Y", transpose_b});
attrs.insert({"alpha", alpha});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"fused_reshape_X", fused_reshape_X});
attrs.insert({"fused_reshape_Y", fused_reshape_Y});
attrs.insert({"fused_reshape_Out", fused_reshape_Out});
attrs.insert({"fused_transpose_X", fused_transpose_X});
attrs.insert({"fused_transpose_Y", fused_transpose_Y});
attrs.insert({"fused_transpose_Out", fused_transpose_Out});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
attrs.insert({"force_fp32_output", force_fp32_output});
auto op = framework::OpRegistry::CreateOp(
"matmul_grad", {{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}}, attrs);
op->Run(scope, ctx.GetPlace());
hipMemcpy(dinput_vec->data(), dinput_ptr, size_x * sizeof(T),
hipMemcpyDeviceToHost);
hipMemcpy(dweight_vec->data(), dweight_ptr, size_y * sizeof(T),
hipMemcpyDeviceToHost);
ctx.Wait();
}
// get paddle elementwise_add_grad op results as baseline
// Baseline bias gradient: runs paddle's stock "elementwise_add_grad"
// operator; only the bias gradient (DY, shape [output_size]) is copied back
// into *dy_vec, which must be pre-sized by the caller. X/Y contents are
// never filled - only their shapes matter for the broadcast-grad reduction.
template <typename T>
void GetElementwiseAddOpGrad(const std::vector<T> &dout_vec, const int bsz_seq,
const int output_size,
const platform::CUDADeviceContext &ctx,
std::vector<T> *dy_vec) {
framework::Scope scope;
auto var_x = scope.Var("X");
auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
auto var_y = scope.Var("Y");
auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
auto var_dout = scope.Var("DOut");
auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
tensor_x->Resize({bsz_seq, output_size});
tensor_y->Resize({output_size});
tensor_dout->Resize({bsz_seq, output_size});
auto var_dx = scope.Var("DX");
auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
auto var_dy = scope.Var("DY");
auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
tensor_dx->Resize({bsz_seq, output_size});
tensor_dy->Resize({output_size});
auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
auto tensor_dy_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
auto size_z = static_cast<size_t>(bsz_seq * output_size);
hipMemcpy(dout_ptr, dout_vec.data(), size_z * sizeof(T),
hipMemcpyHostToDevice);
// Full attribute set expected by elementwise_add_grad; all defaults.
int axis = -1;
bool use_mkldnn = false, use_quantizer = false;
std::string mkldnn_data_type = "float32";
std::string x_data_format = "", y_data_format = "";
float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
framework::AttributeMap attrs;
attrs.insert({"axis", axis});
attrs.insert({"use_mkldnn", use_mkldnn});
attrs.insert({"x_data_format", x_data_format});
attrs.insert({"y_data_format", y_data_format});
attrs.insert({"use_quantizer", use_quantizer});
attrs.insert({"mkldnn_data_type", mkldnn_data_type});
attrs.insert({"Scale_x", Scale_x});
attrs.insert({"Scale_y", Scale_y});
attrs.insert({"Scale_out", Scale_out});
auto op = framework::OpRegistry::CreateOp(
"elementwise_add_grad",
{{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
{{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}}, attrs);
op->Run(scope, ctx.GetPlace());
auto size_y = static_cast<size_t>(output_size);
hipMemcpy(dy_vec->data(), tensor_dy_ptr, size_y * sizeof(T),
hipMemcpyDeviceToHost);
ctx.Wait();
}
// Test harness comparing the fused FeedForward<T> implementation (a QKV
// linear projection with optional bias) against baselines built from the
// stock matmul/elementwise_add operators, in both forward and backward
// directions. Typical usage: construct, Run(), then CheckOut()/CheckGrad().
template <typename T>
class TestFeedForward {
public:
// Default configuration: BERT-large-like QKV projection, no bias.
TestFeedForward() {
batch_size_ = 16;
seq_len_ = 128;
num_head_ = 16;
dim_head_ = 64;
dim_embed_ = 1024;
has_bias_ = false;
}
TestFeedForward(int batch_size, int seq_len, int num_head, int dim_head,
int dim_embed, bool has_bias) {
batch_size_ = batch_size;
seq_len_ = seq_len;
num_head_ = num_head;
dim_head_ = dim_head;
dim_embed_ = dim_embed;
has_bias_ = has_bias;
}
~TestFeedForward() { delete ctx_; }
// Allocates the device context, fills host vectors with uniform random
// data, and materialises all input/output tensors on the GPU.
void SetUp() {
bsz_seq_ = batch_size_ * seq_len_;
output_size_ = 3 * num_head_ * dim_head_;
input_size_ = dim_embed_;
ctx_ = new platform::CUDADeviceContext(place_);
ctx_->SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(place_, ctx_->stream())
.get());
ctx_->SetHostAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetAllocator(paddle::platform::CPUPlace())
.get());
ctx_->SetZeroAllocator(
paddle::memory::allocation::AllocatorFacade::Instance()
.GetZeroAllocator(place_)
.get());
ctx_->PartialInitWithAllocator();
size_src_ = bsz_seq_ * dim_embed_;        // src: [bs, seq_len, em_dim]
size_weight_ = dim_embed_ * output_size_;  // weight: [output_size, em_dim]
size_output_ =
bsz_seq_ * output_size_;  // output: [bs, seq_len, output_size]
size_bias_ = output_size_;
base_out_vec_.resize(size_output_);
base_bias_out_vec_.resize(size_output_);
base_dinput_vec_.resize(size_src_);
base_dweight_vec_.resize(size_weight_);
base_dbias_vec_.resize(size_bias_);
src_vec_.resize(size_src_);
weight_vec_.resize(size_weight_);
bias_vec_.resize(size_bias_);
doutput_vec_.resize(size_output_);
// Uniform [0, 1) inputs, freshly seeded per run (non-reproducible).
std::default_random_engine random(time(NULL));
std::uniform_real_distribution<float> dis(0.0, 1.0);
for (int i = 0; i < size_src_; i++) {
src_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_weight_; i++) {
weight_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_bias_; i++) {
bias_vec_[i] = static_cast<T>(dis(random));
}
for (int i = 0; i < size_output_; i++) {
doutput_vec_[i] = static_cast<T>(dis(random));
}
framework::TensorFromVector<T>(src_vec_, *ctx_, &src_);
src_.Resize({batch_size_, seq_len_, dim_embed_});
framework::TensorFromVector<T>(weight_vec_, *ctx_, &weight_);
weight_.Resize({output_size_, dim_embed_});
out_.Resize({batch_size_, seq_len_, output_size_});
out_.mutable_data<T>(place_);
if (has_bias_) {
framework::TensorFromVector<T>(bias_vec_, *ctx_, &bias_);
bias_.Resize({output_size_});
bias_out_.Resize({batch_size_, seq_len_, output_size_});
bias_out_.mutable_data<T>(place_);
}
framework::TensorFromVector<T>(doutput_vec_, *ctx_, &doutput_);
doutput_.Resize({batch_size_, seq_len_, output_size_});
dinput_.Resize({batch_size_, seq_len_, dim_embed_});
dinput_.mutable_data<T>(place_);
dweight_.Resize({output_size_, dim_embed_});
dweight_.mutable_data<T>(place_);
if (has_bias_) {
dbias_.Resize({output_size_});
dbias_.mutable_data<T>(place_);
}
}
// Baseline forward: out = src * weight^T (+ bias) via stock operators.
void BaselineForward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOp(src_vec_, weight_vec_, src_.dims(), weight_.dims(), *ctx_,
transpose_a, transpose_b, alpha, &base_out_vec_);
if (has_bias_) {
GetElementwiseAddOp(base_out_vec_, bias_vec_, bsz_seq_, output_size_,
*ctx_, &base_bias_out_vec_);
}
ctx_->Wait();
}
// get forward results of feedforward.
void FusedForward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_output = out_.data<T>();
T *p_bias = nullptr;
T *p_bias_output = nullptr;
if (has_bias_) {
p_bias = bias_.data<T>();
p_bias_output = bias_out_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeForward(p_weight, p_src, p_bias, p_output,
p_bias_output);
ctx_->Wait();
}
// Baseline backward: dX/dW via matmul_grad, dBias via elementwise_add_grad.
void BaselineBackward() {
bool transpose_a = false, transpose_b = true;
float alpha = 1;
GetLinearOpGrad(src_vec_, weight_vec_, doutput_vec_, src_.dims(),
weight_.dims(), out_.dims(), *ctx_, transpose_a,
transpose_b, alpha, &base_dinput_vec_, &base_dweight_vec_);
if (has_bias_) {
GetElementwiseAddOpGrad(doutput_vec_, bsz_seq_, output_size_, *ctx_,
&base_dbias_vec_);
}
ctx_->Wait();
}
// get backward results of feedforward.
void FusedBackward() {
T *p_weight = weight_.data<T>();
T *p_src = src_.data<T>();
T *p_doutput = doutput_.data<T>();
T *p_dinput = dinput_.data<T>();
T *p_dweight = dweight_.data<T>();
T *bias_ptr = nullptr;
if (has_bias_) {
bias_ptr = dbias_.data<T>();
}
auto qkv_compute = paddle::operators::FeedForward<T>(
*ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
qkv_compute.ComputeBackward(p_src, p_weight, p_doutput, p_dinput, p_dweight,
bias_ptr);
ctx_->Wait();
}
// Full pipeline: setup, both forward passes, both backward passes.
void Run() {
SetUp();
BaselineForward();
FusedForward();
BaselineBackward();
FusedBackward();
}
// check forward correctness between baseline and results of feedforward.
// `diff` is an absolute tolerance, or relative when is_relative_atol.
void CheckOut(const T diff, bool is_relative_atol = false) {
std::vector<T> out(size_output_);
std::vector<T> bias_out(size_output_);
paddle::framework::TensorToVector(out_, *ctx_, &out);
if (has_bias_) {
paddle::framework::TensorToVector(bias_out_, *ctx_, &bias_out);
}
ctx_->Wait();
for (int i = 0; i < size_output_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((out[i] - base_out_vec_[i]) / base_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(out[i] - base_out_vec_[i]), diff);
}
if (has_bias_) {
if (is_relative_atol) {
EXPECT_LT(std::abs((bias_out[i] - base_bias_out_vec_[i]) /
base_bias_out_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(bias_out[i] - base_bias_out_vec_[i]), diff);
}
}
}
}
// check backward correctness between baseline and results of feedforward.
// Compares dinput, dweight and (when has_bias_) dbias against baselines.
void CheckGrad(const T diff, bool is_relative_atol = false) {
std::vector<T> h_dinput(size_src_);
paddle::framework::TensorToVector(dinput_, *ctx_, &h_dinput);
for (int i = 0; i < size_src_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dinput[i] - base_dinput_vec_[i]) / base_dinput_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dinput[i] - base_dinput_vec_[i]), diff);
}
}
std::vector<T> h_dweight(size_weight_);
paddle::framework::TensorToVector(dweight_, *ctx_, &h_dweight);
for (int i = 0; i < size_weight_; i++) {
if (is_relative_atol) {
EXPECT_LT(std::abs((h_dweight[i] - base_dweight_vec_[i]) /
base_dweight_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dweight[i] - base_dweight_vec_[i]), diff);
}
}
if (has_bias_) {
std::vector<T> h_dbias(size_bias_);
paddle::framework::TensorToVector(dbias_, *ctx_, &h_dbias);
for (int i = 0; i < size_bias_; i++) {
if (is_relative_atol) {
EXPECT_LT(
std::abs((h_dbias[i] - base_dbias_vec_[i]) / base_dbias_vec_[i]),
diff);
} else {
EXPECT_LT(std::abs(h_dbias[i] - base_dbias_vec_[i]), diff);
}
}
}
}
private:
// Problem configuration.
int batch_size_, seq_len_, num_head_, dim_head_, dim_embed_;
int bsz_seq_, output_size_, input_size_;
bool has_bias_;
// Flattened element counts for each buffer.
int size_src_, size_weight_, size_bias_, size_output_;
// Device tensors used by the fused implementation.
framework::Tensor src_, weight_, bias_, out_, bias_out_;
framework::Tensor dinput_, dweight_, dbias_, doutput_;
// Host-side copies of inputs/upstream grads.
std::vector<T> src_vec_, weight_vec_, bias_vec_, out_vec_, bias_out_vec_;
std::vector<T> dinput_vec_, dweight_vec_, dbias_vec_, doutput_vec_;
// results of baseline.
std::vector<T> base_out_vec_, base_bias_out_vec_;
std::vector<T> base_dinput_vec_, base_dweight_vec_, base_dbias_vec_;
platform::CUDAPlace place_;
platform::CUDADeviceContext *ctx_;
};
// test for fp32, fp16, fp32+bias and fp16+bias
// fp32, no bias: tight absolute tolerances for both forward and backward.
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<float> test(batch_size, seq_len, num_head, dim_head,
dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-5));
}
// fp16, no bias: same tolerances as fp32 (both paths use identical math).
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = false;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-5));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-5));
}
// fp32 with bias: backward tolerance is looser (1e-3) because the bias
// gradient reduces over bsz_seq elements.
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<float> test(batch_size, seq_len, num_head, dim_head,
dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<float>(1e-5));
test.CheckGrad(static_cast<float>(1e-3));
}
// fp16 with bias: loosest tolerances; the gradient check is relative
// (last argument true) to absorb fp16 accumulation error.
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16Bias) {
int batch_size = 16;
int seq_len = 128;
int num_head = 16;
int dim_head = 64;
int dim_embed = 1024;
bool has_bias = true;
TestFeedForward<paddle::platform::float16> test(
batch_size, seq_len, num_head, dim_head, dim_embed, has_bias);
test.Run();
test.CheckOut(static_cast<paddle::platform::float16>(1e-2));
test.CheckGrad(static_cast<paddle::platform::float16>(1e-2), true);
}
| 4244f06654bcba78b211c3b53e66b8c8bb70d9b5.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/fused/attn_feed_forward.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace framework = paddle::framework;
namespace platform = paddle::platform;
USE_OP(matmul);
USE_OP_ITSELF(elementwise_add);
PD_DECLARE_KERNEL(add, CPU, ALL_LAYOUT);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(add_grad, GPU, ALL_LAYOUT);
#endif
// get paddle matmul op results as baseline
// Copies x/y into device tensors, runs the stock "matmul" operator via the op
// registry, and copies the device result back into *out (blocking).
//   x_dim, y_dim:              dims of the X and Y tensors
//   transpose_a/transpose_b:   forwarded as matmul's transpose attributes
//   alpha:                     matmul scaling attribute
// NOTE(review): the output is resized to {x_dim[0], x_dim[1], y_dim[0]},
// which presumes Y is stored [out_dim, in_dim] and used with
// transpose_b == true -- confirm against callers.
template <typename T>
void GetLinearOp(const std::vector<T> &x, const std::vector<T> &y,
                 const framework::DDim &x_dim, const framework::DDim &y_dim,
                 const platform::CUDADeviceContext &ctx, bool transpose_a,
                 bool transpose_b, float alpha, std::vector<T> *out) {
  framework::Scope scope;
  auto var_x = scope.Var("X");
  auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
  auto var_y = scope.Var("Y");
  auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
  auto var_out = scope.Var("Out");
  auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
  tensor_x->Resize(x_dim);
  tensor_y->Resize(y_dim);
  tensor_out->Resize({x_dim[0], x_dim[1], y_dim[0]});
  auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
  auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
  auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
  auto size_x = static_cast<size_t>(phi::product(x_dim));
  auto size_y = static_cast<size_t>(phi::product(y_dim));
  auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
  // blocking copies: host inputs -> device tensors
  cudaMemcpy(x_ptr, x.data(), size_x * sizeof(T), cudaMemcpyHostToDevice);
  cudaMemcpy(y_ptr, y.data(), size_y * sizeof(T), cudaMemcpyHostToDevice);
  framework::AttributeMap attrs;
  attrs.insert({"transpose_X", transpose_a});
  attrs.insert({"transpose_Y", transpose_b});
  attrs.insert({"alpha", alpha});
  auto op = framework::OpRegistry::CreateOp(
      "matmul", {{"X", {"X"}}, {"Y", {"Y"}}}, {{"Out", {"Out"}}}, attrs);
  op->Run(scope, ctx.GetPlace());
  // bring the device result back for host-side comparison
  cudaMemcpy(out->data(), z_ptr, size_z * sizeof(T), cudaMemcpyDeviceToHost);
  ctx.Wait();  // ensure all stream work finished before returning
}
// get paddle elementwise_add op results as baseline
// Computes out = x + y (y broadcast across rows) with the stock
// "elementwise_add" operator; x is [bsz_seq, output_size], y is [output_size].
template <typename T>
void GetElementwiseAddOp(const std::vector<T> &x, const std::vector<T> &y,
                         const int bsz_seq, const int output_size,
                         const platform::CUDADeviceContext &ctx,
                         std::vector<T> *out) {
  framework::Scope scope;
  auto var_x = scope.Var("X");
  auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
  auto var_y = scope.Var("Y");
  auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
  auto var_out = scope.Var("Out");
  auto tensor_out = var_out->GetMutable<framework::LoDTensor>();
  tensor_x->Resize({bsz_seq, output_size});
  tensor_y->Resize({output_size});
  tensor_out->Resize({bsz_seq, output_size});
  auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
  auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
  auto z_ptr = tensor_out->mutable_data<T>(ctx.GetPlace());
  auto size_x = bsz_seq * output_size;
  auto size_y = output_size;
  auto size_z = bsz_seq * output_size;
  // blocking copies: host inputs -> device tensors
  cudaMemcpy(x_ptr, x.data(), size_x * sizeof(T), cudaMemcpyHostToDevice);
  cudaMemcpy(y_ptr, y.data(), size_y * sizeof(T), cudaMemcpyHostToDevice);
  framework::AttributeMap attrs;  // default attrs (axis defaults to -1)
  auto op = framework::OpRegistry::CreateOp("elementwise_add",
                                            {{"X", {"X"}}, {"Y", {"Y"}}},
                                            {{"Out", {"Out"}}}, attrs);
  op->Run(scope, ctx.GetPlace());
  // bring the device result back for host-side comparison
  cudaMemcpy(out->data(), z_ptr, size_z * sizeof(T), cudaMemcpyDeviceToHost);
  ctx.Wait();  // ensure all stream work finished before returning
}
// get paddle matmul_grad op results as baseline
// Runs the stock "matmul_grad" operator on device copies of x_vec / y_vec /
// dout_vec and returns dX / dY through dinput_vec / dweight_vec.
// The full mkldnn/quantization attribute set is supplied because the op's
// attribute checker expects every declared attribute, even on the GPU path.
template <typename T>
void GetLinearOpGrad(const std::vector<T> &x_vec, const std::vector<T> &y_vec,
                     const std::vector<T> &dout_vec,
                     const framework::DDim &x_dim, const framework::DDim &y_dim,
                     const framework::DDim &out_dim,
                     const platform::CUDADeviceContext &ctx, bool transpose_a,
                     bool transpose_b, float alpha, std::vector<T> *dinput_vec,
                     std::vector<T> *dweight_vec) {
  framework::Scope scope;
  auto var_x = scope.Var("X");
  auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
  auto var_y = scope.Var("Y");
  auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
  auto var_dout = scope.Var("DOut");
  auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
  tensor_x->Resize(x_dim);
  tensor_y->Resize(y_dim);
  tensor_dout->Resize(out_dim);
  auto var_dx = scope.Var("DX");
  auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
  auto var_dy = scope.Var("DY");
  auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
  tensor_dx->Resize(x_dim);   // dX has the same shape as X
  tensor_dy->Resize(y_dim);   // dY has the same shape as Y
  auto x_ptr = tensor_x->mutable_data<T>(ctx.GetPlace());
  auto y_ptr = tensor_y->mutable_data<T>(ctx.GetPlace());
  auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
  auto dinput_ptr = tensor_dx->mutable_data<T>(ctx.GetPlace());
  auto dweight_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
  auto size_x = static_cast<size_t>(phi::product(x_dim));
  auto size_y = static_cast<size_t>(phi::product(y_dim));
  // NOTE(review): assumes out_dim == {x_dim[0], x_dim[1], y_dim[0]} -- confirm
  auto size_z = x_dim[0] * x_dim[1] * y_dim[0];
  // blocking copies: host inputs -> device tensors
  cudaMemcpy(x_ptr, x_vec.data(), size_x * sizeof(T), cudaMemcpyHostToDevice);
  cudaMemcpy(y_ptr, y_vec.data(), size_y * sizeof(T), cudaMemcpyHostToDevice);
  cudaMemcpy(dout_ptr, dout_vec.data(), size_z * sizeof(T),
             cudaMemcpyHostToDevice);
  bool use_mkldnn = false;
  std::vector<int> fused_reshape_X = {};
  std::vector<int> fused_reshape_Y = {};
  std::vector<int> fused_reshape_Out = {};
  std::vector<int> fused_transpose_X = {};
  std::vector<int> fused_transpose_Y = {};
  std::vector<int> fused_transpose_Out = {};
  bool use_quantizer = false, force_fp32_output = false;
  std::string mkldnn_data_type = "float32";
  float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
  framework::AttributeMap attrs;
  attrs.insert({"transpose_X", transpose_a});
  attrs.insert({"transpose_Y", transpose_b});
  attrs.insert({"alpha", alpha});
  attrs.insert({"use_mkldnn", use_mkldnn});
  attrs.insert({"fused_reshape_X", fused_reshape_X});
  attrs.insert({"fused_reshape_Y", fused_reshape_Y});
  attrs.insert({"fused_reshape_Out", fused_reshape_Out});
  attrs.insert({"fused_transpose_X", fused_transpose_X});
  attrs.insert({"fused_transpose_Y", fused_transpose_Y});
  attrs.insert({"fused_transpose_Out", fused_transpose_Out});
  attrs.insert({"use_quantizer", use_quantizer});
  attrs.insert({"mkldnn_data_type", mkldnn_data_type});
  attrs.insert({"Scale_x", Scale_x});
  attrs.insert({"Scale_y", Scale_y});
  attrs.insert({"Scale_out", Scale_out});
  attrs.insert({"force_fp32_output", force_fp32_output});
  auto op = framework::OpRegistry::CreateOp(
      "matmul_grad", {{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
      {{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}}, attrs);
  op->Run(scope, ctx.GetPlace());
  // bring both gradients back for host-side comparison
  cudaMemcpy(dinput_vec->data(), dinput_ptr, size_x * sizeof(T),
             cudaMemcpyDeviceToHost);
  cudaMemcpy(dweight_vec->data(), dweight_ptr, size_y * sizeof(T),
             cudaMemcpyDeviceToHost);
  ctx.Wait();  // ensure all stream work finished before returning
}
// get paddle elementwise_add_grad op results as baseline
// Computes the bias gradient dY (reduction of dOut over rows) with the stock
// "elementwise_add_grad" operator; only dY is copied back to the host.
// NOTE(review): X and Y are only Resize()d and never allocated or filled --
// elementwise_add_grad apparently consumes just their dims here; confirm.
template <typename T>
void GetElementwiseAddOpGrad(const std::vector<T> &dout_vec, const int bsz_seq,
                             const int output_size,
                             const platform::CUDADeviceContext &ctx,
                             std::vector<T> *dy_vec) {
  framework::Scope scope;
  auto var_x = scope.Var("X");
  auto tensor_x = var_x->GetMutable<framework::LoDTensor>();
  auto var_y = scope.Var("Y");
  auto tensor_y = var_y->GetMutable<framework::LoDTensor>();
  auto var_dout = scope.Var("DOut");
  auto tensor_dout = var_dout->GetMutable<framework::LoDTensor>();
  tensor_x->Resize({bsz_seq, output_size});
  tensor_y->Resize({output_size});
  tensor_dout->Resize({bsz_seq, output_size});
  auto var_dx = scope.Var("DX");
  auto tensor_dx = var_dx->GetMutable<framework::LoDTensor>();
  auto var_dy = scope.Var("DY");
  auto tensor_dy = var_dy->GetMutable<framework::LoDTensor>();
  tensor_dx->Resize({bsz_seq, output_size});
  tensor_dy->Resize({output_size});
  auto dout_ptr = tensor_dout->mutable_data<T>(ctx.GetPlace());
  auto tensor_dy_ptr = tensor_dy->mutable_data<T>(ctx.GetPlace());
  auto size_z = static_cast<size_t>(bsz_seq * output_size);
  // blocking copy: host upstream gradient -> device tensor
  cudaMemcpy(dout_ptr, dout_vec.data(), size_z * sizeof(T),
             cudaMemcpyHostToDevice);
  int axis = -1;
  bool use_mkldnn = false, use_quantizer = false;
  std::string mkldnn_data_type = "float32";
  std::string x_data_format = "", y_data_format = "";
  float Scale_x = 1.0, Scale_y = 1.0, Scale_out = 1.0;
  // full attribute set expected by the op's attribute checker
  framework::AttributeMap attrs;
  attrs.insert({"axis", axis});
  attrs.insert({"use_mkldnn", use_mkldnn});
  attrs.insert({"x_data_format", x_data_format});
  attrs.insert({"y_data_format", y_data_format});
  attrs.insert({"use_quantizer", use_quantizer});
  attrs.insert({"mkldnn_data_type", mkldnn_data_type});
  attrs.insert({"Scale_x", Scale_x});
  attrs.insert({"Scale_y", Scale_y});
  attrs.insert({"Scale_out", Scale_out});
  auto op = framework::OpRegistry::CreateOp(
      "elementwise_add_grad",
      {{"Out@GRAD", {"DOut"}}, {"X", {"X"}}, {"Y", {"Y"}}},
      {{"X@GRAD", {"DX"}}, {"Y@GRAD", {"DY"}}}, attrs);
  op->Run(scope, ctx.GetPlace());
  auto size_y = static_cast<size_t>(output_size);
  // only the bias gradient is needed by callers
  cudaMemcpy(dy_vec->data(), tensor_dy_ptr, size_y * sizeof(T),
             cudaMemcpyDeviceToHost);
  ctx.Wait();  // ensure all stream work finished before returning
}
// Test harness for paddle::operators::FeedForward (fused matmul + optional
// bias add). Runs the fused forward/backward paths and compares them against
// stock "matmul" / "elementwise_add" operator baselines.
// Shapes: src [batch, seq, dim_embed], weight [output_size, dim_embed],
// output [batch, seq, output_size] with output_size = 3 * num_head * dim_head.
template <typename T>
class TestFeedForward {
 public:
  TestFeedForward() {
    batch_size_ = 16;
    seq_len_ = 128;
    num_head_ = 16;
    dim_head_ = 64;
    dim_embed_ = 1024;
    has_bias_ = false;
    ctx_ = nullptr;  // created lazily in SetUp(); null so ~TestFeedForward is safe
  }
  TestFeedForward(int batch_size, int seq_len, int num_head, int dim_head,
                  int dim_embed, bool has_bias) {
    batch_size_ = batch_size;
    seq_len_ = seq_len;
    num_head_ = num_head;
    dim_head_ = dim_head;
    dim_embed_ = dim_embed;
    has_bias_ = has_bias;
    ctx_ = nullptr;  // created lazily in SetUp(); null so ~TestFeedForward is safe
  }
  // deleting nullptr is a no-op, so destruction before SetUp() is safe
  ~TestFeedForward() { delete ctx_; }
  // Allocates the device context, sizes all buffers, and fills the inputs
  // with uniform random values in [0, 1).
  void SetUp() {
    bsz_seq_ = batch_size_ * seq_len_;
    output_size_ = 3 * num_head_ * dim_head_;
    input_size_ = dim_embed_;
    ctx_ = new platform::CUDADeviceContext(place_);
    ctx_->SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                           .GetAllocator(place_, ctx_->stream())
                           .get());
    ctx_->SetHostAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(paddle::platform::CPUPlace())
            .get());
    ctx_->SetZeroAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetZeroAllocator(place_)
            .get());
    ctx_->PartialInitWithAllocator();
    size_src_ = bsz_seq_ * dim_embed_;        // src: [bs, seq_len, em_dim]
    size_weight_ = dim_embed_ * output_size_; // weight: [output_size, em_dim]
    size_output_ =
        bsz_seq_ * output_size_;  // output: [bs, seq_len, output_size]
    size_bias_ = output_size_;
    base_out_vec_.resize(size_output_);
    base_bias_out_vec_.resize(size_output_);
    base_dinput_vec_.resize(size_src_);
    base_dweight_vec_.resize(size_weight_);
    base_dbias_vec_.resize(size_bias_);
    src_vec_.resize(size_src_);
    weight_vec_.resize(size_weight_);
    bias_vec_.resize(size_bias_);
    doutput_vec_.resize(size_output_);
    std::default_random_engine random(time(NULL));
    std::uniform_real_distribution<float> dis(0.0, 1.0);
    for (int i = 0; i < size_src_; i++) {
      src_vec_[i] = static_cast<T>(dis(random));
    }
    for (int i = 0; i < size_weight_; i++) {
      weight_vec_[i] = static_cast<T>(dis(random));
    }
    for (int i = 0; i < size_bias_; i++) {
      bias_vec_[i] = static_cast<T>(dis(random));
    }
    for (int i = 0; i < size_output_; i++) {
      doutput_vec_[i] = static_cast<T>(dis(random));
    }
    framework::TensorFromVector<T>(src_vec_, *ctx_, &src_);
    src_.Resize({batch_size_, seq_len_, dim_embed_});
    framework::TensorFromVector<T>(weight_vec_, *ctx_, &weight_);
    weight_.Resize({output_size_, dim_embed_});
    out_.Resize({batch_size_, seq_len_, output_size_});
    out_.mutable_data<T>(place_);
    if (has_bias_) {
      framework::TensorFromVector<T>(bias_vec_, *ctx_, &bias_);
      bias_.Resize({output_size_});
      bias_out_.Resize({batch_size_, seq_len_, output_size_});
      bias_out_.mutable_data<T>(place_);
    }
    framework::TensorFromVector<T>(doutput_vec_, *ctx_, &doutput_);
    doutput_.Resize({batch_size_, seq_len_, output_size_});
    dinput_.Resize({batch_size_, seq_len_, dim_embed_});
    dinput_.mutable_data<T>(place_);
    dweight_.Resize({output_size_, dim_embed_});
    dweight_.mutable_data<T>(place_);
    if (has_bias_) {
      dbias_.Resize({output_size_});
      dbias_.mutable_data<T>(place_);
    }
  }
  // Baseline forward: src * weight^T (+ bias) via stock operators.
  void BaselineForward() {
    bool transpose_a = false, transpose_b = true;
    float alpha = 1;
    GetLinearOp(src_vec_, weight_vec_, src_.dims(), weight_.dims(), *ctx_,
                transpose_a, transpose_b, alpha, &base_out_vec_);
    if (has_bias_) {
      GetElementwiseAddOp(base_out_vec_, bias_vec_, bsz_seq_, output_size_,
                          *ctx_, &base_bias_out_vec_);
    }
    ctx_->Wait();
  }
  // get forward results of feedforward.
  void FusedForward() {
    T *p_weight = weight_.data<T>();
    T *p_src = src_.data<T>();
    T *p_output = out_.data<T>();
    T *p_bias = nullptr;
    T *p_bias_output = nullptr;
    if (has_bias_) {
      p_bias = bias_.data<T>();
      p_bias_output = bias_out_.data<T>();
    }
    auto qkv_compute = paddle::operators::FeedForward<T>(
        *ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
    qkv_compute.ComputeForward(p_weight, p_src, p_bias, p_output,
                               p_bias_output);
    ctx_->Wait();
  }
  // Baseline backward: matmul_grad (+ elementwise_add_grad for the bias).
  void BaselineBackward() {
    bool transpose_a = false, transpose_b = true;
    float alpha = 1;
    GetLinearOpGrad(src_vec_, weight_vec_, doutput_vec_, src_.dims(),
                    weight_.dims(), out_.dims(), *ctx_, transpose_a,
                    transpose_b, alpha, &base_dinput_vec_, &base_dweight_vec_);
    if (has_bias_) {
      GetElementwiseAddOpGrad(doutput_vec_, bsz_seq_, output_size_, *ctx_,
                              &base_dbias_vec_);
    }
    ctx_->Wait();
  }
  // get backward results of feedforward.
  void FusedBackward() {
    T *p_weight = weight_.data<T>();
    T *p_src = src_.data<T>();
    T *p_doutput = doutput_.data<T>();
    T *p_dinput = dinput_.data<T>();
    T *p_dweight = dweight_.data<T>();
    T *bias_ptr = nullptr;
    if (has_bias_) {
      bias_ptr = dbias_.data<T>();
    }
    auto qkv_compute = paddle::operators::FeedForward<T>(
        *ctx_, bsz_seq_, output_size_, input_size_, has_bias_);
    qkv_compute.ComputeBackward(p_src, p_weight, p_doutput, p_dinput, p_dweight,
                                bias_ptr);
    ctx_->Wait();
  }
  // Full pipeline: set up buffers, then run both forward and backward paths.
  void Run() {
    SetUp();
    BaselineForward();
    FusedForward();
    BaselineBackward();
    FusedBackward();
  }
  // check forward correctness between baseline and results of feedforward.
  // diff: tolerance; is_relative_atol: compare relative instead of absolute.
  void CheckOut(const T diff, bool is_relative_atol = false) {
    std::vector<T> out(size_output_);
    std::vector<T> bias_out(size_output_);
    paddle::framework::TensorToVector(out_, *ctx_, &out);
    if (has_bias_) {
      paddle::framework::TensorToVector(bias_out_, *ctx_, &bias_out);
    }
    ctx_->Wait();
    for (int i = 0; i < size_output_; i++) {
      if (is_relative_atol) {
        EXPECT_LT(std::abs((out[i] - base_out_vec_[i]) / base_out_vec_[i]),
                  diff);
      } else {
        EXPECT_LT(std::abs(out[i] - base_out_vec_[i]), diff);
      }
      if (has_bias_) {
        if (is_relative_atol) {
          EXPECT_LT(std::abs((bias_out[i] - base_bias_out_vec_[i]) /
                             base_bias_out_vec_[i]),
                    diff);
        } else {
          EXPECT_LT(std::abs(bias_out[i] - base_bias_out_vec_[i]), diff);
        }
      }
    }
  }
  // check backward correctness between baseline and results of feedforward.
  // diff: tolerance; is_relative_atol: compare relative instead of absolute.
  void CheckGrad(const T diff, bool is_relative_atol = false) {
    std::vector<T> h_dinput(size_src_);
    paddle::framework::TensorToVector(dinput_, *ctx_, &h_dinput);
    for (int i = 0; i < size_src_; i++) {
      if (is_relative_atol) {
        EXPECT_LT(
            std::abs((h_dinput[i] - base_dinput_vec_[i]) / base_dinput_vec_[i]),
            diff);
      } else {
        EXPECT_LT(std::abs(h_dinput[i] - base_dinput_vec_[i]), diff);
      }
    }
    std::vector<T> h_dweight(size_weight_);
    paddle::framework::TensorToVector(dweight_, *ctx_, &h_dweight);
    for (int i = 0; i < size_weight_; i++) {
      if (is_relative_atol) {
        EXPECT_LT(std::abs((h_dweight[i] - base_dweight_vec_[i]) /
                           base_dweight_vec_[i]),
                  diff);
      } else {
        EXPECT_LT(std::abs(h_dweight[i] - base_dweight_vec_[i]), diff);
      }
    }
    if (has_bias_) {
      std::vector<T> h_dbias(size_bias_);
      paddle::framework::TensorToVector(dbias_, *ctx_, &h_dbias);
      for (int i = 0; i < size_bias_; i++) {
        if (is_relative_atol) {
          EXPECT_LT(
              std::abs((h_dbias[i] - base_dbias_vec_[i]) / base_dbias_vec_[i]),
              diff);
        } else {
          EXPECT_LT(std::abs(h_dbias[i] - base_dbias_vec_[i]), diff);
        }
      }
    }
  }
 private:
  // problem geometry
  int batch_size_, seq_len_, num_head_, dim_head_, dim_embed_;
  int bsz_seq_, output_size_, input_size_;
  bool has_bias_;
  // flattened element counts for each buffer
  int size_src_, size_weight_, size_bias_, size_output_;
  framework::Tensor src_, weight_, bias_, out_, bias_out_;
  framework::Tensor dinput_, dweight_, dbias_, doutput_;
  std::vector<T> src_vec_, weight_vec_, bias_vec_, out_vec_, bias_out_vec_;
  std::vector<T> dinput_vec_, dweight_vec_, dbias_vec_, doutput_vec_;
  // results of baseline.
  std::vector<T> base_out_vec_, base_bias_out_vec_;
  std::vector<T> base_dinput_vec_, base_dweight_vec_, base_dbias_vec_;
  platform::CUDAPlace place_;
  platform::CUDADeviceContext *ctx_;  // owned; allocated in SetUp()
};
// test for fp32, fp16, fp32+bias and fp16+bias
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32) {
  // BERT-large QKV geometry: batch 16, seq 128, 16 heads of width 64,
  // embedding 1024, without the bias-add path.
  const int batch_size = 16;
  const int seq_len = 128;
  const int num_head = 16;
  const int dim_head = 64;
  const int dim_embed = 1024;
  TestFeedForward<float> test(batch_size, seq_len, num_head, dim_head,
                              dim_embed, /*has_bias=*/false);
  test.Run();
  test.CheckOut(static_cast<float>(1e-5));
  test.CheckGrad(static_cast<float>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16) {
  // BERT-large geometry in half precision, without the bias-add path.
  const int batch_size = 16;
  const int seq_len = 128;
  const int num_head = 16;
  const int dim_head = 64;
  const int dim_embed = 1024;
  TestFeedForward<paddle::platform::float16> test(
      batch_size, seq_len, num_head, dim_head, dim_embed, /*has_bias=*/false);
  test.Run();
  test.CheckOut(static_cast<paddle::platform::float16>(1e-5));
  test.CheckGrad(static_cast<paddle::platform::float16>(1e-5));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp32Bias) {
  // BERT-large QKV geometry with the bias-add path enabled.
  const int batch_size = 16;
  const int seq_len = 128;
  const int num_head = 16;
  const int dim_head = 64;
  const int dim_embed = 1024;
  TestFeedForward<float> test(batch_size, seq_len, num_head, dim_head,
                              dim_embed, /*has_bias=*/true);
  test.Run();
  // Backward tolerance is looser than forward: gradients accumulate error.
  test.CheckOut(static_cast<float>(1e-5));
  test.CheckGrad(static_cast<float>(1e-3));
}
TEST(FeedForward, GPUFeedforwardBertLargeSizeFp16Bias) {
  // BERT-large geometry, half precision, bias-add path enabled.
  const int batch_size = 16;
  const int seq_len = 128;
  const int num_head = 16;
  const int dim_head = 64;
  const int dim_embed = 1024;
  TestFeedForward<paddle::platform::float16> test(
      batch_size, seq_len, num_head, dim_head, dim_embed, /*has_bias=*/true);
  test.Run();
  // fp16 needs loose tolerances; gradients are compared with relative atol.
  test.CheckOut(static_cast<paddle::platform::float16>(1e-2));
  test.CheckGrad(static_cast<paddle::platform::float16>(1e-2), true);
}
|
9fb00faba60a038e2b41ba3c5e38bca308023638.hip | // !!! This is a file automatically generated by hipify!!!
// STD include
#include <vector>
#include <thread>
// CUDA include
#ifdef __HIPCC__
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "hip/hip_runtime.h"
#endif
// OPENGL include
#include <GL/glut.h>
#include <GL/freeglut.h>
// STIM include
#include <stim/visualization/gl_network.h>
#include <stim/visualization/gl_aaboundingbox.h>
#include <stim/parser/arguments.h>
#include <stim/visualization/camera.h>
#include <stim/biomodels/flow.h>
#include <stim/visualization/colormap.h>
#include <stim/math/matrix.h>
#include <stim/grids/image_stack.h>
#include <stim/cuda/cudatools/error.h>
#include <stim/ui/progressbar.h>
//****************************parameter setting*********************************
// user input parameters
float u = 0.0f; // viscosity
float h = 0.0f; // height of edge(channel)
float dx, dy, dz; // x, y and z image scaling(units/pixel)
float main_feeder_radii; // default radii of main feeder (50um will be great for microfluidics manufacture)
float default_radii = 5.0f; // default radii of network vertex
float minimum_radii = 0.0f; // minimum radii that current machine can manufacture
float max_pressure = 0.0f; // maximum pressure that the channel can bear
int X; // workspace X
int Y; // workspace Y
int size_x, size_y, size_z; // size of image stack
std::string units; // units
std::string stackdir = ""; // directory where image stacks will be stored
// window console parameters
int mouse_x = -5; // mouse x window position
int mouse_y = -5; // mouse y window position
int vX; // viewport X
int vY; // viewport Y
// hard-coded parameters
float delta = 0.01f; // discrepancy
float eps = 15.0f; // epsilon threshold
std::vector<std::string> menu_option = { "generate network", "simulation", "build inlet", "build outlet", "manufacture" };
int cur_menu_num; // number of current menu option
int new_menu_num; // number of new menu option
int mode; // menu options
int mods; // special keyboard input
float border = 20.0f; // bar edge position
float radii_factor = 0.4f; // change ratio of network vertex radii
GLint subdivision = 20; // slices and stacks
float cur_max_radii = 0.0f; // store the maximum radii in the network for manufacture
// new structure type definition
struct vertex {
stim::vec3<float> c; // coordinates
float r = default_radii; // radii
};
struct edge {
unsigned p[2]; // start and end vertex indices
float v = 0.0f; // velocity along edge
};
struct sphere {
stim::vec3<float> c; // center of sphere
float r; // radii
};
struct cylinder { // radii changes gradually
stim::vec3<float> c1; // center of geometry start hat
stim::vec3<float> c2; // center of geometry end hat
float r1; // radii at start hat
float r2; // radii at end hat
};
// parameters for generating new networks
bool generate_network = false; // flag indicates in generating network mode
bool first_click = true; // flag indicates first click of one line of edges
bool flag = false; // flag indicates found a near vertex or edge
unsigned num = 0; // number of vertex in a new line
unsigned iter = 0; // iterator indicates index of current vertex
unsigned name = 0; // output network's main name in sequences
unsigned sub_name = 0; // output network's sub_name in sequences
vertex new_vertex; // stores current acceptable vertex
vertex tmp_vertex; // temporarily stores a vertex when moving mouse
edge new_edge; // stores current acceptable edge
edge tmp_edge; // temporarily stores a edge when moving mouse
std::vector<unsigned> dangle_vertex; // boundary(dangle) vertices list
stim::vec3<float> L = stim::vec3<float>(FLT_MAX, FLT_MAX, FLT_MAX); // minimum point in the bounding box
stim::vec3<float> U = stim::vec3<float>(-FLT_MAX, -FLT_MAX, -FLT_MAX); // maximum point in the bounding box
std::vector<unsigned> color_scheme; // color scheme for each edge
unsigned color_index = 0;
// parameters for simulation
bool simulation = false; // flag indicates in simulation network mode
bool first_simulation = true; // initialize simulation, all inlet to maximum pressure, all outlet to zero pressure
bool select_pressure = false; // flag indicates having selected a vertex to modify pressure, next step is to set specific pressure value
bool select_radii = false; // flag indicates having selected a vertex to change radii, next step is to set specific radii value
bool radii_changed = false; // flag indicates one vertex has been changed radii
bool grow = false; // flag indicates grow new line of edges
unsigned pressure_index = 0; // index of picked vertex for pressure
unsigned radii_index = 0; // index of picked vertex for radii
unsigned edge_index = 0; // index of picked edge
float max_v; // maximum velocity in units / s
float min_v;
stim::flow<float> Flow; // flow object for calculating network fluid flow
std::vector<typename stim::triple<unsigned, unsigned, float> > input; // first one store which vertex, second one stores which edge, third one stores in/out volume flow rate of that vertex
std::vector<typename stim::triple<unsigned, unsigned, float> > output;
std::vector<unsigned char> color; // color map based on velocity
bool color_bound = false; // flag indicates color map has been bound to 1D texture
std::vector<int> velocity_map; // velocity map
std::vector<typename edge> tmp_E; // temp list of edges
unsigned new_num = 0; // number of new growing vertex
// parameters for building bridge
bool build_inlet = false; // flag indicates in building inlet mode
bool build_outlet = false; // flag indicates in building outlet mode
bool select_bridge = false; // flag indicates now user can select bridge to modify
bool select_corner = false; // flag indicates having selected a bridge to modify, the next click is to choose a new position for the corner vertex
bool inlet_done = false; // finished choosing the inlet main feeder position
bool outlet_done = false; // finished choosing the outlet main feeder position
std::vector<typename stim::bridge<float> > inlet; // input bridge
std::vector<typename stim::bridge<float> > outlet; // output bridge
stim::vec3<float> inlet_port; // inlet main feeder port
stim::vec3<float> outlet_port; // outlet main feeder port
stim::vec3<float> corner_vertex; // corner vertex
unsigned bridge_index; // selected bridge index
float inlet_flow_rate = 0.0f; // volume flow rate at main inlet feeder
float outlet_flow_rate = 0.0f; // volume flow rate at main outlet feeder
float inlet_pressure; // pressure at main inlet feeder
float outlet_pressure; // pressure at main outlet feeder
unsigned min_input_index; // maximum output pressure index
unsigned max_output_index; // minimum input pressure index
std::vector<bool> inlet_feasibility;// list of flag indicates ith inlet bridge feasibility
std::vector<bool> outlet_feasibility;
// parameters for manufacture
bool manufacture = false; // flag indicates in manufacture mode
bool mask_done = false; // flag indicates having made a mask
// network
unsigned num_edge = 0; // number of edges in current network
unsigned num_vertex = 0; // number of vertices in current network
std::vector<vertex> V; // list of vertices
std::vector<edge> E; // list of edges
// image stack
stim::image_stack<unsigned char, float> I; // image stack object
std::vector<sphere> A; // sphere model for making image stack
unsigned feeder_start_index;
std::vector<cylinder> B; // cylinder model for making image stack
unsigned bridge_start_index;
// camera object
stim::camera cam; // camera object
float camera_factor = 1.2f; // start point of the camera as a function of X and Y size
// colors
#define JACK_CTRL_PTS 11
static float JACKCP[JACK_CTRL_PTS * 3] = { 0.671f, 0.851f, 0.914f,
0.502f, 0.804f, 0.757f,
0.651f, 0.851f, 0.416f,
0.945f, 0.714f, 0.855f,
0.600f, 0.439f, 0.671f,
0.914f, 0.761f, 0.490f,
0.729f, 0.729f, 0.729f,
0.957f, 0.647f, 0.510f,
0.996f, 0.878f, 0.565f,
0.992f, 0.722f, 0.388f,
0.957f, 0.427f, 0.263f };
//****************************auxiliary functions*********************************
// find the nearest vertex of current click position
// return true and a value if found
inline bool epsilon_vertex(int x, int y, unsigned& v) {
float d = FLT_MAX; // minimum distance between 2 vertices
float tmp_d = 0.0f; // temporary stores distance for loop
unsigned tmp_i = 0; // temporary stores connection index for loop
stim::vec3<float> tmp_v; // temporary stores current loop point
d = FLT_MAX; // set to max of float number
for (unsigned i = 0; i < V.size(); i++) {
tmp_v = stim::vec3<float>((float)x, (float)(vY - y), 0.0f);
tmp_v[0] = tmp_v[0] * (float)X / vX;
tmp_v[1] = tmp_v[1] * (float)Y / vY;
tmp_v = tmp_v - V[i].c; // calculate a vector between two vertices
tmp_d = tmp_v.len(); // calculate length of that vector
if (tmp_d < d) {
d = tmp_d; // if found a nearer vertex
tmp_i = i; // get the index of that vertex
}
}
if (d < eps) { // if current click is close to vertex we set before
// must have at least three point to make a plane or loop
if (tmp_i < num && (tmp_i == V.size() - 1 || tmp_i == V.size() - 2) && !first_click && mods == 0) {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "You can't do that!";
std::cout.flush();
}
else {
v = tmp_i; // copy the extant vertex's index to v
}
return true;
}
return false;
}
// check whether the projection of v0 onto line segment v1-v2 falls on the
// extended line (i.e. outside the segment); if so, invalidate the candidate
// by setting distance to FLT_MAX
inline void is_outside(stim::vec3<float> v0, stim::vec3<float> v1, stim::vec3<float> v2, float &distance) {
	float seg_len = (v2 - v1).len();			// length of the segment
	float from_v1 = (v0 - v1).dot((v2 - v1).norm());	// signed projection measured from v1
	float from_v2 = (v0 - v2).dot((v1 - v2).norm());	// signed projection measured from v2
	bool outside = (from_v1 > seg_len) || (from_v2 > seg_len);
	if (outside)
		distance = FLT_MAX;
}
// find the nearest inlet/outlet connection line of current click position
// return true and a value if found
inline bool epsilon_edge(int x, int y, unsigned &idx) {
float d = FLT_MAX;
float tmp_d;
unsigned tmp_i;
stim::vec3<float> v1;
stim::vec3<float> v2;
stim::vec3<float> v0 = stim::vec3<float>((float)x, (float)(vY - y), 0.0f);
v0[0] = v0[0] * (float)X / vX;
v0[1] = v0[1] * (float)Y / vY;
if (build_inlet) {
for (unsigned i = 0; i < inlet.size(); i++) {
if (inlet[i].V.size() == 2) { // direct line connection
v1 = inlet[i].V[0]; // the inlet port vertex
v2 = inlet[i].V[1]; // the dangle vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
// check whether the projection is on the line segment
is_outside(v0, v1, v2, d);
}
}
else if (inlet[i].V.size() == 3) { // broken line connection
// first half of bridge
v1 = inlet[i].V[0]; // the inlet port vertex
v2 = inlet[i].V[1]; // the corner vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
// second half of bridge
v1 = inlet[i].V[1]; // the corner vertex
v2 = inlet[i].V[2]; // the dangle vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
}
}
if (d < eps) {
idx = tmp_i;
return true;
}
}
else if (build_outlet) {
for (unsigned i = 0; i < outlet.size(); i++) {
if (outlet[i].V.size() == 2) { // direct line connection
// first half of bridge
v1 = outlet[i].V[0]; // the inlet port vertex
v2 = outlet[i].V[1]; // the dangle vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
}
else if (outlet[i].V.size() == 3) { // broken line connection
v1 = outlet[i].V[0]; // the inlet port vertex
v2 = outlet[i].V[1]; // the corner vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
// second half of bridge
v1 = outlet[i].V[1]; // the corner vertex
v2 = outlet[i].V[2]; // the dangle vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
}
}
if (d < eps) { // check to see whether the smallest distance is within the threshold
idx = tmp_i;
return true;
}
}
return false;
}
// find the nearest edge of the network
// return true, the edge index (idx) and the nearer endpoint (i: 0 = start,
// 1 = end) if one is within eps
// Fix: the loop counter shadowed the output parameter i, and the locals
// dx/dy/u shadowed the file-scope globals dx, dy (image scaling) and u
// (viscosity); all were renamed. tmp_i is also zero-initialized.
inline bool epsilon_edge(int x, int y, unsigned &idx, unsigned &i) {
	float d = FLT_MAX;				// nearest distance found so far
	float tmp_d;
	unsigned tmp_i = 0;				// index of the nearest edge
	stim::vec3<float> v1;
	stim::vec3<float> v2;
	// convert the click position from window to workspace coordinates
	stim::vec3<float> v0 = stim::vec3<float>((float)x, (float)(vY - y), 0.0f);
	v0[0] = v0[0] * (float)X / vX;
	v0[1] = v0[1] * (float)Y / vY;
	for (unsigned j = 0; j < E.size(); j++) {	// j, not i: do not shadow the output parameter
		v1 = V[E[j].p[0]].c;			// starting vertex
		v2 = V[E[j].p[1]].c;			// ending vertex
		// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
		tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
		if (tmp_d < d) {
			d = tmp_d;
			tmp_i = j;
			// check whether the projection is on the line segment
			is_outside(v0, v1, v2, d);
		}
	}
	if (d < eps) {
		idx = tmp_i;				// get the edge index
		// project the click onto the picked edge to decide which endpoint is nearer
		v1 = V[E[idx].p[0]].c;
		v2 = V[E[idx].p[1]].c;
		float seg_x = v2[0] - v1[0];		// renamed from dx: dx/dy are file-scope scaling globals
		float seg_y = v2[1] - v1[1];
		float dAB = seg_x * seg_x + seg_y * seg_y;
		float t = ((v0[0] - v1[0]) * seg_x + (v0[1] - v1[1]) * seg_y) / dAB;	// renamed from u: u is the viscosity global
		float px = v1[0] + t * seg_x;		// projection coordinates
		float py = v1[1] + t * seg_y;
		float l = (v1 - v2).len();
		tmp_d = sqrt(::pow(px - v1[0], 2) + ::pow(py - v1[1], 2));
		if (tmp_d < l - tmp_d)			// if the projection is near the starting vertex
			i = 0;
		else
			i = 1;
		return true;
	}
	return false;
}
// check whether the network already contains an edge between new_edge.p[0]
// and vertex idx (in either direction); return true if found
inline bool is_edge(unsigned idx) {
	for (unsigned i = 0; i < E.size(); i++) {	// brute-force scan of the edge list
		bool forward = (E[i].p[0] == new_edge.p[0] && E[i].p[1] == idx);
		bool backward = (E[i].p[1] == new_edge.p[0] && E[i].p[0] == idx);
		if (forward || backward)
			return true;
	}
	return false;
}
// find the distance between two vertices
inline float length(unsigned i) {
stim::vec3<float> v1 = V[E[i].p[0]].c;
stim::vec3<float> v2 = V[E[i].p[1]].c;
v1 = v1 - v2;
return v1.len();
}
// average the two endpoint radii of edge i
inline float radius(unsigned i) {
	float r_start = V[E[i].p[0]].r;
	float r_end = V[E[i].p[1]].r;
	return (r_start + r_end) / 2;
}
// find two envelope caps for two spheres
// the caps are the circles where a common external tangent cone touches each
// sphere, so that connecting cp1 to cp2 with a quad strip gives a smooth envelope
// (truncated cone) between the two spheres
// @param cp1, cp2: list of points on the cap
// @param center1, center2: center point of cap
// @param r1, r2: radii of cap
// NOTE(review): the tangent-point construction below only uses the x/y
// components, so it presumably assumes both centers lie in the z = 0 plane — verify
inline void find_envelope(std::vector<typename stim::vec3<float> > &cp1, std::vector<typename stim::vec3<float> > &cp2, stim::vec3<float> center1, stim::vec3<float> center2, float r1, float r2) {
	stim::vec3<float> tmp_d;
	if (r1 == r2) {				// two vertices have the same radius
		// degenerate case: the "cone" is a cylinder, so both caps are full
		// circles perpendicular to the center-to-center axis
		tmp_d = center2 - center1;		// calculate the direction vector
		tmp_d = tmp_d.norm();
		stim::circle<float> tmp_c;		// in order to get zero direction vector
		tmp_c.rotate(tmp_d);
		stim::circle<float> c1(center1, r1, tmp_d, tmp_c.U);
		stim::circle<float> c2(center2, r2, tmp_d, tmp_c.U);
		cp1 = c1.glpoints(subdivision);
		cp2 = c2.glpoints(subdivision);
	}
	else {
		if (r1 < r2) {			// switch index, we always want r1 to be larger than r2
			stim::vec3<float> tmp_c = center2;
			center2 = center1;
			center1 = tmp_c;
			float tmp_r = r2;
			r2 = r1;
			r1 = tmp_r;
		}
		tmp_d = center2 - center1;		// bigger one points to smaller one
		tmp_d = tmp_d.norm();
		float D = (center1 - center2).len();
		// exp is the apex of the tangent cone (external homothety center of the
		// two circles in the xy-plane); exp[2] is never read below
		stim::vec3<float> exp;
		exp[0] = (center2[0] * r1 - center1[0] * r2) / (r1 - r2);
		exp[1] = (center2[1] * r1 - center1[1] * r2) / (r1 - r2);
		// t1/t2: tangent points on circle 1; t3/t4: tangent points on circle 2
		stim::vec3<float> t1, t2, t3, t4;
		t1[2] = t2[2] = t3[2] = t4[2] = 0.0f;
		// first two: tangent points from exp to the circle (center1, r1)
		t1[0] = pow(r1, 2)*(exp[0] - center1[0]);
		t1[0] += r1*(exp[1] - center1[1])*sqrt(pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2) - pow(r1, 2));
		t1[0] /= (pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2));
		t1[0] += center1[0];
		t2[0] = pow(r1, 2)*(exp[0] - center1[0]);
		t2[0] -= r1*(exp[1] - center1[1])*sqrt(pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2) - pow(r1, 2));
		t2[0] /= (pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2));
		t2[0] += center1[0];
		t1[1] = pow(r1, 2)*(exp[1] - center1[1]);
		t1[1] -= r1*(exp[0] - center1[0])*sqrt(pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2) - pow(r1, 2));
		t1[1] /= (pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2));
		t1[1] += center1[1];
		t2[1] = pow(r1, 2)*(exp[1] - center1[1]);
		t2[1] += r1*(exp[0] - center1[0])*sqrt(pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2) - pow(r1, 2));
		t2[1] /= (pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2));
		t2[1] += center1[1];
		// check the correctness of the points
		//float s = (center1[1] - t1[1])*(exp[1] - t1[1]) / ((t1[0] - center1[0])*(t1[0] - exp[0]));
		//if (s != 1) {				// swap t1[1] and t2[1]
		//	float tmp_t = t2[1];
		//	t2[1] = t1[1];
		//	t1[1] = tmp_t;
		//}
		// second two: tangent points from exp to the circle (center2, r2)
		t3[0] = pow(r2, 2)*(exp[0] - center2[0]);
		t3[0] += r2*(exp[1] - center2[1])*sqrt(pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2) - pow(r2, 2));
		t3[0] /= (pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2));
		t3[0] += center2[0];
		t4[0] = pow(r2, 2)*(exp[0] - center2[0]);
		t4[0] -= r2*(exp[1] - center2[1])*sqrt(pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2) - pow(r2, 2));
		t4[0] /= (pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2));
		t4[0] += center2[0];
		t3[1] = pow(r2, 2)*(exp[1] - center2[1]);
		t3[1] -= r2*(exp[0] - center2[0])*sqrt(pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2) - pow(r2, 2));
		t3[1] /= (pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2));
		t3[1] += center2[1];
		t4[1] = pow(r2, 2)*(exp[1] - center2[1]);
		t4[1] += r2*(exp[0] - center2[0])*sqrt(pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2) - pow(r2, 2));
		t4[1] /= (pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2));
		t4[1] += center2[1];
		// check the correctness of the points
		//s = (center2[1] - t3[1])*(exp[1] - t3[1]) / ((t3[0] - center2[0])*(t3[0] - exp[0]));
		//if (s != 1) {				// swap t1[1] and t2[1]
		//	float tmp_t = t4[1];
		//	t4[1] = t3[1];
		//	t3[1] = tmp_t;
		//}
		stim::vec3<float> d1;
		float dot;
		float a;
		float new_r;
		stim::vec3<float> new_u;
		stim::vec3<float> new_c;
		// calculate the bigger circle: slide the cap from the sphere center
		// along the axis so it passes through the tangent point t1
		d1 = t1 - center1;
		dot = d1.dot(tmp_d);
		a = dot / (r1 * 1) * r1;		// a = cos(alpha) * radii
		new_c = center1 + a * tmp_d;	// cap center on the axis
		new_r = sqrt(pow(r1, 2) - pow(a, 2));	// cap radius by Pythagoras
		new_u = t1 - new_c;				// in-plane reference direction for the circle
		stim::circle<float> c1(new_c, new_r, tmp_d, new_u);
		cp1 = c1.glpoints(subdivision);
		// calculate the smaller circle the same way using t3
		d1 = t3 - center2;
		dot = d1.dot(tmp_d);
		a = dot / (r2 * 1) * r2;
		new_c = center2 + a * tmp_d;
		new_r = sqrt(pow(r2, 2) - pow(a, 2));
		new_u = t3 - new_c;
		stim::circle<float> c2(new_c, new_r, tmp_d, new_u);
		cp2 = c2.glpoints(subdivision);
	}
}
// check to see whether the current bridge is acceptable
// for every other port, derive the channel radius that would balance its flow
// against the minimum-radius port; a non-positive radius means the bridge is
// infeasible, in which case an error reminder is printed and the port is flagged
// NOTE(review): midx is only assigned when min_input_index / max_output_index is
// found in the corresponding list — presumably always true by construction; verify
inline void is_acceptable() {
	if (build_inlet) {
		unsigned midx;				// index of the minimum-pressure port within the inlet list
		for (unsigned i = 0; i < inlet.size(); i++) {
			if (inlet[i].v[0] == min_input_index) {
				midx = i;
				break;
			}
		}
		float tmp_r;				// balancing radius for the port under test
		unsigned idx;				// network vertex index of that port
		std::vector<bool> tmp(inlet.size(), true);
		std::swap(tmp, inlet_feasibility);	// reset all flags to feasible before re-checking
		for (unsigned i = 0; i < inlet.size(); i++) {
			idx = inlet[i].v[0];
			if (i != midx) {
				// mode 2: rectangular-channel pressure-drop model; mode 3: circular-channel model
				if (mode == 2)
					tmp_r = ((Flow.pressure[min_input_index] + ((12 * u * inlet[midx].l * inlet[midx].Q) / (::pow(h, 3) * 2 * minimum_radii)) - Flow.pressure[idx]) * (::pow(h, 3)) / (12 * u * inlet[i].l * inlet[i].Q)) / 2;
				else if (mode == 3)
					tmp_r = (Flow.pressure[min_input_index] + ((8 * u * inlet[midx].l * inlet[midx].Q) / (::pow(minimum_radii, 4) * (float)stim::PI)) - Flow.pressure[idx]) * (float)stim::PI / (8 * u * inlet[i].l * inlet[i].Q);
				if (tmp_r <= 0) {	// degenerate case where the radius is less than zero
					Sleep(100);		// make flash effect (Windows-specific)
					std::cout << "\r";
					std::cout << "[ ERROR ] ";
					std::cout << "Inlet bridge for vertex " << min_input_index << " is not feasible";
					inlet_feasibility[i] = false;
					break;			// one failure marks the whole bridge unacceptable
				}
				else				// feasible
					inlet_feasibility[i] = true;
			}
		}
	}
	else if (build_outlet) {		// same procedure for the outlet side
		unsigned midx;				// index of the maximum-pressure port within the outlet list
		for (unsigned i = 0; i < outlet.size(); i++) {
			if (outlet[i].v[0] == max_output_index) {
				midx = i;
				break;
			}
		}
		float tmp_r;
		unsigned idx;
		std::vector<bool> tmp(outlet.size(), true);
		std::swap(tmp, outlet_feasibility);
		for (unsigned i = 0; i < outlet.size(); i++) {
			idx = outlet[i].v[0];
			if (i != midx) {
				if (mode == 2)
					tmp_r = ((Flow.pressure[idx] - (Flow.pressure[max_output_index] - (12 * u * outlet[midx].l * outlet[midx].Q) / (::pow(h, 3) * 2 * minimum_radii))) * (::pow(h, 3)) / (12 * u * outlet[i].l * outlet[i].Q)) / 2;
				else if (mode == 3)
					tmp_r = (Flow.pressure[idx] - (Flow.pressure[max_output_index] - (8 * u * outlet[midx].l * outlet[midx].Q) / (::pow(minimum_radii, 4) * (float)stim::PI))) * (float)stim::PI / (8 * u * outlet[i].l * outlet[i].Q);
				if (tmp_r <= 0) {	// not enough length to satisfy the situation
					std::cout << "\r";
					std::cout << "[ ERROR ] ";
					std::cout << "Outlet bridge for vertex " << max_output_index << " is not feasible";
					outlet_feasibility[i] = false;
					break;
				}
				else				// feasible
					outlet_feasibility[i] = true;
			}
		}
	}
}
//****************************simulation functions*********************************
// get the network information: object counts, bounding box and dangle (boundary) vertices
// fills num_edge/num_vertex, expands the L/U bounding-box corners, rebuilds
// dangle_vertex (vertices touched by exactly one edge) and initializes Flow
void get_background() {
	num_edge = E.size();			// get the number of edges on the current network
	num_vertex = V.size();			// get the number of vertices on the current network
	// get the bounding box of the current network (L = lower corner, U = upper corner)
	float tmp;
	for (unsigned i = 0; i < num_vertex; i++) {
		for (unsigned j = 0; j < 3; j++) {
			tmp = V[i].c[j];
			if (tmp < L[j])
				L[j] = tmp;
			if (tmp > U[j])
				U[j] = tmp;
		}
	}
	// get the dangle vertices (degree == 1) with a single pass over the edges
	// instead of the O(V*E) scan; a self-loop counts once, matching the old test
	dangle_vertex.clear();
	std::vector<unsigned> degree(num_vertex, 0);	// degree[i] = number of edges touching vertex i
	for (unsigned j = 0; j < num_edge; j++) {
		degree[E[j].p[0]]++;
		if (E[j].p[1] != E[j].p[0])
			degree[E[j].p[1]]++;
	}
	for (unsigned i = 0; i < num_vertex; i++) {
		if (degree[i] == 1)
			dangle_vertex.push_back(i);
	}
	// print out
	std::cout << "OBJECT NUMBER" << std::endl;
	std::cout << "edge " << num_edge << std::endl;
	std::cout << "vertex " << num_vertex << std::endl;
	std::cout << "dangle vertex " << dangle_vertex.size() << std::endl;
	Flow.init(num_edge, num_vertex);	// initialize flow object
}
// initialize flow: zero the interior pressures and (on the first run) seed the
// boundary (dangle-vertex) pressures, splitting them around the mean x coordinate
void flow_initialize() {
	// clear the pressure of every non-dangle (interior) vertex
	for (unsigned i = 0; i < num_vertex; i++) {
		bool dangle = false;
		for (unsigned j = 0; j < dangle_vertex.size() && !dangle; j++)
			dangle = (dangle_vertex[j] == i);
		if (!dangle)
			Flow.P[i] = 0;
	}
	if (grow)		// when growing a new edge, keep the existing boundary pressures
		return;
	// compute the mean x coordinate of the boundary vertices
	float mid = 0.0f;
	unsigned n = dangle_vertex.size();
	for (unsigned i = 0; i < n; i++)
		mid += V[dangle_vertex[i]].c[0];
	mid /= dangle_vertex.size();
	// left side gets high pressure, right side gets low pressure
	for (unsigned i = 0; i < n; i++) {
		unsigned v = dangle_vertex[i];
		if (V[v].c[0] <= mid)
			Flow.P[v] = max_pressure - i * delta;	// minor discrepancy between ports
		else
			Flow.P[v] = (i + 1) * delta;			// the algorithm treats 0 as "no initial pressure"
	}
}
// find the stable flow state
// solves the linear system C * P = Q' for the vertex pressures given the
// boundary (dangle-vertex) pressures in Flow.P, derives the per-edge volume
// flow rate and velocity, classifies the boundary vertices into input/output
// ports, and builds the velocity color map used for rendering
// @param threshold: velocities with absolute value below this are clamped to zero
void find_stable_state(float threshold = 0.01f) {
	// clear up last time simulation
	input.clear();
	output.clear();
	std::vector<float> zero_QQ(num_vertex);
	std::swap(Flow.QQ, zero_QQ);
	std::vector<float> zero_pressure(num_vertex);
	std::swap(Flow.pressure, zero_pressure);
	// set the conductance matrix of the flow object (one entry per edge)
	unsigned start_vertex = 0;
	unsigned end_vertex = 0;
	for (unsigned i = 0; i < num_edge; i++) {
		start_vertex = E[i].p[0];		// get the start vertex index of current edge
		end_vertex = E[i].p[1];			// get the end vertex index of current edge
		if (mode == 2) {				// mode 2: rectangular (slab) channel model
			Flow.C[start_vertex][end_vertex] = -(2 * radius(i) * ::pow(h, 3)) / (12 * u * length(i));	// UNITS: g/mm^4/s
		}
		else if (mode == 3) {			// mode 3: circular channel (Poiseuille-form) model
			Flow.C[start_vertex][end_vertex] = -((float)stim::PI * ::pow(radius(i), 4)) / (8 * u * length(i));
		}
		Flow.C[end_vertex][start_vertex] = Flow.C[start_vertex][end_vertex];	// the matrix is symmetric
	}
	// set the diagonal to the negative sum of the row elements (Laplacian form)
	float sum = 0.0;
	for (unsigned i = 0; i < num_vertex; i++) {
		for (unsigned j = 0; j < num_vertex; j++) {
			sum += Flow.C[i][j];
		}
		Flow.C[i][i] = -sum;
		sum = 0.0;
	}
	// get the Q' vector QQ
	// matrix manipulation to zero out the conductance matrix as defined by the boundary values that were entered
	for (unsigned i = 0; i < num_vertex; i++) {
		if (Flow.P[i] != 0) {			// for every dangle vertex (known pressure)
			for (unsigned j = 0; j < num_vertex; j++) {
				if (j == i) {
					Flow.QQ[i] = Flow.C[i][i] * Flow.P[i];
				}
				else {
					// move the known-pressure contribution to the right-hand side
					Flow.C[i][j] = 0;
					Flow.QQ[j] = Flow.QQ[j] - Flow.C[j][i] * Flow.P[i];
					Flow.C[j][i] = 0;
				}
			}
		}
	}
	// get the inverse of the conductance matrix
	stim::matrix<float> _C(num_vertex, num_vertex);
	//float** _C = (float**)calloc(num_vertex, sizeof(float*));
	//for (unsigned i = 0; i < num_vertex; i++) {
	//	_C[i] = new float[num_vertex]();
	//}
	Flow.inversion(Flow.C, num_vertex, _C.data());
	// recover the pressure in the network: pressure = C^-1 * Q'
	for (unsigned i = 0; i < num_vertex; i++) {
		for (unsigned j = 0; j < num_vertex; j++) {
			//Flow.pressure[i] += _C[i][j] * Flow.QQ[j];
			Flow.pressure[i] += _C(i, j) * Flow.QQ[j];
		}
	}
	// get the flow state from the known pressures
	float start_pressure = 0.0;
	float end_pressure = 0.0;
	float deltaP = 0.0;
	for (unsigned i = 0; i < num_edge; i++) {
		start_vertex = E[i].p[0];
		end_vertex = E[i].p[1];
		start_pressure = Flow.pressure[start_vertex];	// get the start vertex pressure of current edge
		end_pressure = Flow.pressure[end_vertex];		// get the end vertex pressure of current edge
		deltaP = start_pressure - end_pressure;			// deltaP = Pa - Pb
		Flow.Q[i].first = start_vertex;
		Flow.Q[i].second = end_vertex;
		if (mode == 2) {
			// volume flow rate, then velocity = Q / cross-sectional area (h * width)
			Flow.Q[i].third = (2 * radius(i) * ::pow(h, 3) * deltaP) / (12 * u * length(i));
			E[i].v = Flow.Q[i].third / (h * 2 * radius(i));
		}
		else if (mode == 3) {
			// volume flow rate, then velocity = Q / (pi * r^2)
			Flow.Q[i].third = ((float)stim::PI * ::pow(radius(i), 4) * deltaP) / (8 * u * length(i));
			E[i].v = Flow.Q[i].third / ((float)stim::PI * ::pow(radius(i), 2));
		}
	}
	// find both input and output vertices among the boundary vertices:
	// the sign of Q on the single incident edge tells the flow direction
	stim::triple<unsigned, unsigned, float> tmp;
	unsigned N = dangle_vertex.size();		// get the number of dangle vertices
	unsigned idx = 0;
	for (unsigned i = 0; i < N; i++) {		// for every boundary vertex
		idx = dangle_vertex[i];
		for (unsigned j = 0; j < num_edge; j++) {	// for every edge
			if (Flow.Q[j].first == idx) {	// boundary vertex is the starting vertex
				if (Flow.Q[j].third > 0) {	// flow comes in
					tmp.first = idx;
					tmp.second = j;
					tmp.third = Flow.Q[j].third;
					input.push_back(tmp);
					break;
				}
				// there might be a degenerate case where it equals 0?
				else if (Flow.Q[j].third < 0) {	// flow comes out
					tmp.first = idx;
					tmp.second = j;
					tmp.third = -Flow.Q[j].third;
					output.push_back(tmp);
					break;
				}
			}
			else if (Flow.Q[j].second == idx) {	// boundary vertex is the ending vertex
				if (Flow.Q[j].third > 0) {	// flow arrives here, so it leaves the network
					tmp.first = idx;
					tmp.second = j;
					tmp.third = Flow.Q[j].third;
					output.push_back(tmp);
					break;
				}
				// there might be a degenerate case where it equals 0?
				else if (Flow.Q[j].third < 0) {	// flow comes out of here, so it enters the network
					tmp.first = idx;
					tmp.second = j;
					tmp.third = -Flow.Q[j].third;
					input.push_back(tmp);
					break;
				}
			}
		}
	}
	// find the absolute maximum and minimum velocities, clamping small values to zero
	// NOTE(review): std::fabsf is not guaranteed by the C++ standard library
	// (MSVC accepts it) — std::fabs would be the portable spelling
	std::vector<float> abs_V(num_edge);
	for (unsigned i = 0; i < num_edge; i++) {
		abs_V[i] = std::fabsf(E[i].v);
		if (abs_V[i] < threshold)
			abs_V[i] = 0.0f;
	}
	max_v = *std::max_element(abs_V.begin(), abs_V.end());
	min_v = *std::min_element(abs_V.begin(), abs_V.end());
	// get the color map based on the velocity range along the network
	// NOTE(review): num_edge and num_vertex are unsigned, so num_edge - num_vertex + 1
	// wraps instead of going negative; the test only behaves as intended when
	// num_edge == num_vertex - 1 — confirm whether other tree shapes can occur
	color.clear();
	if (dangle_vertex.size() == 2 && num_edge - num_vertex + 1 <= 0)	// only one inlet and one outlet
		color.resize(num_edge * 3, (unsigned char)0);
	else {
		color.resize(num_edge * 3);
		stim::cpu2cpu<float>(&abs_V[0], &color[0], num_edge, min_v, max_v, stim::cmBrewer);
	}
	color_bound = true;
	// sort the velocity bar in ascending order
	velocity_map.resize(num_edge);
	for (unsigned i = 0; i < num_edge; i++)
		velocity_map[i] = i;
	std::sort(velocity_map.begin(), velocity_map.end(), [&](int x, int y) {return abs_V[x] < abs_V[y]; });
	Flow.reset(num_vertex);		// reset flow object for the next simulation
	// find the minimum pressure input port
	if (input.size()) {
		min_input_index = input[0].first;
		for (unsigned i = 1; i < input.size(); i++) {
			unsigned idx = input[i].first;
			if (Flow.pressure[idx] < Flow.pressure[min_input_index])
				min_input_index = idx;
		}
	}
	// find the maximum pressure output port
	if (output.size()) {
		max_output_index = output[0].first;
		for (unsigned i = 1; i < output.size(); i++) {
			unsigned idx = output[i].first;
			if (Flow.pressure[idx] > Flow.pressure[max_output_index])
				max_output_index = idx;
		}
	}
	// size the feasibility flags to the number of inputs/outputs
	inlet_feasibility.resize(input.size(), true);
	outlet_feasibility.resize(output.size(), true);
}
// display and output the final (stable) state:
// dump pressures to pressure.csv and flow rates to flow.csv,
// echoing both tables to the console
void show_stable_state() {
	std::cout << std::endl;
	// save the pressure information to a CSV file
	std::ofstream p_file("pressure.csv");
	p_file << "Vertex, Pressure(g/" << units << "/s^2)" << std::endl;
	for (unsigned i = 0; i < num_vertex; i++)
		p_file << i << "," << Flow.pressure[i] << std::endl;
	p_file.close();
	// echo the pressure table to the console
	std::cout << "PRESSURE(g/" << units << "/s^2):" << std::endl;
	for (unsigned i = 0; i < num_vertex; i++)
		std::cout << "[" << i << "] " << Flow.pressure[i] << std::endl;
	// save the flow information to a CSV file
	std::ofstream f_file("flow.csv");
	f_file << "Edge, Volume flow rate(" << units << "^3/s)" << std::endl;
	for (unsigned i = 0; i < num_edge; i++)
		f_file << Flow.Q[i].first << "->" << Flow.Q[i].second << "," << Flow.Q[i].third << std::endl;
	f_file.close();
	// echo the flow-rate table to the console
	std::cout << "VOLUME FLOW RATE(" << units << "^3/s):" << std::endl;
	for (unsigned i = 0; i < num_edge; i++)
		std::cout << "(" << Flow.Q[i].first << "," << Flow.Q[i].second << ")" << Flow.Q[i].third << std::endl;
}
//****************************manufacture functions*********************************
// indicator functions
// indicator for sphere
// one thread per (ix, iy) pixel of slice z: the pixel is set to 255 when it
// falls inside the nearest sphere, otherwise it is left untouched
// @param V: device array of spheres (center c, radius r)
// @param num: number of spheres in V
// @param R: resolution in 4 dimensions (R[0] = 1, R[1] = x, R[2] = y, R[3] = z)
// @param S: voxel spacing in 4 dimensions
// @param ptr: device slice buffer of R[1] * R[2] bytes
// @param z: index of the current slice
// @param Size: total number of slices, used to center the stack along z
__global__ void find_near_sphere(const sphere* V, unsigned num, size_t* R, float* S, unsigned char* ptr, unsigned z, int Size) {
	unsigned ix = blockDim.x * blockIdx.x + threadIdx.x;		// col
	unsigned iy = blockDim.y * blockIdx.y + threadIdx.y;		// row
	if (ix >= R[1] || iy >= R[2]) return;		// avoid segfault
	if (num == 0) return;						// no spheres: idx below would stay uninitialized (UB)
	// convert the pixel index to world coordinates
	stim::vec3<float> world_pixel;
	world_pixel[0] = (float)ix * S[1];
	world_pixel[1] = (float)iy * S[2];
	world_pixel[2] = ((float)z - Size / 2) * S[3];
	float distance = FLT_MAX;		// distance to the nearest sphere center so far
	float tmp_distance;
	unsigned idx = 0;				// index of the nearest sphere so far
	for (unsigned i = 0; i < num; i++) {
		tmp_distance = (V[i].c - world_pixel).len();
		if (tmp_distance <= distance) {
			distance = tmp_distance;
			idx = i;
		}
	}
	if (distance <= V[idx].r)		// pixel lies inside the nearest sphere -> foreground
		ptr[(R[2] - 1 - iy) * R[0] * R[1] + ix * R[0]] = 255;	// y is flipped so the image is upright
}
// indicator for cylinder(envelope/hyperboloid)
// one thread per (ix, iy) pixel of slice z: the pixel is set to 255 when it
// falls inside the nearest cylinder, otherwise it is left untouched
// @param E: device array of cylinders (endpoints c1/c2, radii r1/r2)
// @param num: number of cylinders in E
// @param R: resolution in 4 dimensions (R[0] = 1, R[1] = x, R[2] = y, R[3] = z)
// @param S: voxel spacing in 4 dimensions
// @param ptr: device slice buffer of R[1] * R[2] bytes
// @param z: index of the current slice
// @param Size: total number of slices, used to center the stack along z
__global__ void find_near_cylinder(cylinder* E, unsigned num, size_t* R, float* S, unsigned char* ptr, unsigned z, int Size) {
	unsigned ix = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned iy = blockDim.y * blockIdx.y + threadIdx.y;
	if (ix >= R[1] || iy >= R[2]) return;		// avoid segfault
	// convert the pixel index to world coordinates
	stim::vec3<float> world_pixel;
	world_pixel[0] = (float)ix * S[1];
	world_pixel[1] = (float)iy * S[2];
	world_pixel[2] = ((float)z - Size / 2) * S[3];
	float distance = FLT_MAX;
	float tmp_distance;
	float rr = 0.0f;	// radius at the surface where the projection meets;
						// initialized so the final test fails safely when no
						// projection ever lands inside a segment (was read
						// uninitialized in that case)
	for (unsigned i = 0; i < num; i++) {		// find the nearest cylinder
		tmp_distance = ((world_pixel - E[i].c1).cross(world_pixel - E[i].c2)).len() / (E[i].c2 - E[i].c1).len();
		if (tmp_distance <= distance) {
			// we only focus on point-to-line-segment distance:
			// check whether the projection lies outside the line segment
			float a = (world_pixel - E[i].c1).dot((E[i].c2 - E[i].c1).norm());
			float b = (world_pixel - E[i].c2).dot((E[i].c1 - E[i].c2).norm());
			float length = (E[i].c1 - E[i].c2).len();
			if (a <= length && b <= length) {	// projection lies inside the line segment
				distance = tmp_distance;
				rr = E[i].r1 + (E[i].r2 - E[i].r1) * a / (length);	// radius changes linearly along the axis
			}
		}
	}
	if (distance <= rr)				// pixel lies inside the nearest cylinder -> foreground
		ptr[(R[2] - 1 - iy) * R[0] * R[1] + ix * R[0]] = 255;	// y is flipped so the image is upright
}
// make the image stack using the gpu
// renders the sphere list A and cylinder list B into a binary image stack
// (255 = inside an object, 0 = background), one slice at a time, and saves it
void make_image_stack() {
	std::cout << "[-----ON PROGRESS-----]" << std::endl;
	// initialize the image stack object
	I.init(1, size_x, size_y, size_z);
	I.set_dim(dx, dy, dz);
	// because of lack of memory, we have to compute one slice of the stack per time
	// allocate and upload the vertices (spheres) and edges (cylinders)
	sphere* d_V;
	cylinder* d_E;
	HANDLE_ERROR(cudaMalloc((void**)&d_V, A.size() * sizeof(sphere)));
	HANDLE_ERROR(cudaMalloc((void**)&d_E, B.size() * sizeof(cylinder)));
	HANDLE_ERROR(cudaMemcpy(d_V, &A[0], A.size() * sizeof(sphere), cudaMemcpyHostToDevice));
	HANDLE_ERROR(cudaMemcpy(d_E, &B[0], B.size() * sizeof(cylinder), cudaMemcpyHostToDevice));
	// allocate and upload the image stack information
	float* d_S;
	size_t* d_R;
	size_t* R = (size_t*)malloc(4 * sizeof(size_t));	// size in 4 dimensions
	R[0] = 1;
	R[1] = (size_t)size_x;
	R[2] = (size_t)size_y;
	R[3] = (size_t)size_z;
	float* S = (float*)malloc(4 * sizeof(float));		// spacing in 4 dimensions
	S[0] = 1.0f;
	S[1] = dx;
	S[2] = dy;
	S[3] = dz;
	size_t num = size_x * size_y;						// pixels per slice
	HANDLE_ERROR(cudaMalloc((void**)&d_S, 4 * sizeof(float)));
	HANDLE_ERROR(cudaMalloc((void**)&d_R, 4 * sizeof(size_t)));
	HANDLE_ERROR(cudaMemcpy(d_R, R, 4 * sizeof(size_t), cudaMemcpyHostToDevice));
	HANDLE_ERROR(cudaMemcpy(d_S, S, 4 * sizeof(float), cudaMemcpyHostToDevice));
	// pick the launch configuration once (it does not change per slice)
	cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, 0);					// get cuda device properties structure
	size_t max_thread = sqrt(prop.maxThreadsPerBlock);	// maximum threads per block side
	dim3 block(size_x / max_thread + 1, size_y / max_thread + 1);
	dim3 thread(max_thread, max_thread);
	// allocate the host/device slice buffers once and reuse them for every slice
	unsigned char* ptr = (unsigned char*)malloc(num * sizeof(unsigned char));
	unsigned char* d_ptr;
	HANDLE_ERROR(cudaMalloc((void**)&d_ptr, num * sizeof(unsigned char)));
	// for every slice of the image
	unsigned p = 0;				// percentage of progress
	for (unsigned i = 0; i < size_z; i++) {
		// clear the device slice: the kernels only write foreground (255) pixels,
		// so without this the background would contain stale device memory
		HANDLE_ERROR(cudaMemset(d_ptr, 0, num * sizeof(unsigned char)));
		find_near_sphere << <block, thread >> > (d_V, A.size(), d_R, d_S, d_ptr, i, size_z);
		HANDLE_ERROR(cudaGetLastError());				// catch launch-configuration errors
		cudaDeviceSynchronize();
		find_near_cylinder << <block, thread >> > (d_E, B.size(), d_R, d_S, d_ptr, i, size_z);
		HANDLE_ERROR(cudaGetLastError());
		// cudaMemcpy synchronizes, so the second kernel has finished by the time it returns
		HANDLE_ERROR(cudaMemcpy(ptr, d_ptr, num * sizeof(unsigned char), cudaMemcpyDeviceToHost));
		I.set(ptr, i);
		// print progress bar
		p = (float)(i + 1) / (float)size_z * 100;
		rtsProgressBar(p);
	}
	// clean up
	free(ptr);
	HANDLE_ERROR(cudaFree(d_ptr));
	free(R);
	free(S);
	HANDLE_ERROR(cudaFree(d_R));
	HANDLE_ERROR(cudaFree(d_S));
	HANDLE_ERROR(cudaFree(d_V));
	HANDLE_ERROR(cudaFree(d_E));
	if (stackdir == "")
		I.save_images("image????.bmp");
	else
		I.save_images(stackdir + "/image????.bmp");
	std::cout << std::endl << "[-----SUCCEEDED-----]" << std::endl;
}
// preparation for making the image stack
// rebuilds the manufacturing geometry: the sphere list A (vertices, feeders,
// bridge middle/corner points) and the cylinder list B (network edges plus the
// inlet/outlet bridge segments), then derives the stack size in pixels
// NOTE(review): cur_max_radii is only grown here, never reset — presumably the
// caller resets it before each call; verify
void preparation() {
	// clear the result from last time
	A.clear();
	B.clear();
	// firstly push back the network
	sphere new_sphere;
	cylinder new_cylinder;
	// push back the current network: one sphere per vertex, one cylinder per edge
	for (unsigned i = 0; i < num_vertex; i++) {
		new_sphere.c = V[i].c;
		new_sphere.r = V[i].r;
		A.push_back(new_sphere);
		if (V[i].r > cur_max_radii)		// track the largest radius for the stack depth
			cur_max_radii = V[i].r;
	}
	for (unsigned i = 0; i < num_edge; i++) {
		new_cylinder.c1 = V[E[i].p[0]].c;
		new_cylinder.c2 = V[E[i].p[1]].c;
		new_cylinder.r1 = V[E[i].p[0]].r;
		new_cylinder.r2 = V[E[i].p[1]].r;
		B.push_back(new_cylinder);
	}
	// remember where the bridge/feeder geometry begins so it can be drawn separately
	bridge_start_index = B.size();
	feeder_start_index = A.size();
	// push back the inlet main feeder
	if (inlet_done) {
		new_sphere.c = inlet_port;
		new_sphere.r = main_feeder_radii;
		A.push_back(new_sphere);
		if (main_feeder_radii > cur_max_radii)
			cur_max_radii = main_feeder_radii;
	}
	// push back the outlet main feeder
	// NOTE(review): unlike the inlet branch, this does not update cur_max_radii —
	// harmless if both feeders share main_feeder_radii, but confirm
	if (outlet_done) {
		new_sphere.c = outlet_port;
		new_sphere.r = main_feeder_radii;
		A.push_back(new_sphere);
	}
	// connect every input port to the inlet main feeder
	float mid_r;					// radius at the middle/corner point of the bridge
	float p1;						// length proportions of the two bridge segments
	float p2;
	stim::vec3<float> center1;
	stim::vec3<float> center2;
	float r1;
	float r2;
	for (unsigned i = 0; i < inlet.size(); i++) {
		if (inlet[i].V.size() == 2) {	// straight connection
			mid_r = 2 * inlet[i].r - 1.0f / 2.0f * (V[inlet[i].v[0]].r + default_radii);	// mid_r = 2*ave_r - 1/2(r1 + r2), set proportion to be half
			if (mid_r > cur_max_radii)
				cur_max_radii = mid_r;
			// calculate the envelope along the inlet
			// first half
			center1 = (inlet[i].V[0] + inlet[i].V[1]) / 2;	// normally, the radius of the middle point is the largest among those two
			center2 = inlet[i].V[0];
			r1 = mid_r;
			r2 = default_radii;
			// push back middle point
			new_sphere.c = center1;
			new_sphere.r = mid_r;
			A.push_back(new_sphere);
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
			//second half
			center2 = inlet[i].V[1];
			r2 = V[inlet[i].v[0]].r;
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
		}
		else {						// broken line connection
			p1 = (inlet[i].V[0] - inlet[i].V[1]).len() / inlet[i].l;	// calculate the two line segments length proportion
			p2 = (inlet[i].V[1] - inlet[i].V[2]).len() / inlet[i].l;
			// corner radius chosen so the length-weighted average equals inlet[i].r
			mid_r = (inlet[i].r - (p1 / 2 * default_radii + p2 / 2 * V[inlet[i].v[0]].r)) * 2;
			if (mid_r > cur_max_radii)
				cur_max_radii = mid_r;
			// first half
			center1 = inlet[i].V[1];
			center2 = inlet[i].V[0];
			r1 = mid_r;
			r2 = default_radii;
			// push back corner point
			new_sphere.c = center1;
			new_sphere.r = mid_r;
			A.push_back(new_sphere);
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
			// second half
			center2 = inlet[i].V[2];
			r2 = V[inlet[i].v[0]].r;
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
		}
	}
	// connect every output port to the outlet main feeder (mirrors the inlet loop)
	for (unsigned i = 0; i < outlet.size(); i++) {
		if (outlet[i].V.size() == 2) {	// straight connection
			mid_r = 2 * outlet[i].r - 1.0f / 2.0f * (V[outlet[i].v[0]].r + default_radii);	// mid_r = 2*ave_r - 1/2(r1 + r2), set proportion to be half
			if (mid_r > cur_max_radii)
				cur_max_radii = mid_r;
			// calculate the envelope along the outlet
			// first half
			center1 = (outlet[i].V[0] + outlet[i].V[1]) / 2;	// normally, the radius of the middle point is the largest of these two
			center2 = outlet[i].V[0];
			r1 = mid_r;
			r2 = default_radii;
			// push back middle point
			new_sphere.c = center1;
			new_sphere.r = mid_r;
			A.push_back(new_sphere);
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
			//second half
			center2 = outlet[i].V[1];
			r2 = V[outlet[i].v[0]].r;
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
		}
		else {						// broken line connection
			p1 = (outlet[i].V[0] - outlet[i].V[1]).len() / outlet[i].l;	// calculate the two line segments length proportion
			p2 = (outlet[i].V[1] - outlet[i].V[2]).len() / outlet[i].l;
			mid_r = (outlet[i].r - (p1 / 2 * default_radii + p2 / 2 * V[outlet[i].v[0]].r)) * 2;
			if (mid_r > cur_max_radii)
				cur_max_radii = mid_r;
			// first half
			center1 = outlet[i].V[1];
			center2 = outlet[i].V[0];
			r1 = mid_r;
			r2 = default_radii;
			// push back corner point
			new_sphere.c = center1;
			new_sphere.r = mid_r;
			A.push_back(new_sphere);
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
			// second half
			center2 = outlet[i].V[2];
			r2 = V[outlet[i].v[0]].r;
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
		}
	}
	// get the size of the image stack in pixels: x/y cover the workspace,
	// z covers the thickest object plus a small margin
	size_x = X / dx + 1;
	size_y = Y / dy + 1;
	size_z = 2.0f * cur_max_radii / dz;
	size_z += 5;				// expand a little bit
}
//*****************************glut functions*********************************
// dynamically rebuild the right-click menu
// @param num: number of options currently installed (all will be removed)
// @param range: number of options to install from the menu_option list
void glut_set_menu(int num, int range) {
	// remove the existing entries; entry 1 shifts down after each removal
	for (int k = 0; k < num; k++)
		glutRemoveMenuItem(1);
	// install the new entries from the option table (menu values are 1-based)
	for (int k = 0; k < range; k++) {
		std::string menu_name = menu_option[k];
		glutAddMenuEntry(menu_name.c_str(), k + 1);
	}
}
// glut projection setting, do squash transformation(from pyramid to cube)
// caches the window size into the member variables X and Y and sets the viewport
// NOTE(review): gluPerspective is applied between glPushMatrix/glPopMatrix, so
// the perspective set here is discarded by the pop — presumably the projection
// is rebuilt elsewhere each frame; verify against the display callback
void glut_projection() {
	glMatrixMode(GL_PROJECTION);			// operate on the projection matrix stack
	glPushMatrix();
	glLoadIdentity();
	X = glutGet(GLUT_WINDOW_WIDTH);			// cache the current window size
	Y = glutGet(GLUT_WINDOW_HEIGHT);
	glViewport(0, 0, X, Y);
	float aspect = (float)X / (float)Y;		// window aspect ratio
	gluPerspective(60, aspect, 0.1, 1000000);	// 60-degree FOV with a very deep far plane
	glPopMatrix();
}
// glut modelview setting, translate camera to origin
// NOTE(review): the push/loadIdentity/pop sequence leaves the matrix unchanged,
// so gluLookAt multiplies whatever modelview matrix was already current —
// presumably identity at this point in the frame; verify
void glut_modelview() {
	glMatrixMode(GL_MODELVIEW);				// operate on the modelview matrix stack
	glPushMatrix();
	glLoadIdentity();
	glPopMatrix();
	// build the view transform from the camera object's position/target/up
	stim::vec3<float> eye = cam.getPosition();
	stim::vec3<float> focus = cam.getLookAt();
	stim::vec3<float> up = cam.getUp();
	gluLookAt(eye[0], eye[1], eye[2], focus[0], focus[1], focus[2], up[0], up[1], up[2]);
}
// render each vertex as a filled 2D disc (triangle fan)
// in simulation modes the disc color encodes the boundary pressure
// (red = high, blue = low, gray = interior); in bridge-building mode the
// inlet/outlet main feeders are drawn as black discs as well
void glut_draw_point() {
	stim::circle<float> tmp_c;
	tmp_c.rotate(stim::vec3<float>(0.0, 0.0, -1.0));	// model circle waiting to be translated and scaled
	for (unsigned i = 0; i < V.size(); i++) {
		if (grow) {
			// while growing, skip the new_num most recently added vertices
			if (i >= V.size() - new_num)
				break;
		}
		if (!manufacture) {			// in modes except manufacture mode
			if (Flow.P.empty())		// if current vertex hasn't been set initial pressure
				glColor3f(0.992f, 0.859f, 0.780f);	// orange point
			else
				if (Flow.P[i] != 0) {
					// map pressure to a red(high)-to-blue(low) ramp
					stim::vec3<float> new_color;
					new_color[0] = (Flow.P[i] / max_pressure) > 0.5f ? 1.0f : 2.0f * Flow.P[i] / max_pressure;	// red
					new_color[1] = 0.0f;																		// green
					new_color[2] = (Flow.P[i] / max_pressure) > 0.5f ? 1.0f - 2.0f * (Flow.P[i] / max_pressure - 0.5f) : 1.0f;	// blue
					glColor3f(new_color[0], new_color[1], new_color[2]);
				}
				else
					glColor3f(0.5f, 0.5f, 0.5f);	// gray point
			stim::circle<float> c(V[i].c, V[i].r, stim::vec3<float>(0.0, 0.0, 1.0), tmp_c.U);	// create a circle in order to draw the point
			std::vector<typename stim::vec3<float> > cp = c.glpoints(20);	// get points along the circle
			glBegin(GL_TRIANGLE_FAN);		// draw circle as bunch of triangles
			glVertex2f(V[i].c[0], V[i].c[1]);
			// NOTE: this inner i intentionally shadows the outer vertex index (scope-local)
			for (unsigned i = 0; i < cp.size(); i++) {
				glVertex2f(cp[i][0], cp[i][1]);
			}
			glEnd();
			glFlush();
		}
	}
	if (!generate_network && !simulation && !manufacture) {
		glColor3f(0.0f, 0.0f, 0.0f);
		if (inlet.size() != 0) {
			// draw the inlet main feeder
			stim::circle<float> c(inlet_port, main_feeder_radii, stim::vec3<float>(0.0, 0.0, 1.0), tmp_c.U);	// create a circle in order to draw the point
			std::vector<typename stim::vec3<float> > cp = c.glpoints(20);	// get points along the circle
			glBegin(GL_TRIANGLE_FAN);
			glVertex2f(inlet_port[0], inlet_port[1]);
			for (unsigned i = 0; i < cp.size(); i++) {
				glVertex2f(cp[i][0], cp[i][1]);
			}
			glEnd();
			glFlush();
		}
		if (outlet.size() != 0) {
			// draw the outlet main feeder
			stim::circle<float> c(outlet_port, main_feeder_radii, stim::vec3<float>(0.0, 0.0, 1.0), tmp_c.U);	// create a circle in order to draw the point
			std::vector<typename stim::vec3<float> > cp = c.glpoints(20);	// get points along the circle
			glBegin(GL_TRIANGLE_FAN);
			glVertex2f(outlet_port[0], outlet_port[1]);
			for (unsigned i = 0; i < cp.size(); i++) {
				glVertex2f(cp[i][0], cp[i][1]);
			}
			glEnd();
			glFlush();
		}
	}
}
// render each centerline (edge) as a filled trapezoid whose width follows the
// endpoint radii; in bridge-building mode also draw the inlet/outlet bridge
// polylines (black = feasible, red = infeasible)
void glut_draw_line() {
	stim::vec3<float> ori_v;	// direction vector of current edge
	stim::vec3<float> per_v;	// vector perpendicular to direction vector
	stim::vec3<float> v1;		// four vertices for drawing trapezoid
	stim::vec3<float> v2;
	stim::vec3<float> v3;
	stim::vec3<float> v4;
	for (unsigned i = 0; i < E.size(); i++) {	// for every edge
		ori_v = V[E[i].p[1]].c - V[E[i].p[0]].c;
		ori_v = ori_v.norm();
		per_v[0] = -ori_v[1];	// for x dot y = 0, the best solution is x1 = -y2, y1 = x2
		per_v[1] = ori_v[0];
		per_v[2] = ori_v[2];	// NOTE(review): z is copied, not negated — perpendicularity only holds in the z = 0 plane; presumably the network is 2D here, verify
		// offset both endpoints sideways by their radii to get the trapezoid corners
		v1 = V[E[i].p[0]].c + V[E[i].p[0]].r * per_v;
		v2 = V[E[i].p[0]].c - V[E[i].p[0]].r * per_v;
		v3 = V[E[i].p[1]].c + V[E[i].p[1]].r * per_v;
		v4 = V[E[i].p[1]].c - V[E[i].p[1]].r * per_v;
		if (!manufacture) {
			if (color_bound)	// get corresponding color from the velocity color map
				glColor3f((float)color[i * 3 + 0] / 255, (float)color[i * 3 + 1] / 255, (float)color[i * 3 + 2] / 255);
			glBegin(GL_QUAD_STRIP);
			if (!color_bound) {
				glEnable(GL_BLEND);		// enable color blend
				glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);	// set blend function
				// semi-transparent color taken from the JACKCP palette
				glColor4f(JACKCP[color[i] * 3 + 0], JACKCP[color[i] * 3 + 1], JACKCP[color[i] * 3 + 2], 0.7f);
			}
			glVertex2f(v1[0], v1[1]);
			glVertex2f(v2[0], v2[1]);
			glVertex2f(v3[0], v3[1]);
			glVertex2f(v4[0], v4[1]);
			glEnd();
			if (!color_bound)
				glDisable(GL_BLEND);
		}
		glFlush();
	}
	if (!generate_network && !simulation && !manufacture) {
		glLineWidth(1);
		if (inlet.size() != 0) {
			for (unsigned i = 0; i < inlet.size(); i++) {
				if (inlet_feasibility[i])
					glColor3f(0.0f, 0.0f, 0.0f);	// black means feasible
				else
					glColor3f(1.0f, 0.0f, 0.0f);	// red means infeasible
				// draw the bridge polyline through its way points
				glBegin(GL_LINE_STRIP);
				for (unsigned j = 0; j < inlet[i].V.size(); j++) {
					glVertex2f(inlet[i].V[j][0], inlet[i].V[j][1]);
				}
				glEnd();
			}
		}
		if (outlet.size() != 0) {
			for (unsigned i = 0; i < outlet.size(); i++) {
				if (outlet_feasibility[i])
					glColor3f(0.0f, 0.0f, 0.0f);	// black means feasible
				else
					glColor3f(1.0f, 0.0f, 0.0f);	// red means infeasible
				glBegin(GL_LINE_STRIP);
				for (unsigned j = 0; j < outlet[i].V.size(); j++) {
					glVertex2f(outlet[i].V[j][0], outlet[i].V[j][1]);
				}
				glEnd();
			}
		}
		glFlush();
	}
}
// render the flow rate as a triangle on each edge: the triangle sits at the
// edge midpoint and points in the direction of flow (sign of E[i].v)
// @param threshold: edges with |velocity| below this draw no triangle
void glut_draw_triangle(float threshold = 0.01f) {
	stim::vec3<float> ori_v;	// edge direction vector
	stim::vec3<float> per_v;	// perpendicular vector of ori_v
	stim::vec3<float> mid_p;	// middle point of current edge
	stim::vec3<float> left;		// left point
	stim::vec3<float> right;	// right point
	stim::vec3<float> top;		// top point (apex, points along the flow)
	for (unsigned i = 0; i < E.size(); i++) {
		// find the perpendicular vector of current edge
		ori_v = V[E[i].p[1]].c - V[E[i].p[0]].c;
		ori_v = ori_v.norm();
		per_v[0] = -ori_v[1];
		per_v[1] = ori_v[0];
		per_v[2] = ori_v[2];
		mid_p = (V[E[i].p[0]].c + V[E[i].p[1]].c) / 2;
		left = mid_p + per_v * default_radii / 2;
		right = mid_p - per_v * default_radii / 2;
		// top is only assigned when |v| > threshold; the draw below is guarded
		// by the same condition, so it is never read unassigned
		if (E[i].v > threshold)
			top = mid_p + ori_v * default_radii * sqrt(3.0f);
		else if(E[i].v < -threshold)
			top = mid_p - ori_v * default_radii * sqrt(3.0f);
		if (E[i].v > threshold || E[i].v < -threshold) {
			glColor3f(0.600f, 0.847f, 0.788f);	// lime color
			glBegin(GL_TRIANGLES);
			glVertex2f(left[0], left[1]);
			glVertex2f(right[0], right[1]);
			glVertex2f(top[0], top[1]);
			glEnd();
			glFlush();
		}
	}
}
// render inlet/outlet bridge as cylinder
void glut_draw_bridge() {
glColor3f(0.0f, 0.0f, 0.0f);
std::vector<typename stim::vec3<float> > cp1(subdivision + 1);
std::vector<typename stim::vec3<float> > cp2(subdivision + 1);
// draw spheres on the end/middle of bridge
for (unsigned i = feeder_start_index; i < A.size(); i++) {
glPushMatrix();
glTranslatef(A[i].c[0], A[i].c[1], A[i].c[2]);
glutSolidSphere(A[i].r, subdivision, subdivision);
glPopMatrix();
}
// draw inlet/outlet bridge
for (unsigned i = bridge_start_index; i < B.size(); i++) {
// calculate the envelope caps
find_envelope(cp1, cp2, B[i].c1, B[i].c2, B[i].r1, B[i].r2);
glBegin(GL_QUAD_STRIP);
for (unsigned j = 0; j < cp1.size(); j++) {
glVertex3f(cp1[j][0], cp1[j][1], cp1[j][2]);
glVertex3f(cp2[j][0], cp2[j][1], cp2[j][2]);
}
glEnd();
}
glFlush();
}
// render every network vertex (and the two main feeders, if placed) as a solid sphere
void glut_draw_sphere() {
	glColor3f(0.0f, 0.0f, 0.0f);
	// one sphere per network vertex
	for (unsigned k = 0; k < V.size(); k++) {
		glPushMatrix();
		glTranslatef(V[k].c[0], V[k].c[1], V[k].c[2]);
		glutSolidSphere(V[k].r, subdivision, subdivision);
		glPopMatrix();
	}
	// inlet main feeder sphere (only once an inlet exists)
	if (inlet.size() != 0) {
		glPushMatrix();
		glTranslatef(inlet_port[0], inlet_port[1], inlet_port[2]);
		glutSolidSphere(main_feeder_radii, subdivision, subdivision);
		glPopMatrix();
	}
	// outlet main feeder sphere (only once an outlet exists)
	if (outlet.size() != 0) {
		glPushMatrix();
		glTranslatef(outlet_port[0], outlet_port[1], outlet_port[2]);
		glutSolidSphere(main_feeder_radii, subdivision, subdivision);
		glPopMatrix();
	}
	glFlush();
}
// render every edge as a cylinder: a quad strip stitched between the two
// envelope cap rings computed by find_envelope().
// Fixes: removed the unused locals tmp_d/tmp_n and hoisted glFlush() out of
// the per-edge loop (flushing once after all geometry is submitted produces
// the same image without forcing a pipeline flush per edge).
void glut_draw_cylinder() {
	glColor3f(0.0f, 0.0f, 0.0f);
	stim::vec3<float> center1;	// edge end-point positions
	stim::vec3<float> center2;
	float r1;					// edge end-point radii
	float r2;
	std::vector<typename stim::vec3<float> > cp1(subdivision + 1);
	std::vector<typename stim::vec3<float> > cp2(subdivision + 1);
	for (unsigned i = 0; i < E.size(); i++) {
		center1 = V[E[i].p[0]].c;
		center2 = V[E[i].p[1]].c;
		r1 = V[E[i].p[0]].r;
		r2 = V[E[i].p[1]].r;
		// calculate the envelope caps
		find_envelope(cp1, cp2, center1, center2, r1, r2);
		glBegin(GL_QUAD_STRIP);
		for (unsigned j = 0; j < cp1.size(); j++) {
			glVertex3f(cp1[j][0], cp1[j][1], cp1[j][2]);
			glVertex3f(cp2[j][0], cp2[j][1], cp2[j][2]);
		}
		glEnd();
	}
	glFlush();		// single flush after all edges are drawn
}
// main render function
void glut_render() {
glEnable(GL_SMOOTH);
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glut_draw_line(); // draw the edge as line
glut_draw_point(); // draw the vertex as point
if (!first_click && generate_network) { // render a transparent line to indicate your next click position
glEnable(GL_BLEND); // enable color blend
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // set blend function
glColor4f(JACKCP[color_index * 3 + 0], JACKCP[color_index * 3 + 1], JACKCP[color_index * 3 + 2], 0.2f);
stim::vec3<float> tmp_d;
stim::circle<float> tmp_c;
std::vector<typename stim::vec3<float> > cp1(subdivision + 1);
std::vector<typename stim::vec3<float> > cp2(subdivision + 1);
tmp_d = tmp_vertex.c - V[tmp_edge.p[0]].c;
tmp_d = tmp_d.norm();
tmp_c.rotate(tmp_d);
stim::circle<float> c1(V[tmp_edge.p[0]].c, V[tmp_edge.p[0]].r, tmp_d, tmp_c.U);
stim::circle<float> c2(tmp_vertex.c, tmp_vertex.r, tmp_d, tmp_c.U);
cp1 = c1.glpoints(subdivision);
cp2 = c2.glpoints(subdivision);
glBegin(GL_QUAD_STRIP);
for (unsigned j = 0; j < subdivision + 1; j++) {
glVertex3f(cp1[j][0], cp1[j][1], cp1[j][2]);
glVertex3f(cp2[j][0], cp2[j][1], cp2[j][2]);
}
glEnd();
glFlush();
glDisable(GL_BLEND);
}
if (grow) { // render a gray line to indicate grow edge
glColor3f(0.5f, 0.5f, 0.5f);
glBegin(GL_LINES);
glVertex2f(V[tmp_edge.p[0]].c[0], V[tmp_edge.p[0]].c[1]);
glVertex2f(tmp_vertex.c[0], tmp_vertex.c[1]);
glEnd();
// render the new edges and new vertex
for (unsigned i = num_vertex; i < V.size(); i++) {
glPointSize(10);
glBegin(GL_POINT);
glVertex2f(V[i].c[0], V[i].c[1]);
glEnd();
}
for (unsigned i = 0; i < tmp_E.size(); i++) {
glBegin(GL_LINES);
glVertex2f(V[tmp_E[i].p[0]].c[0], V[tmp_E[i].p[0]].c[1]);
glVertex2f(V[tmp_E[i].p[1]].c[0], V[tmp_E[i].p[1]].c[1]);
glEnd();
}
glFlush();
}
if (select_corner) {
glEnable(GL_BLEND); // enable color blend
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // set blend function
glColor4f(0.0f, 0.0f, 0.0f, 0.4f);
// draw the joint position as a point
glBegin(GL_POINT);
glVertex2f(corner_vertex[0], corner_vertex[1]);
glEnd();
// draw the bridge
glBegin(GL_LINE_STRIP);
if (build_inlet) {
glVertex2f(inlet[bridge_index].V[0][0], inlet[bridge_index].V[0][1]);
glVertex2f(corner_vertex[0], corner_vertex[1]);
unsigned idx = inlet[bridge_index].V.size() - 1;
glVertex2f(inlet[bridge_index].V[idx][0], inlet[bridge_index].V[idx][1]);
}
else if (build_outlet) {
glVertex2f(outlet[bridge_index].V[0][0], outlet[bridge_index].V[0][1]);
glVertex2f(corner_vertex[0], corner_vertex[1]);
unsigned idx = outlet[bridge_index].V.size() - 1;
glVertex2f(outlet[bridge_index].V[idx][0], outlet[bridge_index].V[idx][1]);
}
glEnd();
glFlush();
glDisable(GL_BLEND);
}
if (!manufacture) {
if (simulation || build_inlet || build_outlet) {
glut_draw_triangle();
}
for (unsigned i = 0; i < V.size(); i++) {
glColor3f(0.0f, 0.0f, 0.0f);
glRasterPos2f(V[i].c[0], V[i].c[1] + 0.5f); // mark index right above the vertex
std::stringstream ss;
ss << i;
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss.str().c_str()));
}
// bring up a pressure bar on left
if (select_pressure) {
glLineWidth(100);
glBegin(GL_LINES);
glColor3f(0.0f, 0.0f, 1.0f); // blue to red
glVertex2f(border * X / vX, border * Y / vY);
glColor3f(1.0, 0.0, 0.0);
glVertex2f(border * X / vX, (vY - 2 * border) * Y / vY);
glEnd();
glFlush();
// pressure bar text
glColor3f(0.0f, 0.0f, 0.0f);
glRasterPos2f(0.0f, (vY - border) * Y / vY);
std::stringstream ss_p;
ss_p << "Pressure Bar";
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str()));
// pressure range text
float step = vY - 3 * border;
step /= 10;
for (unsigned i = 0; i < 11; i++) {
glRasterPos2f((border * 1.5f) * X / vX, (border + i * step) * Y / vY);
std::stringstream ss_n;
ss_n << i * max_pressure / 10;
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str()));
}
}
}
// print the velocity range bar
if (simulation && !select_pressure) {
if (dangle_vertex.size() == 2 && num_edge - num_vertex + 1 <= 0) {
// do nothing
}
else {
float step = (vY - 3 * border) * Y / vY;
step /= BREWER_CTRL_PTS - 1;
for (unsigned i = 0; i < BREWER_CTRL_PTS - 1; i++) {
glLineWidth(100);
glBegin(GL_LINES);
glColor3f(BREWERCP[i * 4 + 0], BREWERCP[i * 4 + 1], BREWERCP[i * 4 + 2]);
glVertex2f(border * X / vX, border * Y / vY + i * step);
glColor3f(BREWERCP[(i + 1) * 4 + 0], BREWERCP[(i + 1) * 4 + 1], BREWERCP[(i + 1) * 4 + 2]);
glVertex2f(border * X / vX, border * Y / vY + (i + 1) * step);
glEnd();
}
glFlush();
// pressure bar text
glColor3f(0.0f, 0.0f, 0.0f);
glRasterPos2f(0.0f, (vY - border) * Y / vY);
std::stringstream ss_p;
ss_p << "Velocity range";
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str()));
// pressure range text
step = vY - 3 * border;
step /= 10;
for (unsigned i = 0; i < 11; i++) {
glRasterPos2f((border * 1.5f) * X / vX, (border + i * step) * Y / vY);
std::stringstream ss_n;
ss_n << min_v + i * (max_v - min_v) / 10;
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str()));
}
}
}
if (manufacture) {
glut_draw_sphere();
glut_draw_cylinder();
glut_draw_bridge();
}
if (radii_changed) {
glColor3f(0.835f, 0.243f, 0.310f);
glRasterPos2f(V[radii_index].c[0], V[radii_index].c[1] - 1.0f);
std::stringstream ss_r;
ss_r << "r=" << V[radii_index].r;
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_r.str().c_str()));
radii_changed = false;
}
glutSwapBuffers();
}
// register mouse click events
// Left-click behavior is dispatched on the current mode flags (generate_network,
// simulation, build_inlet/build_outlet, grow) and the modifier keys (CTRL).
// Right-click is reserved for the GLUT menu and returns immediately.
void glut_mouse(int button, int state, int x, int y) {
if (button == GLUT_RIGHT_BUTTON)
return; // right button opens the attached GLUT menu; nothing to do here
mods = glutGetModifiers(); // get special keyboard input
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN) {
std::cout << "\r"; // clear up ERROR reminder
std::cout << "\t\t\t\t\t\t\t\t\t";
std::cout.flush();
}
// --- generator mode: each click either snaps to an existing vertex or creates one,
// chaining them into a line of edges ---
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && generate_network && mods == 0) {
mouse_x = x; // get the click position in the window coordinates
mouse_y = y;
unsigned idx = UINT_MAX; // stores the vertex's index
if (first_click) { // first click of one line of edge
flag = epsilon_vertex(x, y, idx); // find out whether current position appears a vertex
if (flag) {
new_edge.p[0] = idx; // store the geometry start vertex index
tmp_edge.p[0] = idx;
num++;
}
else {
// window coordinates are flipped in y and scaled to world space [0,X]x[0,Y]
new_vertex.c = stim::vec3<float>(x, (vY - y), 0); // make a new vertex
new_vertex.c[0] = new_vertex.c[0] * (float)X / vX;
new_vertex.c[1] = new_vertex.c[1] * (float)Y / vY;
new_edge.p[0] = iter; // make a new edge and set the starting vertex
tmp_edge.p[0] = iter;
V.push_back(new_vertex); // push a new vertex
iter++; // iterator + 1
num++; // added a vertex
}
first_click = false; // finished first click
}
else { // following click of one line of edge
flag = epsilon_vertex(x, y, idx);
if (flag) {
if (!is_edge(idx)) { // no edge between two vertices
if (idx != UINT_MAX) { // acceptable click
new_edge.p[1] = idx;
if (new_edge.p[0] != new_edge.p[1]) { // simple graph, no loop and parallel edge
E.push_back(new_edge);
color.push_back(color_index); // record the color scheme
first_click = true;
num = 0; // start a new line of edges
color_index = (color_index == JACK_CTRL_PTS - 1) ? 0 : color_index + 1; // update color scheme for new line of edges
}
else {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "You can't do that!";
std::cout.flush();
}
}
}
else {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "There exists an edge between these two vertices";
std::cout.flush();
}
}
else {
// clicked empty space: create the vertex, close the current edge and
// immediately open a new edge starting from it
new_vertex.c = stim::vec3<float>(x, (vY - y), 0); // make a new vertex
new_vertex.c[0] = new_vertex.c[0] * (float)X / vX;
new_vertex.c[1] = new_vertex.c[1] * (float)Y / vY;
new_edge.p[1] = iter; // make a new edge and set the starting vertex to current
V.push_back(new_vertex); // push a new vertex
E.push_back(new_edge); // push a new edge
color.push_back(color_index); // record the color scheme
new_edge.p[0] = iter; // make a new edge and set the starting vertex to current
tmp_edge.p[0] = iter;
iter++; // iterator + 1
num++; // added a vertex
}
}
}
// --- simulation mode: modify pressure of a dangle vertex ---
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && simulation && mods == 0 && !grow) {
mouse_x = x;
mouse_y = y;
if (select_pressure) { // if a vertex had been selected to be modified pressure
if (vY - y < border || vY - y > vY - 2 * border) { // click outside the bar along y-axis
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "Click exceeds the range of pressure bar";
std::cout.flush();
}
else {
select_pressure = false; // finished setting the pressure of chose vertex
Flow.P[pressure_index] = (vY - mouse_y - border) / (vY - 3 * border) * max_pressure; // get the pressure value on pressure bar
system("CLS"); // clear up console box
std::cout << " ===================" << std::endl;
std::cout << "| SIMULATION MODE |" << std::endl;
std::cout << " ===================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Click dangle vertex to set pressure" << std::endl;
std::cout << " Move wheel to change radii of the vertex which the cursor meets" << std::endl;
// simulate again
find_stable_state();
show_stable_state();
}
}
else {
unsigned tmp_p = 0;
bool flag = epsilon_vertex(mouse_x, mouse_y, tmp_p); // note: shadows the global flag on purpose
if (flag) {
std::vector<unsigned>::iterator it = std::find(dangle_vertex.begin(), dangle_vertex.end(), tmp_p);
if (it == dangle_vertex.end()) { // if it is not dangle vertex
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "Only dangle vertex pressure need to be set";
std::cout.flush();
}
else { // if it is dangle vertex
select_pressure = true; // set flag to true
pressure_index = tmp_p; // stores the index of vertex
}
}
}
}
// --- inlet/outlet building: first click places the main feeder and creates
// one direct-line bridge per dangle vertex ---
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && (build_inlet || build_outlet) && !select_bridge && !select_corner && mods == 0) {
mouse_x = x;
mouse_y = y;
select_bridge = true;
if (build_inlet) {
inlet_port = stim::vec3<float>(x, (vY - y), 0); // get the inlet port coordinates
inlet_port[0] = inlet_port[0] * (float)X / vX;
inlet_port[1] = inlet_port[1] * (float)Y / vY;
inlet_done = true;
float tmp_l;
for (unsigned i = 0; i < input.size(); i++) {
stim::bridge<float> b;
// push back vertices
b.V.push_back(inlet_port);
b.V.push_back(V[input[i].first].c);
// one direct line
tmp_l = (inlet_port - V[input[i].first].c).len();
b.Q = input[i].third;
b.l = tmp_l;
b.v.push_back(input[i].first); // only store the dangle vertex index information
inlet.push_back(b);
}
// check out current connection
is_acceptable();
}
else if (build_outlet) {
outlet_port = stim::vec3<float>(x, (vY - y), 0); // get the outlet port coordinates
outlet_port[0] = outlet_port[0] * (float)X / vX;
outlet_port[1] = outlet_port[1] * (float)Y / vY;
outlet_done = true;
float tmp_l;
for (unsigned i = 0; i < output.size(); i++) {
stim::bridge<float> b;
// push back vertices
b.V.push_back(outlet_port);
b.V.push_back(V[output[i].first].c);
// one direct line
tmp_l = (outlet_port - V[output[i].first].c).len();
b.Q = output[i].third;
b.l = tmp_l;
b.v.push_back(output[i].first);
outlet.push_back(b);
}
// check out current connection
is_acceptable();
}
}
// select a bridge to modify
else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && select_bridge && mods == 0) {
mouse_x = x;
mouse_y = y;
bool flag = epsilon_edge(mouse_x, mouse_y, bridge_index);
if (flag) {
select_bridge = false;
select_corner = true;
}
else {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "No bridge at where your click";
}
}
// re connect the inlet/outlet that selected: replace the direct line with a
// two-segment path through the clicked corner vertex
else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && select_corner && mods == 0) {
mouse_x = x;
mouse_y = y;
mask_done = false; // recalculate the connection
corner_vertex = stim::vec3<float>(x, (vY - y), 0); // get the corner vertex
corner_vertex[0] = corner_vertex[0] * (float)X / vX;
corner_vertex[1] = corner_vertex[1] * (float)Y / vY;
if (build_inlet) {
stim::bridge<float> tmp_b;
tmp_b.V.push_back(inlet_port); // push back the inlet port vertex
tmp_b.V.push_back(corner_vertex); // push back the corner vertex
unsigned idx = inlet[bridge_index].V.size() - 1; // get the dangle vertex index from the inlet
tmp_b.V.push_back(inlet[bridge_index].V[idx]); // push back the dangle vertex
tmp_b.l = (tmp_b.V[0] - tmp_b.V[1]).len() + (tmp_b.V[1] - tmp_b.V[2]).len();
tmp_b.Q = inlet[bridge_index].Q;
tmp_b.v.push_back(inlet[bridge_index].v[0]);
tmp_b.r = inlet[bridge_index].r;
inlet[bridge_index] = tmp_b;
}
else if (build_outlet) {
stim::bridge<float> tmp_b;
tmp_b.V.push_back(outlet_port); // push back the outlet port vertex
tmp_b.V.push_back(corner_vertex); // push back the corner vertex
unsigned idx = outlet[bridge_index].V.size() - 1; // get the dangle vertex index from the outlet
tmp_b.V.push_back(outlet[bridge_index].V[idx]); // push back the dangle vertex
tmp_b.l = (tmp_b.V[0] - tmp_b.V[1]).len() + (tmp_b.V[1] - tmp_b.V[2]).len();
tmp_b.Q = outlet[bridge_index].Q;
tmp_b.v.push_back(outlet[bridge_index].v[0]);
tmp_b.r = outlet[bridge_index].r;
outlet[bridge_index] = tmp_b;
}
// check out current connection
is_acceptable();
select_corner = false;
select_bridge = true;
}
// left CTRL + left mouse to grow a line new edges from any vertex
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && simulation && mods == GLUT_ACTIVE_CTRL && grow) {
mouse_x = x;
mouse_y = y;
unsigned i;
bool flag = epsilon_edge(mouse_x, mouse_y, edge_index, i);
if (flag) {
// clicked an existing edge endpoint: commit the grown edges and re-simulate
for (unsigned j = 0; j < tmp_E.size(); j++)
E.push_back(tmp_E[j]);
new_vertex = V[E[edge_index].p[i]];
new_edge.p[1] = E[edge_index].p[i];
E.push_back(new_edge);
get_background(); // get network basic information
flow_initialize(); // initialize flow
find_stable_state(); // main function of solving the linear system
show_stable_state(); // output results as csv files
grow = false;
}
else {
// clicked empty space: append a provisional vertex/edge to the grow chain
new_vertex.c = stim::vec3<float>(x, (vY - y), 0); // make a new vertex
new_vertex.c[0] = new_vertex.c[0] * (float)X / vX;
new_vertex.c[1] = new_vertex.c[1] * (float)Y / vY;
unsigned num = V.size(); // get the new vertex index
V.push_back(new_vertex);
new_edge.p[1] = num;
tmp_E.push_back(new_edge);
new_edge.p[0] = num;
tmp_edge.p[0] = num;
new_num++;
}
}
else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && simulation && mods == GLUT_ACTIVE_CTRL && !grow) {
mouse_x = x;
mouse_y = y;
// new point information
unsigned i;
new_num = 0;
bool flag = epsilon_edge(mouse_x, mouse_y, edge_index, i);
if (flag) {
// start growing from the clicked edge endpoint
grow = true;
new_vertex = V[E[edge_index].p[i]];
new_edge.p[0] = E[edge_index].p[i];
tmp_edge.p[0] = E[edge_index].p[i];
}
else {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "No vertex at where your click";
}
}
}
// register mouse move events
void glut_motion(int x, int y) {
tmp_vertex.c = stim::vec3<float>(x, (vY - y), 0);
tmp_vertex.c[0] = tmp_vertex.c[0] * (float)X / vX;
tmp_vertex.c[1] = tmp_vertex.c[1] * (float)Y / vY;
corner_vertex[0] = tmp_vertex.c[0];
corner_vertex[1] = tmp_vertex.c[1];
glutPostRedisplay();
}
// register wheel events
// In simulation mode, scrolling over a vertex grows/shrinks its radius by
// radii_factor and re-runs the flow simulation.
void glut_wheel(int wheel, int direction, int x, int y) {
std::cout << "\r"; // clear up ERROR reminder
std::cout << "\t\t\t\t\t\t\t\t\t";
std::cout.flush();
if (simulation) {
flag = epsilon_vertex(x, y, radii_index); // did the cursor land on a vertex?
if (flag) {
radii_changed = true; // makes glut_render label the new radius for one frame
if (direction > 0) // increase radii
V[radii_index].r += radii_factor;
else {
V[radii_index].r -= radii_factor;
if (V[radii_index].r <= 0) { // degenerate case where radii less than 0
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "Radii is less than 0, reset to default radii";
V[radii_index].r = default_radii;
}
}
}
// NOTE(review): the console reset and re-simulation below run on EVERY wheel
// event in simulation mode, even when no vertex was hit — confirm intended
system("CLS"); // clear up console box
std::cout << " ===================" << std::endl;
std::cout << "| SIMULATION MODE |" << std::endl;
std::cout << " ===================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Click dangle vertex to set pressure" << std::endl;
std::cout << " Move wheel to change radii of the vertex which the cursor meets" << std::endl;
// simulate again
find_stable_state();
show_stable_state();
}
glutPostRedisplay();
}
// register keyboard inputs
// SPACE: start a new line of edges; c: reset main feeder; m: export image
// stack (GPU builds only); s: save the network as .obj; u: undo; ESC: quit.
void glut_keyboard(unsigned char key, int x, int y) {
switch (key) {
// press space to start a new line of edges
case 32:
first_click = true;
num = 0;
color_index = (color_index == JACK_CTRL_PTS - 1) ? 0 : color_index + 1; // update color scheme for new line of edges
break;
// reset main feeder position
case 'c':
if (build_inlet || build_outlet) {
select_bridge = false;
select_corner = false;
if (build_inlet) {
inlet_done = false;
inlet.clear(); // drop all inlet bridges; feeder must be re-placed
}
else if (build_outlet) {
outlet_done = false;
outlet.clear(); // drop all outlet bridges; feeder must be re-placed
}
mask_done = false;
}
break;
// output the image stack
case 'm':
if (manufacture) {
#ifdef __HIPCC__
make_image_stack();
#else
std::cout << "You need to have a gpu to make image stack, sorry." << std::endl;
#endif
}
break;
// output the drawn network
case 's':
{
// write a Wavefront .obj: v = positions, vt = radii, l = edges (1-based indices)
stringstream output_ss;
output_ss << name << "_" << sub_name << "_net" << ".obj";
std::string output_filename = output_ss.str();
std::ofstream output_file;
output_file.open(output_filename.c_str());
for (unsigned i = 0; i < V.size(); i++)
output_file << "v" << " " << V[i].c[0] << " " << V[i].c[1] << " " << V[i].c[2] << std::endl;
for (unsigned i = 0; i < V.size(); i++)
output_file << "vt" << " " << V[i].r << std::endl;
for (unsigned i = 0; i < E.size(); i++)
output_file << "l" << " " << E[i].p[0] + 1 << "/" << E[i].p[0] + 1 << " " << E[i].p[1] + 1 << "/" << E[i].p[1] + 1 << std::endl;
output_file.close();
sub_name++; // sub name change
break;
}
// undo
case 'u': {
// first vertex on a new line of edges
if (num == 1) {
bool flag = false; // check whether current vertex belongs to another edge
for (unsigned i = 0; i < E.size(); i++) {
if (new_edge.p[0] == E[i].p[0] || new_edge.p[0] == E[i].p[1]) {
flag = true;
break;
}
}
if (new_edge.p[0] == V.size() - 1 && !flag) { // new vertex
V.pop_back(); // pop back new vertex
iter--;
}
first_click = true;
num = 0;
}
// not first vertex
else if (num > 1) {
// step back to the previous edge's start vertex and drop the latest additions
new_edge.p[0] = E[E.size() - 1].p[0];
tmp_edge.p[0] = new_edge.p[0];
E.pop_back(); // pop back new "things"
color.pop_back();
V.pop_back();
iter--;
num--;
}
break;
}
// close window and exit application
case 27: // if keyboard 'ESC' is pressed, then exit
std::exit(1);
}
glutPostRedisplay();
}
// register glut menu options
// value selects the mode: 1 = generator, 2 = simulation, 3 = build inlet,
// 4 = build outlet, 5 = manufacture. Each branch resets the mode flags,
// prints its console help, and sets new_menu_num for glut_set_menu().
// Fixes: outlet main-feeder pressure previously used inlet[i].r instead of
// outlet[i].r (copy-paste bug); midx is now initialized so it is never read
// uninitialized when no bridge matches the min/max index.
void glut_menu(int value) {
	cur_menu_num = glutGet(GLUT_MENU_NUM_ITEMS);	// current menu size, needed to rebuild the menu
	if (value == 1) {			// generation mode
		system("CLS");			// clear up console
		std::cout << " ==================" << std::endl;
		std::cout << "| GENERATOR MODE |" << std::endl;
		std::cout << " ==================" << std::endl << std::endl;
		std::cout << "[ TIP ] ";
		std::cout << "Click to draw a network. (press SPACE to start a new line of edges)" << std::endl;
		// clear up previous work
		glClear(GL_COLOR_BUFFER_BIT);
		V.clear();
		E.clear();
		iter = 0;
		num = 0;
		// set up flags
		generate_network = true;
		simulation = false;
		manufacture = false;
		first_click = true;
		build_inlet = false;
		build_outlet = false;
		select_bridge = false;
		mask_done = false;
		color_bound = false;
		name++;				// name sequence increments
		new_menu_num = 2;	// set new menu option number
	}
	if (value == 2) {		// simulation mode
		// clear previous drawn buffer
		glClear(GL_COLOR_BUFFER_BIT);
		iter = 0;
		num = 0;
		system("CLS");		// clear up console box
		std::cout << " ===================" << std::endl;
		std::cout << "| SIMULATION MODE |" << std::endl;
		std::cout << " ===================" << std::endl << std::endl;
		std::cout << "[ TIP ] ";
		std::cout << "Click dangle vertex to set pressure" << std::endl;
		std::cout << " Move wheel to change radii of the vertex which the cursor meets" << std::endl;
		// set up flags
		generate_network = false;
		simulation = true;
		manufacture = false;
		build_inlet = false;
		build_outlet = false;
		select_bridge = false;
		mask_done = false;
		if (first_simulation) {
			get_background();	// get network basic information
			flow_initialize();	// initialize flow
			first_simulation = false;
		}
		// set other initial information then solve the network
		find_stable_state();	// main function of solving the linear system
		show_stable_state();	// output results as csv files
		// set the camera object
		stim::vec3<float> c = (L + U) * 0.5f;	// get the center of the bounding box
		stim::vec3<float> size = (U - L);		// get the size of the bounding box
		// place the camera along the z-axis at a distance determined by the network size along x and y
		cam.setPosition(c + stim::vec<float>(0, 0, camera_factor * ::max(size[0], size[1])));
		cam.LookAt(c[0], c[1], c[2]);
		new_menu_num = 5;	// set new menu option number
	}
	if (value == 3) {		// building inlet mode
		system("CLS");		// clear up console
		std::cout << " ====================" << std::endl;
		std::cout << "| BUILD INLET MODE |" << std::endl;
		std::cout << " ====================" << std::endl << std::endl;
		std::cout << "[ TIP ] ";
		std::cout << "Firstly, click any position to set inlet main feeder" << std::endl;
		std::cout << " Then, click any bridge to translocate" << std::endl;
		std::cout << " System will check and print current bridge status :)" << std::endl;
		std::cout << " Press c to delete inlet main feeder and bridges" << std::endl;
		std::cout << " If current bridge is not acceptable, you can either do:" << std::endl;
		std::cout << " [*1. increase the pressure at the vertex which is pointed out" << std::endl;
		std::cout << " 2. increase the length of connection at that vertex" << std::endl;
		std::cout << " 3. use more advance manufacture machine]" << std::endl;
		std::cout << "[ NOTE ] ";
		std::cout << "Delete main feeder before modify if you want to change input ports" << std::endl << std::endl;
		// set up flags
		if (!inlet_done) {	// first time need to set main feeder position
			generate_network = false;
			simulation = false;
			manufacture = false;
			build_inlet = true;
			build_outlet = false;
			select_pressure = false;
			select_bridge = false;
			select_corner = false;
			mask_done = false;
		}
		else {				// already set the inlet main feeder position
			generate_network = false;
			simulation = false;
			manufacture = false;
			build_inlet = true;
			build_outlet = false;
			select_pressure = false;
			select_bridge = true;
			// check out current connection
			is_acceptable();
		}
		new_menu_num = 5;	// set new menu option number
	}
	if (value == 4) {		// building outlet mode
		system("CLS");		// clear up console box
		std::cout << " =====================" << std::endl;
		std::cout << "| BUILD OUTLET MODE |" << std::endl;
		std::cout << " =====================" << std::endl << std::endl;
		std::cout << "[ TIP ] ";
		std::cout << "Firstly, click any position to set inlet main feeder" << std::endl;
		std::cout << " Then, click any bridge to translocate" << std::endl;
		std::cout << " System will check and print current bridge status :)" << std::endl;
		std::cout << " Press c to delete outlet main feeder and bridges" << std::endl;
		std::cout << " If current bridge is not acceptable, you can either do:" << std::endl;
		std::cout << " [*1. decrease the pressure at the vertex which is pointed out" << std::endl;
		std::cout << " 2. increase the length of connection at that vertex" << std::endl;
		std::cout << " 3. use more advance manufacture machine]" << std::endl;
		std::cout << "[ NOTE ] ";
		std::cout << "Delete main feeder before modify if you want to change output ports" << std::endl << std::endl;
		// set up flags
		if (!outlet_done) {	// first time need to set main feeder position
			generate_network = false;
			simulation = false;
			manufacture = false;
			build_inlet = false;
			build_outlet = true;
			select_pressure = false;
			select_bridge = false;
			select_corner = false;
			mask_done = false;
		}
		else {				// already set the outlet main feeder position
			generate_network = false;
			simulation = false;
			manufacture = false;
			build_inlet = false;
			build_outlet = true;
			select_bridge = true;
			select_pressure = false;
			select_corner = false;
			// check out current connection
			is_acceptable();
		}
		new_menu_num = 5;	// set new menu option number
	}
	if (value == 5) {		// manufacture mode
		system("CLS");		// clear up console box
		std::cout << " ====================" << std::endl;
		std::cout << "| MANUFACTURE MODE |" << std::endl;
		std::cout << " ====================" << std::endl << std::endl;
		std::cout << "[ TIP ] ";
		std::cout << "Press m to make and save image stack" << std::endl;
		// set up flags
		generate_network = false;
		simulation = false;
		manufacture = true;
		build_inlet = false;
		build_outlet = false;
		select_bridge = false;
		if (!mask_done) {
			// calculate the inlet connection radii
			unsigned midx = 0;	// BUGFIX: initialized — was read uninitialized if no bridge matched
			for (unsigned i = 0; i < inlet.size(); i++) {
				if (inlet[i].v[0] == min_input_index) {
					midx = i;
					break;
				}
			}
			for (unsigned i = 0; i < inlet.size(); i++) {
				unsigned idx = inlet[i].v[0];
				if (idx == min_input_index) {
					inlet[i].r = minimum_radii;	// set the maximum pressure connection to minimum radii
				}
				else {	// balance pressures: P1 + deltaP1 = P2 + deltaP2
					float tmp_r;
					if (mode == 2) {	// rectangular cross section
						tmp_r = (Flow.pressure[min_input_index] + ((12 * u * inlet[midx].l * inlet[midx].Q) / (::pow(h, 3) * 2 * minimum_radii)) - Flow.pressure[idx]) * (::pow(h, 3)) / (12 * u * inlet[i].l * inlet[i].Q);
						tmp_r = (1 / tmp_r) / 2;
					}
					else if (mode == 3) {	// circular cross section (Hagen-Poiseuille)
						tmp_r = (Flow.pressure[min_input_index] + ((8 * u * inlet[midx].l * inlet[midx].Q) / (::pow(minimum_radii, 4) * (float)stim::PI)) - Flow.pressure[idx]) * (float)stim::PI / (8 * u * inlet[i].l * inlet[i].Q);
						tmp_r = ::pow(1 / tmp_r, 1.0f / 4);
					}
					inlet[i].r = tmp_r;
				}
			}
			// calculate the outlet connection radii
			for (unsigned i = 0; i < outlet.size(); i++) {
				if (outlet[i].v[0] == max_output_index) {
					midx = i;
					break;
				}
			}
			for (unsigned i = 0; i < outlet.size(); i++) {
				unsigned idx = outlet[i].v[0];
				if (idx == max_output_index) {
					outlet[i].r = minimum_radii;	// set the maximum pressure connection to minimum radii
				}
				else {	// balance pressures: P1 - deltaP1 = P2 - deltaP2
					float tmp_r;
					if (mode == 2) {
						tmp_r = (Flow.pressure[idx] - (Flow.pressure[max_output_index] - (12 * u * outlet[midx].l * outlet[midx].Q) / (::pow(h, 3) * 2 * minimum_radii))) * (::pow(h, 3)) / (12 * u * outlet[i].l * outlet[i].Q);
						tmp_r = (1 / tmp_r) / 2;
					}
					else if (mode == 3) {
						tmp_r = (Flow.pressure[idx] - (Flow.pressure[max_output_index] - (8 * u * outlet[midx].l * outlet[midx].Q) / (::pow(minimum_radii, 4) * (float)stim::PI))) * (float)stim::PI / (8 * u * outlet[i].l * outlet[i].Q);
						tmp_r = ::pow(1 / tmp_r, 1.0f / 4);
					}
					outlet[i].r = tmp_r;
				}
			}
		}
		inlet_flow_rate = outlet_flow_rate = 0.0f;
		// calculate the main feeder flow rate and pressure
		for (unsigned i = 0; i < inlet.size(); i++) {
			inlet_flow_rate += fabsf(inlet[i].Q);
		}
		for (unsigned i = 0; i < outlet.size(); i++) {
			outlet_flow_rate += fabsf(outlet[i].Q);
		}
		// each iteration overwrites the feeder pressure; presumably all bridges
		// yield the same value by the balancing above — TODO confirm
		for (unsigned i = 0; i < inlet.size(); i++) {
			unsigned idx = inlet[i].v[0];
			if (mode == 2)
				inlet_pressure = Flow.pressure[idx] + (12 * u * inlet[i].l * inlet[i].Q) / (2 * inlet[i].r * ::pow(h, 3));
			else if (mode == 3)
				inlet_pressure = Flow.pressure[idx] + (8 * u * inlet[i].l * inlet[i].Q) / ((float)stim::PI * ::pow(inlet[i].r, 4));
		}
		for (unsigned i = 0; i < outlet.size(); i++) {
			unsigned idx = outlet[i].v[0];
			if (mode == 2)
				outlet_pressure = Flow.pressure[idx] - (12 * u * outlet[i].l * outlet[i].Q) / (2 * outlet[i].r * ::pow(h, 3));	// BUGFIX: was inlet[i].r
			else if (mode == 3)
				outlet_pressure = Flow.pressure[idx] - (8 * u * outlet[i].l * outlet[i].Q) / ((float)stim::PI * ::pow(outlet[i].r, 4));
		}
		mask_done = true;
		preparation();		// preparation for making image stack
		new_menu_num = 5;	// set new menu option number
	}
	// set up new menu
	glut_set_menu(cur_menu_num, new_menu_num);
	glutPostRedisplay();
}
// window reshape function: keep an orthographic projection mapping the world
// workspace [0,X]x[0,Y] onto the (possibly resized) window.
void glut_reshape(int x, int y) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
vX = glutGet(GLUT_WINDOW_WIDTH); // cache the new viewport size for window->world conversions
vY = glutGet(GLUT_WINDOW_HEIGHT);
glViewport(0, 0, vX, vY);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, X, 0.0, Y, -50.0, 50.0); // fixed z range for the 3D manufacture view
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
// glut initialization: create the window, register all callbacks and build
// the right-click menu. glutMouseWheelFunc is a freeglut extension.
void glut_initialize() {
int myargc = 1;
char* myargv[1];
myargv[0] = strdup("generate_network_network"); // fake argv for glutInit; intentionally never freed (lives for the process)
glutInit(&myargc, myargv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(800, 0);
glutInitWindowSize(1000, 1000);
glutCreateWindow("Generate Simple 2D network");
glutDisplayFunc(glut_render);
glutMouseFunc(glut_mouse);
glutPassiveMotionFunc(glut_motion);
glutMouseWheelFunc(glut_wheel);
glutKeyboardFunc(glut_keyboard);
glutReshapeFunc(glut_reshape);
// initilize menu
glutCreateMenu(glut_menu); // create a menu object
glut_set_menu(0, 2); // start with the generator/simulation entries
glutAttachMenu(GLUT_RIGHT_BUTTON); // register right mouse to open menu option
}
// output an advertisement for the lab, authors and usage information
// print the lab banner and the command-line usage summary to stdout
void advertise() {
	// every output line in order; the two leading empty strings produce the original blank lines
	static const char* const text[] = {
		"",
		"",
		" ==========================================================================================",
		"|Thank you for using the synthetic microvascular model generator for microfluidics tool!|",
		"|Scalable Tissue Imaging and Modeling (STIM) Lab, University of Houston                 |",
		"|Developers: Jiaming Guo, David Mayerich                                                |",
		"|Source: https://git.stim.ee.uh.edu/instrumentation/Microfluidics                       |",
		" ==========================================================================================",
		"",
		"usage: flow2",
		"--2d -> activate 2d mode to treat the cross-section as rectangular",
		"--units units-> string indicating output units (ex. um)",
		"--maxpress 2 -> maximal pressure for simulation",
		"--minradii 10 -> minimal manufacuture radius",
		"--fradii 15 -> main feeder radius",
		"--viscosity 0.00001 -> constant viscosity value",
		"--workspace 450 -> workspace size in terms of units",
		"--stackres 0.6 0.6 1.0 -> voxel size",
		"--stackdir /home/network/image_stack -> image stack saving directory"
	};
	for (unsigned i = 0; i < sizeof(text) / sizeof(text[0]); i++)
		std::cout << text[i] << std::endl;	// std::endl preserves the original per-line flush behavior
}
// argument and main loop
// entry point: parse the command line, configure globals, then hand control to GLUT
int main(int argc, char* argv[]) {
	HWND Window = GetConsoleWindow(); // set the window default window
	SetWindowPos(Window, 0, 0, 200, 0, 0, SWP_NOSIZE | SWP_NOZORDER); // position may vary based on the screen resolution
	stim::arglist args; // create an instance of arglist
	// add arguments
	args.add("help", "prints this help");
	args.add("2d", "activate 2d mode and set the height of microvascular channel (in units), default is 3d mode (circle cross section)");
	args.add("units", "string indicating units of length for output measurements (ex. velocity)", "um", "text string");
	args.add("maxpress", "maximum allowed pressure in g / units / s^2, default 2 is for blood when units = um", "2", "real value > 0");
	args.add("minradii", "minimum radii allowed for manufacture, default 5 is for blood when units = um", "5", "real value > 5");
	args.add("fradii", "radii of main feeders, default is 10 when units = um", "10", "real value > 5");
	args.add("viscosity", "set the viscosity of the fluid (in g / units / s), default .00001 is for blood when units = um", ".00001", "real value > 0");
	args.add("workspace", "sets the size of the workspace (in units)", "400", "real value > 0");
	args.add("stackres", "spacing between pixel samples in each dimension(in units/pixel)", ".184 .184 1", "real value > 0");
	args.add("stackdir", "set the directory of the output image stack", "", "any existing directory (ex. /home/name/network)");
	args.parse(argc, argv); // parse the command line
	// set up initial inputs
	if (args["help"].is_set()) { // test for help
		advertise(); // advertise here
		std::cout << args.str(); // output arguments
		std::exit(1);
	}
	// get the units to work on
	units = args["units"].as_string();
	// set the mode, default is 10 in um
	if (args["2d"].is_set()) {
		mode = 2; // 2d mode: rectangular cross-section; the flag value is the channel height
		h = args["2d"].as_float();
	}
	else { // default mode is 3d (circular cross-section)
		mode = 3;
	}
	// get the workspace size (square: X == Y)
	X = Y = args["workspace"].as_float();
	// get the voxel and image stack size
	dx = args["stackres"].as_float(0);
	dy = args["stackres"].as_float(1);
	dz = args["stackres"].as_float(2);
	// get the save directory of image stack
	if (args["stackdir"].is_set())
		stackdir = args["stackdir"].as_string();
	// blood pressure in capillaries range from 15 - 35 torr
	// 1 torr = 133.3 Pa
	max_pressure = args["maxpress"].as_float();
	// normal blood viscosity range from 4 - 15 mPas(cP)
	// 1 Pas = 1 g / mm / s
	u = args["viscosity"].as_float(); // g / units / s
	// get minimum radii for building bridge
	default_radii = minimum_radii = args["minradii"].as_float();
	new_vertex.r = default_radii;
	// get the main feeder radius
	main_feeder_radii = args["fradii"].as_float();
	// draw a network
	generate_network = true; // begin draw a new network
	std::cout << " ==================" << std::endl;
	std::cout << "| GENERATOR MODE |" << std::endl;
	std::cout << " ==================" << std::endl << std::endl;
	std::cout << "[ TIP ] ";
	std::cout << "Click to draw a new network. (press SPACE to start a new line of edges)" << std::endl;
	std::cout << "[ NOTE ] ";
	std::cout << "Press s to save the network and r to load the save" << std::endl;
	// glut main loop (does not return; GLUT owns the process from here)
	glut_initialize();
	glutMainLoop();
} | 9fb00faba60a038e2b41ba3c5e38bca308023638.cu | // STD include
#include <vector>
#include <thread>
// CUDA include
#ifdef __CUDACC__
#include "device_launch_parameters.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "cuda_runtime.h"
#endif
// OPENGL include
#include <GL/glut.h>
#include <GL/freeglut.h>
// STIM include
#include <stim/visualization/gl_network.h>
#include <stim/visualization/gl_aaboundingbox.h>
#include <stim/parser/arguments.h>
#include <stim/visualization/camera.h>
#include <stim/biomodels/flow.h>
#include <stim/visualization/colormap.h>
#include <stim/math/matrix.h>
#include <stim/grids/image_stack.h>
#include <stim/cuda/cudatools/error.h>
#include <stim/ui/progressbar.h>
//****************************parameter setting*********************************
// user input parameters
float u = 0.0f; // fluid viscosity (g / units / s)
float h = 0.0f; // height of edge(channel) in 2d mode
float dx, dy, dz; // x, y and z image scaling(units/pixel)
float main_feeder_radii; // default radii of main feeder (50um will be great for microfluidics manufacture)
float default_radii = 5.0f; // default radii of network vertex
float minimum_radii = 0.0f; // minimum radii that current machine can manufacture
float max_pressure = 0.0f; // maximum pressure that the channel can bear
int X; // workspace X
int Y; // workspace Y
int size_x, size_y, size_z; // size of image stack
std::string units; // units
std::string stackdir = ""; // directory where image stacks will be stored
// window console parameters
int mouse_x = -5; // mouse x window position
int mouse_y = -5; // mouse y window position
int vX; // viewport X
int vY; // viewport Y
// hard-coded parameters
float delta = 0.01f; // small pressure discrepancy used to keep boundary pressures distinct
float eps = 15.0f; // epsilon threshold for mouse picking (in workspace units)
std::vector<std::string> menu_option = { "generate network", "simulation", "build inlet", "build outlet", "manufacture" };
int cur_menu_num; // number of current menu option
int new_menu_num; // number of new menu option
int mode; // menu options (also selects 2d/3d cross-section in flow formulas)
int mods; // special keyboard input
float border = 20.0f; // bar edge position
float radii_factor = 0.4f; // change ratio of network vertex radii
GLint subdivision = 20; // slices and stacks
float cur_max_radii = 0.0f; // store the maximum radii in the network for manufacture
// new structure type definition
struct vertex {
	stim::vec3<float> c; // coordinates
	float r = default_radii; // radii
};
struct edge {
	unsigned p[2]; // start and end vertex indices
	float v = 0.0f; // velocity along edge
};
struct sphere {
	stim::vec3<float> c; // center of sphere
	float r; // radii
};
struct cylinder { // radii changes gradually
	stim::vec3<float> c1; // center of geometry start hat
	stim::vec3<float> c2; // center of geometry end hat
	float r1; // radii at start hat
	float r2; // radii at end hat
};
// parameters for generating new networks
bool generate_network = false; // flag indicates in generating network mode
bool first_click = true; // flag indicates first click of one line of edges
bool flag = false; // flag indicates found a near vertex or edge
unsigned num = 0; // number of vertex in a new line
unsigned iter = 0; // iterator indicates index of current vertex
unsigned name = 0; // output network's main name in sequences
unsigned sub_name = 0; // output network's sub_name in sequences
vertex new_vertex; // stores current acceptable vertex
vertex tmp_vertex; // temporarily stores a vertex when moving mouse
edge new_edge; // stores current acceptable edge
edge tmp_edge; // temporarily stores a edge when moving mouse
std::vector<unsigned> dangle_vertex; // boundary(dangle) vertices list
stim::vec3<float> L = stim::vec3<float>(FLT_MAX, FLT_MAX, FLT_MAX); // minimum point in the bounding box
stim::vec3<float> U = stim::vec3<float>(-FLT_MAX, -FLT_MAX, -FLT_MAX); // maximum point in the bounding box
std::vector<unsigned> color_scheme; // color scheme for each edge
unsigned color_index = 0;
// parameters for simulation
bool simulation = false; // flag indicates in simulation network mode
bool first_simulation = true; // initialize simulation, all inlet to maximum pressure, all outlet to zero pressure
bool select_pressure = false; // flag indicates having selected a vertex to modify pressure, next step is to set specific pressure value
bool select_radii = false; // flag indicates having selected a vertex to change radii, next step is to set specific radii value
bool radii_changed = false; // flag indicates one vertex has been changed radii
bool grow = false; // flag indicates grow new line of edges
unsigned pressure_index = 0; // index of picked vertex for pressure
unsigned radii_index = 0; // index of picked vertex for radii
unsigned edge_index = 0; // index of picked edge
float max_v; // maximum velocity in units / s
float min_v; // minimum velocity in units / s
stim::flow<float> Flow; // flow object for calculating network fluid flow
std::vector<typename stim::triple<unsigned, unsigned, float> > input; // first one store which vertex, second one stores which edge, third one stores in/out volume flow rate of that vertex
std::vector<typename stim::triple<unsigned, unsigned, float> > output;
std::vector<unsigned char> color; // color map based on velocity
bool color_bound = false; // flag indicates color map has been bound to 1D texture
std::vector<int> velocity_map; // velocity map
std::vector<typename edge> tmp_E; // temp list of edges
unsigned new_num = 0; // number of new growing vertex
// parameters for building bridge
bool build_inlet = false; // flag indicates in building inlet mode
bool build_outlet = false; // flag indicates in building outlet mode
bool select_bridge = false; // flag indicates now user can select bridge to modify
bool select_corner = false; // flag indicates having selected a bridge to modify, the next click is to choose a new position for the corner vertex
bool inlet_done = false; // finished choosing the inlet main feeder position
bool outlet_done = false; // finished choosing the outlet main feeder position
std::vector<typename stim::bridge<float> > inlet; // input bridge
std::vector<typename stim::bridge<float> > outlet; // output bridge
stim::vec3<float> inlet_port; // inlet main feeder port
stim::vec3<float> outlet_port; // outlet main feeder port
stim::vec3<float> corner_vertex; // corner vertex
unsigned bridge_index; // selected bridge index
float inlet_flow_rate = 0.0f; // volume flow rate at main inlet feeder
float outlet_flow_rate = 0.0f; // volume flow rate at main outlet feeder
float inlet_pressure; // pressure at main inlet feeder
float outlet_pressure; // pressure at main outlet feeder
unsigned min_input_index; // maximum output pressure index
unsigned max_output_index; // minimum input pressure index
std::vector<bool> inlet_feasibility;// list of flag indicates ith inlet bridge feasibility
std::vector<bool> outlet_feasibility;
// parameters for manufacture
bool manufacture = false; // flag indicates in manufacture mode
bool mask_done = false; // flag indicates having made a mask
// network
unsigned num_edge = 0; // number of edges in current network
unsigned num_vertex = 0; // number of vertices in current network
std::vector<vertex> V; // list of vertices
std::vector<edge> E; // list of edges
// image stack
stim::image_stack<unsigned char, float> I; // image stack object
std::vector<sphere> A; // sphere model for making image stack
unsigned feeder_start_index;
std::vector<cylinder> B; // cylinder model for making image stack
unsigned bridge_start_index;
// camera object
stim::camera cam; // camera object
float camera_factor = 1.2f; // start point of the camera as a function of X and Y size
// colors: Brewer-style control points used for per-edge coloring
#define JACK_CTRL_PTS 11
static float JACKCP[JACK_CTRL_PTS * 3] = { 0.671f, 0.851f, 0.914f,
										0.502f, 0.804f, 0.757f,
										0.651f, 0.851f, 0.416f,
										0.945f, 0.714f, 0.855f,
										0.600f, 0.439f, 0.671f,
										0.914f, 0.761f, 0.490f,
										0.729f, 0.729f, 0.729f,
										0.957f, 0.647f, 0.510f,
										0.996f, 0.878f, 0.565f,
										0.992f, 0.722f, 0.388f,
										0.957f, 0.427f, 0.263f };
//****************************auxiliary functions*********************************
// find the nearest vertex of current click position
// return true and a value if found
inline bool epsilon_vertex(int x, int y, unsigned& v) {
float d = FLT_MAX; // minimum distance between 2 vertices
float tmp_d = 0.0f; // temporary stores distance for loop
unsigned tmp_i = 0; // temporary stores connection index for loop
stim::vec3<float> tmp_v; // temporary stores current loop point
d = FLT_MAX; // set to max of float number
for (unsigned i = 0; i < V.size(); i++) {
tmp_v = stim::vec3<float>((float)x, (float)(vY - y), 0.0f);
tmp_v[0] = tmp_v[0] * (float)X / vX;
tmp_v[1] = tmp_v[1] * (float)Y / vY;
tmp_v = tmp_v - V[i].c; // calculate a vector between two vertices
tmp_d = tmp_v.len(); // calculate length of that vector
if (tmp_d < d) {
d = tmp_d; // if found a nearer vertex
tmp_i = i; // get the index of that vertex
}
}
if (d < eps) { // if current click is close to vertex we set before
// must have at least three point to make a plane or loop
if (tmp_i < num && (tmp_i == V.size() - 1 || tmp_i == V.size() - 2) && !first_click && mods == 0) {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "You can't do that!";
std::cout.flush();
}
else {
v = tmp_i; // copy the extant vertex's index to v
}
return true;
}
return false;
}
// check out whether the projection of v0 onto line segment v1-v2 is on extensed line
// set distance to FLT_MAX if true
// disqualify a candidate segment: when the projection of v0 onto segment v1-v2
// falls beyond either endpoint, force its distance to FLT_MAX
inline void is_outside(stim::vec3<float> v0, stim::vec3<float> v1, stim::vec3<float> v2, float &distance) {
	float proj_from_v1 = (v0 - v1).dot((v2 - v1).norm());	// signed projection measured from v1
	float proj_from_v2 = (v0 - v2).dot((v1 - v2).norm());	// signed projection measured from v2
	float seg_len = (v2 - v1).len();
	bool beyond = (proj_from_v1 > seg_len) || (proj_from_v2 > seg_len);
	if (beyond)
		distance = FLT_MAX;
}
// find the nearest inlet/outlet connection line of current click position
// return true and a value if found
inline bool epsilon_edge(int x, int y, unsigned &idx) {
float d = FLT_MAX;
float tmp_d;
unsigned tmp_i;
stim::vec3<float> v1;
stim::vec3<float> v2;
stim::vec3<float> v0 = stim::vec3<float>((float)x, (float)(vY - y), 0.0f);
v0[0] = v0[0] * (float)X / vX;
v0[1] = v0[1] * (float)Y / vY;
if (build_inlet) {
for (unsigned i = 0; i < inlet.size(); i++) {
if (inlet[i].V.size() == 2) { // direct line connection
v1 = inlet[i].V[0]; // the inlet port vertex
v2 = inlet[i].V[1]; // the dangle vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
// check whether the projection is on the line segment
is_outside(v0, v1, v2, d);
}
}
else if (inlet[i].V.size() == 3) { // broken line connection
// first half of bridge
v1 = inlet[i].V[0]; // the inlet port vertex
v2 = inlet[i].V[1]; // the corner vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
// second half of bridge
v1 = inlet[i].V[1]; // the corner vertex
v2 = inlet[i].V[2]; // the dangle vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
}
}
if (d < eps) {
idx = tmp_i;
return true;
}
}
else if (build_outlet) {
for (unsigned i = 0; i < outlet.size(); i++) {
if (outlet[i].V.size() == 2) { // direct line connection
// first half of bridge
v1 = outlet[i].V[0]; // the inlet port vertex
v2 = outlet[i].V[1]; // the dangle vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
}
else if (outlet[i].V.size() == 3) { // broken line connection
v1 = outlet[i].V[0]; // the inlet port vertex
v2 = outlet[i].V[1]; // the corner vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
// second half of bridge
v1 = outlet[i].V[1]; // the corner vertex
v2 = outlet[i].V[2]; // the dangle vertex
// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
if (tmp_d < d) {
d = tmp_d;
tmp_i = i;
is_outside(v0, v1, v2, d);
}
}
}
if (d < eps) { // check to see whether the smallest distance is within the threshold
idx = tmp_i;
return true;
}
}
return false;
}
// find the nearest edge
// retrun true, edge index and index within edge if found
// find the nearest network edge to the current click position
// returns true, the edge index via idx, and which endpoint the click is nearer
// to via i (0 = start vertex, 1 = end vertex)
inline bool epsilon_edge(int x, int y, unsigned &idx, unsigned &i) {
	float d = FLT_MAX;
	float tmp_d;
	unsigned tmp_i = 0;
	stim::vec3<float> v1;
	stim::vec3<float> v2;
	// convert the mouse position (window pixels, y flipped) to workspace coordinates
	stim::vec3<float> v0 = stim::vec3<float>((float)x, (float)(vY - y), 0.0f);
	v0[0] = v0[0] * (float)X / vX;
	v0[1] = v0[1] * (float)Y / vY;
	// loop index renamed from i to j: the original shadowed the output parameter i
	for (unsigned j = 0; j < E.size(); j++) {
		v1 = V[E[j].p[0]].c; // starting vertex
		v2 = V[E[j].p[1]].c; // ending vertex
		// the distance between a point and a line segment, d = (|(x0 - x1)x(x0 - x2)|) / (|x2 - x1|)
		tmp_d = ((v0 - v1).cross(v0 - v2)).len() / (v2 - v1).len();
		if (tmp_d < d) {
			d = tmp_d;
			tmp_i = j;
			// check whether the projection is on the line segment
			is_outside(v0, v1, v2, d);
		}
	}
	if (d < eps) {
		idx = tmp_i; // get the edge index
		// project the click point onto the picked edge;
		// locals renamed (ddx/ddy/t) so they no longer shadow the globals dx, dy and u
		v1 = V[E[idx].p[0]].c;
		v2 = V[E[idx].p[1]].c;
		float ddx = v2[0] - v1[0];
		float ddy = v2[1] - v1[1];
		float dAB = ddx * ddx + ddy * ddy;
		float t = ((v0[0] - v1[0]) * ddx + (v0[1] - v1[1]) * ddy) / dAB;
		float px = v1[0] + t * ddx; // the projection coordinates
		float py = v1[1] + t * ddy;
		float l = (v1 - v2).len();
		tmp_d = sqrt(std::pow(px - v1[0], 2) + std::pow(py - v1[1], 2));
		if (tmp_d < l - tmp_d) // if the projection is near starting vertex
			i = 0;
		else
			i = 1;
		return true;
	}
	return false;
}
// check whether there is a edge between two vertices
// return true if found
// return true when an edge already connects new_edge.p[0] with vertex idx
// (checked in both orientations, brute force over the edge list)
inline bool is_edge(unsigned idx) {
	for (unsigned k = 0; k < E.size(); k++) {
		bool forward = (E[k].p[0] == new_edge.p[0]) && (E[k].p[1] == idx);
		bool backward = (E[k].p[1] == new_edge.p[0]) && (E[k].p[0] == idx);
		if (forward || backward)
			return true;
	}
	return false;
}
// find the distance between two vertices
inline float length(unsigned i) {
stim::vec3<float> v1 = V[E[i].p[0]].c;
stim::vec3<float> v2 = V[E[i].p[1]].c;
v1 = v1 - v2;
return v1.len();
}
// find the average radius of one edge
// average radius of edge i (mean of its two endpoint radii)
inline float radius(unsigned i) {
	float r_sum = V[E[i].p[0]].r + V[E[i].p[1]].r;
	return r_sum / 2;
}
// find two envelope caps for two spheres
// @param cp1, cp2: list of points on the cap
// @param center1, center2: center point of cap
// @param r1, r2: radii of cap
// compute the two circles bounding the conical envelope between two spheres,
// returning their perimeter points (cp1/cp2) for rendering.
// equal radii -> two parallel circles perpendicular to the center line;
// unequal radii -> external tangent construction in the z = 0 plane.
inline void find_envelope(std::vector<typename stim::vec3<float> > &cp1, std::vector<typename stim::vec3<float> > &cp2, stim::vec3<float> center1, stim::vec3<float> center2, float r1, float r2) {
	stim::vec3<float> tmp_d;
	if (r1 == r2) { // two vertices have the same radius
		tmp_d = center2 - center1; // calculate the direction vector
		tmp_d = tmp_d.norm();
		stim::circle<float> tmp_c; // in order to get zero direction vector
		tmp_c.rotate(tmp_d);
		stim::circle<float> c1(center1, r1, tmp_d, tmp_c.U);
		stim::circle<float> c2(center2, r2, tmp_d, tmp_c.U);
		cp1 = c1.glpoints(subdivision);
		cp2 = c2.glpoints(subdivision);
	}
	else {
		if (r1 < r2) { // switch index, we always want r1 to be larger than r2
			stim::vec3<float> tmp_c = center2;
			center2 = center1;
			center1 = tmp_c;
			float tmp_r = r2;
			r2 = r1;
			r1 = tmp_r;
		}
		tmp_d = center2 - center1; // bigger one points to smaller one
		tmp_d = tmp_d.norm();
		float D = (center1 - center2).len();
		// exp is the external homothety point where the common tangents intersect
		// NOTE(review): exp[2] is never assigned here; the math below only uses x/y,
		// but confirm stim::vec3 zero-initializes before relying on exp elsewhere
		stim::vec3<float> exp;
		exp[0] = (center2[0] * r1 - center1[0] * r2) / (r1 - r2);
		exp[1] = (center2[1] * r1 - center1[1] * r2) / (r1 - r2);
		// t1/t2 are the tangent points on the big sphere, t3/t4 on the small one (z = 0)
		stim::vec3<float> t1, t2, t3, t4;
		t1[2] = t2[2] = t3[2] = t4[2] = 0.0f;
		// first two: tangent points of the circle (center1, r1) as seen from exp
		t1[0] = pow(r1, 2)*(exp[0] - center1[0]);
		t1[0] += r1*(exp[1] - center1[1])*sqrt(pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2) - pow(r1, 2));
		t1[0] /= (pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2));
		t1[0] += center1[0];
		t2[0] = pow(r1, 2)*(exp[0] - center1[0]);
		t2[0] -= r1*(exp[1] - center1[1])*sqrt(pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2) - pow(r1, 2));
		t2[0] /= (pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2));
		t2[0] += center1[0];
		t1[1] = pow(r1, 2)*(exp[1] - center1[1]);
		t1[1] -= r1*(exp[0] - center1[0])*sqrt(pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2) - pow(r1, 2));
		t1[1] /= (pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2));
		t1[1] += center1[1];
		t2[1] = pow(r1, 2)*(exp[1] - center1[1]);
		t2[1] += r1*(exp[0] - center1[0])*sqrt(pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2) - pow(r1, 2));
		t2[1] /= (pow((exp[0] - center1[0]), 2) + pow((exp[1] - center1[1]), 2));
		t2[1] += center1[1];
		// check the correctness of the points
		//float s = (center1[1] - t1[1])*(exp[1] - t1[1]) / ((t1[0] - center1[0])*(t1[0] - exp[0]));
		//if (s != 1) {			// swap t1[1] and t2[1]
		//	float tmp_t = t2[1];
		//	t2[1] = t1[1];
		//	t1[1] = tmp_t;
		//}
		// second two: tangent points of the circle (center2, r2) as seen from exp
		t3[0] = pow(r2, 2)*(exp[0] - center2[0]);
		t3[0] += r2*(exp[1] - center2[1])*sqrt(pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2) - pow(r2, 2));
		t3[0] /= (pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2));
		t3[0] += center2[0];
		t4[0] = pow(r2, 2)*(exp[0] - center2[0]);
		t4[0] -= r2*(exp[1] - center2[1])*sqrt(pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2) - pow(r2, 2));
		t4[0] /= (pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2));
		t4[0] += center2[0];
		t3[1] = pow(r2, 2)*(exp[1] - center2[1]);
		t3[1] -= r2*(exp[0] - center2[0])*sqrt(pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2) - pow(r2, 2));
		t3[1] /= (pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2));
		t3[1] += center2[1];
		t4[1] = pow(r2, 2)*(exp[1] - center2[1]);
		t4[1] += r2*(exp[0] - center2[0])*sqrt(pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2) - pow(r2, 2));
		t4[1] /= (pow((exp[0] - center2[0]), 2) + pow((exp[1] - center2[1]), 2));
		t4[1] += center2[1];
		// check the correctness of the points
		//s = (center2[1] - t3[1])*(exp[1] - t3[1]) / ((t3[0] - center2[0])*(t3[0] - exp[0]));
		//if (s != 1) {			// swap t1[1] and t2[1]
		//	float tmp_t = t4[1];
		//	t4[1] = t3[1];
		//	t3[1] = tmp_t;
		//}
		stim::vec3<float> d1;
		float dot;
		float a;
		float new_r;
		stim::vec3<float> new_u;
		stim::vec3<float> new_c;
		// calculate the bigger circle: project the tangent point onto the center line
		// to get the cap center, then derive the cap radius and in-plane direction
		d1 = t1 - center1;
		dot = d1.dot(tmp_d);
		a = dot / (r1 * 1) * r1; // a = cos(alpha) * radii
		new_c = center1 + a * tmp_d;
		new_r = sqrt(pow(r1, 2) - pow(a, 2));
		new_u = t1 - new_c;
		stim::circle<float> c1(new_c, new_r, tmp_d, new_u);
		cp1 = c1.glpoints(subdivision);
		// calculate the smaller circle, same construction on the second sphere
		d1 = t3 - center2;
		dot = d1.dot(tmp_d);
		a = dot / (r2 * 1) * r2;
		new_c = center2 + a * tmp_d;
		new_r = sqrt(pow(r2, 2) - pow(a, 2));
		new_u = t3 - new_c;
		stim::circle<float> c2(new_c, new_r, tmp_d, new_u);
		cp2 = c2.glpoints(subdivision);
	}
}
// check to see whether current bridge is acceptable
// if it is not acceptable, print error reminder
// check whether every current inlet (or outlet) bridge is physically feasible:
// for each bridge, solve the Poiseuille relation (mode 2 = rectangular channel,
// mode 3 = circular channel) for the radius that matches the required pressure
// drop; a non-positive radius means the bridge cannot be manufactured.
// results are recorded in inlet_feasibility / outlet_feasibility.
inline void is_acceptable() {
	if (build_inlet) {
		unsigned midx; // index (within the inlet list) of the minimum-pressure input vertex
		for (unsigned i = 0; i < inlet.size(); i++) {
			if (inlet[i].v[0] == min_input_index) {
				midx = i;
				break;
			}
		}
		float tmp_r; // radius required for bridge i to match the reference pressure
		unsigned idx;
		std::vector<bool> tmp(inlet.size(), true); // reset all flags to feasible
		std::swap(tmp, inlet_feasibility);
		for (unsigned i = 0; i < inlet.size(); i++) {
			idx = inlet[i].v[0];
			if (i != midx) { // the reference bridge itself is manufactured at minimum_radii
				if (mode == 2)
					tmp_r = ((Flow.pressure[min_input_index] + ((12 * u * inlet[midx].l * inlet[midx].Q) / (std::pow(h, 3) * 2 * minimum_radii)) - Flow.pressure[idx]) * (std::pow(h, 3)) / (12 * u * inlet[i].l * inlet[i].Q)) / 2;
				else if (mode == 3)
					tmp_r = (Flow.pressure[min_input_index] + ((8 * u * inlet[midx].l * inlet[midx].Q) / (std::pow(minimum_radii, 4) * (float)stim::PI)) - Flow.pressure[idx]) * (float)stim::PI / (8 * u * inlet[i].l * inlet[i].Q);
				if (tmp_r <= 0) { // degenerate case where radii ie less than zero
					Sleep(100); // make flash effect
					std::cout << "\r";
					std::cout << "[ ERROR ] ";
					std::cout << "Inlet bridge for vertex " << min_input_index << " is not feasible";
					inlet_feasibility[i] = false;
					break; // NOTE(review): remaining bridges keep their reset "true" flag — confirm this is intended
				}
				else // feasible
					inlet_feasibility[i] = true;
			}
		}
	}
	else if (build_outlet) { // mirror of the inlet case, referenced to the maximum-pressure output
		unsigned midx; // index (within the outlet list) of the maximum-pressure output vertex
		for (unsigned i = 0; i < outlet.size(); i++) {
			if (outlet[i].v[0] == max_output_index) {
				midx = i;
				break;
			}
		}
		float tmp_r;
		unsigned idx;
		std::vector<bool> tmp(outlet.size(), true);
		std::swap(tmp, outlet_feasibility);
		for (unsigned i = 0; i < outlet.size(); i++) {
			idx = outlet[i].v[0];
			if (i != midx) {
				if (mode == 2)
					tmp_r = ((Flow.pressure[idx] - (Flow.pressure[max_output_index] - (12 * u * outlet[midx].l * outlet[midx].Q) / (std::pow(h, 3) * 2 * minimum_radii))) * (std::pow(h, 3)) / (12 * u * outlet[i].l * outlet[i].Q)) / 2;
				else if (mode == 3)
					tmp_r = (Flow.pressure[idx] - (Flow.pressure[max_output_index] - (8 * u * outlet[midx].l * outlet[midx].Q) / (std::pow(minimum_radii, 4) * (float)stim::PI))) * (float)stim::PI / (8 * u * outlet[i].l * outlet[i].Q);
				if (tmp_r <= 0) { // not enough length to satisfy to situation
					std::cout << "\r";
					std::cout << "[ ERROR ] ";
					std::cout << "Outlet bridge for vertex " << max_output_index << " is not feasible";
					outlet_feasibility[i] = false;
					break;
				}
				else // feasible
					outlet_feasibility[i] = true;
			}
		}
	}
}
//****************************simulation functions*********************************
// get the network information
// gather basic information about the current network: object counts, the
// axis-aligned bounding box, and the list of dangle (degree-1) vertices;
// finally initialize the flow object with the network dimensions.
void get_background() {
	num_edge = E.size();	// get the number of edge on current network
	num_vertex = V.size();	// get the number of vertices on current network
	// get the bounding box of current network
	float tmp;
	for (unsigned i = 0; i < num_vertex; i++) {
		for (unsigned j = 0; j < 3; j++) {
			tmp = V[i].c[j];
			if (tmp < L[j])
				L[j] = tmp;
			if (tmp > U[j])
				U[j] = tmp;
		}
	}
	// get the dangle vertices in O(V + E) by counting vertex degrees once,
	// instead of the previous O(V * E) scan over all edges per vertex
	dangle_vertex.clear();
	std::vector<unsigned> degree(num_vertex, 0);
	for (unsigned j = 0; j < num_edge; j++) {
		degree[E[j].p[0]]++;
		if (E[j].p[1] != E[j].p[0])	// count a self-loop only once, matching the original per-edge test
			degree[E[j].p[1]]++;
	}
	for (unsigned i = 0; i < num_vertex; i++) {
		if (degree[i] == 1)	// exactly one incident edge -> boundary (dangle) vertex
			dangle_vertex.push_back(i);
	}
	// print out
	std::cout << "OBJECT NUMBER" << std::endl;
	std::cout << "edge " << num_edge << std::endl;
	std::cout << "vertex " << num_vertex << std::endl;
	std::cout << "dangle vertex " << dangle_vertex.size() << std::endl;
	Flow.init(num_edge, num_vertex);	// initialize flow object
}
// initialize flow
// set up boundary conditions for the flow simulation: interior vertices get
// zero pressure; dangle vertices are split at the mean x coordinate into a
// high-pressure (left) and low-pressure (right) group
void flow_initialize() {
	// zero out the pressure at every non-dangle (interior) vertex
	for (unsigned i = 0; i < num_vertex; i++) {
		bool dangle = false;
		for (unsigned j = 0; j < dangle_vertex.size(); j++) {
			if (dangle_vertex[j] == i) {
				dangle = true;
			}
		}
		if (!dangle)
			Flow.P[i] = 0;
	}
	if (grow)
		return; // when growing a new edge, keep the existing boundary pressures
	// compute the mean x coordinate of all dangle vertices
	float mid = 0.0f;
	for (unsigned i = 0; i < dangle_vertex.size(); i++)
		mid += V[dangle_vertex[i]].c[0];
	mid /= dangle_vertex.size();
	// assign boundary pressures on either side of the midline
	for (unsigned i = 0; i < dangle_vertex.size(); i++) {
		unsigned idx = dangle_vertex[i];
		if (V[idx].c[0] <= mid)
			Flow.P[idx] = max_pressure - i * delta;	// minor per-vertex discrepancy keeps values distinct
		else
			Flow.P[idx] = (i + 1) * delta;			// the algorithm treats 0 as "no initial pressure"
	}
}
// find the stable flow state
// solve the steady-state flow of the network:
// build the conductance matrix from the channel geometry (mode 2 = rectangular,
// mode 3 = circular Hagen-Poiseuille), impose the dangle-vertex boundary
// pressures, invert the system for all vertex pressures, derive per-edge flow
// rates and velocities, classify inlets/outlets, and build the velocity color map.
// velocities with magnitude below threshold are clamped to zero.
void find_stable_state(float threshold = 0.01f) {
	// clear up last time simulation
	input.clear();
	output.clear();
	std::vector<float> zero_QQ(num_vertex);
	std::swap(Flow.QQ, zero_QQ);
	std::vector<float> zero_pressure(num_vertex);
	std::swap(Flow.pressure, zero_pressure);
	// set the conductance matrix of flow object (off-diagonal entries are negative conductances)
	unsigned start_vertex = 0;
	unsigned end_vertex = 0;
	for (unsigned i = 0; i < num_edge; i++) {
		start_vertex = E[i].p[0];	// get the start vertex index of current edge
		end_vertex = E[i].p[1];		// get the end vertex index of current edge
		if (mode == 2) {
			Flow.C[start_vertex][end_vertex] = -(2 * radius(i) * std::pow(h, 3)) / (12 * u * length(i));	// UNITS: g/mm^4/s
		}
		else if (mode == 3) {
			Flow.C[start_vertex][end_vertex] = -((float)stim::PI * std::pow(radius(i), 4)) / (8 * u * length(i));
		}
		Flow.C[end_vertex][start_vertex] = Flow.C[start_vertex][end_vertex];	// conductance is symmetric
	}
	// set the diagonal to the negative sum of row element
	float sum = 0.0;
	for (unsigned i = 0; i < num_vertex; i++) {
		for (unsigned j = 0; j < num_vertex; j++) {
			sum += Flow.C[i][j];
		}
		Flow.C[i][i] = -sum;
		sum = 0.0;
	}
	// get the Q' vector QQ
	// matrix manipulation to zero out the conductance matrix as defined by the boundary values that were enterd
	for (unsigned i = 0; i < num_vertex; i++) {
		if (Flow.P[i] != 0) {			// for every dangle vertex (nonzero P marks a boundary condition)
			for (unsigned j = 0; j < num_vertex; j++) {
				if (j == i) {
					Flow.QQ[i] = Flow.C[i][i] * Flow.P[i];
				}
				else {
					// move the known pressure to the right-hand side and decouple row/column i
					Flow.C[i][j] = 0;
					Flow.QQ[j] = Flow.QQ[j] - Flow.C[j][i] * Flow.P[i];
					Flow.C[j][i] = 0;
				}
			}
		}
	}
	// get the inverse of conductance matrix
	stim::matrix<float> _C(num_vertex, num_vertex);
	//float** _C = (float**)calloc(num_vertex, sizeof(float*));
	//for (unsigned i = 0; i < num_vertex; i++) {
	//	_C[i] = new float[num_vertex]();
	//}
	Flow.inversion(Flow.C, num_vertex, _C.data());
	// get the pressure in the network: pressure = C^-1 * QQ
	for (unsigned i = 0; i < num_vertex; i++) {
		for (unsigned j = 0; j < num_vertex; j++) {
			//Flow.pressure[i] += _C[i][j] * Flow.QQ[j];
			Flow.pressure[i] += _C(i, j) * Flow.QQ[j];
		}
	}
	// get the flow state from known pressure
	float start_pressure = 0.0;
	float end_pressure = 0.0;
	float deltaP = 0.0;
	for (unsigned i = 0; i < num_edge; i++) {
		start_vertex = E[i].p[0];
		end_vertex = E[i].p[1];
		start_pressure = Flow.pressure[start_vertex];	// get the start vertex pressure of current edge
		end_pressure = Flow.pressure[end_vertex];		// get the end vertex pressure of current edge
		deltaP = start_pressure - end_pressure;			// deltaP = Pa - Pb
		Flow.Q[i].first = start_vertex;
		Flow.Q[i].second = end_vertex;
		if (mode == 2) {
			// volume flow rate and mean velocity for a rectangular channel
			Flow.Q[i].third = (2 * radius(i) * std::pow(h, 3) * deltaP) / (12 * u * length(i));
			E[i].v = Flow.Q[i].third / (h * 2 * radius(i));
		}
		else if (mode == 3) {
			// volume flow rate and mean velocity for a circular channel (Hagen-Poiseuille)
			Flow.Q[i].third = ((float)stim::PI * std::pow(radius(i), 4) * deltaP) / (8 * u * length(i));
			E[i].v = Flow.Q[i].third / ((float)stim::PI * std::pow(radius(i), 2));
		}
	}
	// find both input and output vertex: classify each dangle vertex by the sign
	// of the flow on its single incident edge
	stim::triple<unsigned, unsigned, float> tmp;
	unsigned N = dangle_vertex.size();	// get the number of dangle vertex
	unsigned idx = 0;
	for (unsigned i = 0; i < N; i++) {	// for every boundary vertex
		idx = dangle_vertex[i];
		for (unsigned j = 0; j < num_edge; j++) {	// for every edge
			if (Flow.Q[j].first == idx) {			// starting vertex
				if (Flow.Q[j].third > 0) {			// flow comes in
					tmp.first = idx;
					tmp.second = j;
					tmp.third = Flow.Q[j].third;
					input.push_back(tmp);
					break;
				}
				// their might be a degenerate case that it equals to 0?
				else if (Flow.Q[j].third < 0) {		// flow comes out
					tmp.first = idx;
					tmp.second = j;
					tmp.third = -Flow.Q[j].third;
					output.push_back(tmp);
					break;
				}
			}
			else if (Flow.Q[j].second == idx) {		// ending vertex
				if (Flow.Q[j].third > 0) {			// flow comes in
					tmp.first = idx;
					tmp.second = j;
					tmp.third = Flow.Q[j].third;
					output.push_back(tmp);
					break;
				}
				// their might be a degenerate case that it equals to 0?
				else if (Flow.Q[j].third < 0) {		// flow comes out
					tmp.first = idx;
					tmp.second = j;
					tmp.third = -Flow.Q[j].third;
					input.push_back(tmp);
					break;
				}
			}
		}
	}
	// find the absolute maximum velocity and minimum velocity
	std::vector<float> abs_V(num_edge);
	for (unsigned i = 0; i < num_edge; i++) {
		// NOTE(review): std::fabsf is not guaranteed by all standard libraries; std::fabs would be portable — confirm toolchain
		abs_V[i] = std::fabsf(E[i].v);
		if (abs_V[i] < threshold)	// clamp near-zero velocities
			abs_V[i] = 0.0f;
	}
	max_v = *std::max_element(abs_V.begin(), abs_V.end());
	min_v = *std::min_element(abs_V.begin(), abs_V.end());
	// get the color map based on velocity range along the network
	color.clear();
	// NOTE(review): num_edge and num_vertex are unsigned, so "num_edge - num_vertex + 1 <= 0"
	// only holds on exact equality (wraparound otherwise) — confirm the intended cycle test
	if (dangle_vertex.size() == 2 && num_edge - num_vertex + 1 <= 0)	// only one inlet and one outlet
		color.resize(num_edge * 3, (unsigned char)0);
	else {
		color.resize(num_edge * 3);
		stim::cpu2cpu<float>(&abs_V[0], &color[0], num_edge, min_v, max_v, stim::cmBrewer);
	}
	color_bound = true;
	// sort the velocity bar in ascending order
	velocity_map.resize(num_edge);
	for (unsigned i = 0; i < num_edge; i++)
		velocity_map[i] = i;
	std::sort(velocity_map.begin(), velocity_map.end(), [&](int x, int y) {return abs_V[x] < abs_V[y]; });
	Flow.reset(num_vertex);		// reset flow object for next time simulation
	// find the minimum pressure input port
	if (input.size()) {
		min_input_index = input[0].first;
		for (unsigned i = 1; i < input.size(); i++) {
			unsigned idx = input[i].first;
			if (Flow.pressure[idx] < Flow.pressure[min_input_index])
				min_input_index = idx;
		}
	}
	// find the maximum pressure output port
	if (output.size()) {
		max_output_index = output[0].first;
		for (unsigned i = 1; i < output.size(); i++) {
			unsigned idx = output[i].first;
			if (Flow.pressure[idx] > Flow.pressure[max_output_index])
				max_output_index = idx;
		}
	}
	// get the number of input/output
	inlet_feasibility.resize(input.size(), true);
	outlet_feasibility.resize(output.size(), true);
}
// display and output final state
void show_stable_state() {
	std::cout << std::endl;
	// write the per-vertex pressures to a CSV file
	std::string p_filename = "pressure.csv";
	std::ofstream p_file(p_filename.c_str());
	p_file << "Vertex, Pressure(g/" << units << "/s^2)" << std::endl;
	for (unsigned v = 0; v < num_vertex; v++)
		p_file << v << "," << Flow.pressure[v] << std::endl;
	p_file.close();
	// echo the pressures to the console
	std::cout << "PRESSURE(g/" << units << "/s^2):" << std::endl;
	for (unsigned v = 0; v < num_vertex; v++)
		std::cout << "[" << v << "] " << Flow.pressure[v] << std::endl;
	// write the per-edge volume flow rates to a CSV file
	std::string f_filename = "flow.csv";
	std::ofstream f_file(f_filename.c_str());
	f_file << "Edge, Volume flow rate(" << units << "^3/s)" << std::endl;
	for (unsigned e = 0; e < num_edge; e++)
		f_file << Flow.Q[e].first << "->" << Flow.Q[e].second << "," << Flow.Q[e].third << std::endl;
	f_file.close();
	// echo the flow rates to the console
	std::cout << "VOLUME FLOW RATE(" << units << "^3/s):" << std::endl;
	for (unsigned e = 0; e < num_edge; e++)
		std::cout << "(" << Flow.Q[e].first << "," << Flow.Q[e].second << ")" << Flow.Q[e].third << std::endl;
}
//****************************manufacture functions*********************************
// indicator functions
// indicator for sphere
// indicator for sphere
// One thread per (ix, iy) pixel of slice z: finds the nearest sphere center and
// marks the pixel 255 when the pixel lies inside that sphere.
// R = stack size in 4 dimensions, S = spacing in 4 dimensions, Size = depth in slices.
__global__ void find_near_sphere(const sphere* V, unsigned num, size_t* R, float* S, unsigned char* ptr, unsigned z, int Size) {
	unsigned ix = blockDim.x * blockIdx.x + threadIdx.x;		// col
	unsigned iy = blockDim.y * blockIdx.y + threadIdx.y;		// row
	if (ix >= R[1] || iy >= R[2]) return;						// avoid segfault
	if (num == 0) return;										// no sphere to test: previously idx was read uninitialized here
	stim::vec3<float> world_pixel;								// pixel position in world coordinates
	world_pixel[0] = (float)ix * S[1];
	world_pixel[1] = (float)iy * S[2];
	world_pixel[2] = ((float)z - Size / 2) * S[3];
	float distance = FLT_MAX;
	float tmp_distance;
	unsigned idx = 0;											// index of nearest sphere (initialized for safety)
	for (unsigned i = 0; i < num; i++) {						// linear scan for the closest sphere center
		tmp_distance = (V[i].c - world_pixel).len();
		if (tmp_distance <= distance) {
			distance = tmp_distance;
			idx = i;
		}
	}
	if (distance <= V[idx].r)									// inside the nearest sphere
		ptr[(R[2] - 1 - iy) * R[0] * R[1] + ix * R[0]] = 255;
}
// indicator for cylinder(envelope/hyperboloid)
// indicator for cylinder(envelope/hyperboloid)
// One thread per (ix, iy) pixel of slice z: finds the nearest cylinder axis whose
// projection point lies inside the segment, and marks the pixel 255 when the pixel
// falls within the linearly-interpolated radius at that projection point.
__global__ void find_near_cylinder(cylinder* E, unsigned num, size_t* R, float* S, unsigned char* ptr, unsigned z, int Size) {
	unsigned ix = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned iy = blockDim.y * blockIdx.y + threadIdx.y;
	if (ix >= R[1] || iy >= R[2]) return;			// avoid segfault
	stim::vec3<float> world_pixel;					// pixel position in world coordinates
	world_pixel[0] = (float)ix * S[1];
	world_pixel[1] = (float)iy * S[2];
	world_pixel[2] = ((float)z - Size / 2) * S[3];
	float distance = FLT_MAX;
	float tmp_distance;
	float rr = 0.0f;	// radius at the surface where the projection meets; initialized because
						// no cylinder may qualify below, in which case rr was previously read uninitialized
	for (unsigned i = 0; i < num; i++) {			// find the nearest cylinder
		// point-to-line distance via cross product
		tmp_distance = ((world_pixel - E[i].c1).cross(world_pixel - E[i].c2)).len() / (E[i].c2 - E[i].c1).len();
		if (tmp_distance <= distance) {
			// we only focus on point to line segment
			// check to see whether projection is lying outside the line segment
			float a = (world_pixel - E[i].c1).dot((E[i].c2 - E[i].c1).norm());
			float b = (world_pixel - E[i].c2).dot((E[i].c1 - E[i].c2).norm());
			float length = (E[i].c1 - E[i].c2).len();
			if (a <= length && b <= length) {		// projection lying inside the line segment
				distance = tmp_distance;
				rr = E[i].r1 + (E[i].r2 - E[i].r1) * a / (length);	// linear change
			}
		}
	}
	// when no projection qualified, distance stays FLT_MAX and rr stays 0, so nothing is written
	if (distance <= rr)
		ptr[(R[2] - 1 - iy) * R[0] * R[1] + ix * R[0]] = 255;
}
// make image stack using gpu
// make image stack using gpu
// Rasterizes the sphere list A and cylinder list B into a binary image stack I,
// one z-slice per loop iteration (memory-constrained), then saves the slices as BMPs.
void make_image_stack() {
	std::cout << "[-----ON PROGRESS-----]" << std::endl;
	// initilize the image stack object
	I.init(1, size_x, size_y, size_z);
	I.set_dim(dx, dy, dz);
	// because of lack of memory, we have to compute one slice of stack per time
	// copy vertex (sphere) and edge (cylinder) geometry to the device
	sphere* d_V;
	cylinder* d_E;
	HANDLE_ERROR(cudaMalloc((void**)&d_V, A.size() * sizeof(sphere)));
	HANDLE_ERROR(cudaMalloc((void**)&d_E, B.size() * sizeof(cylinder)));
	HANDLE_ERROR(cudaMemcpy(d_V, &A[0], A.size() * sizeof(sphere), cudaMemcpyHostToDevice));
	HANDLE_ERROR(cudaMemcpy(d_E, &B[0], B.size() * sizeof(cylinder), cudaMemcpyHostToDevice));
	// allocate image stack information memory
	float* d_S;
	size_t* d_R;
	size_t* R = (size_t*)malloc(4 * sizeof(size_t));		// size in 4 dimension
	R[0] = 1;
	R[1] = (size_t)size_x;
	R[2] = (size_t)size_y;
	R[3] = (size_t)size_z;
	float* S = (float*)malloc(4 * sizeof(float));			// spacing in 4 dimension
	S[0] = 1.0f;
	S[1] = dx;
	S[2] = dy;
	S[3] = dz;
	size_t num = size_x * size_y;							// pixels per slice
	HANDLE_ERROR(cudaMalloc((void**)&d_S, 4 * sizeof(float)));
	HANDLE_ERROR(cudaMalloc((void**)&d_R, 4 * sizeof(size_t)));
	HANDLE_ERROR(cudaMemcpy(d_R, R, 4 * sizeof(size_t), cudaMemcpyHostToDevice));
	HANDLE_ERROR(cudaMemcpy(d_S, S, 4 * sizeof(float), cudaMemcpyHostToDevice));
	// launch configuration: hoisted out of the slice loop (device properties do not change per slice)
	cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, 0);						// get cuda device properties structure
	size_t max_thread = sqrt(prop.maxThreadsPerBlock);		// get the maximum number of thread per block
	dim3 block(size_x / max_thread + 1, size_y / max_thread + 1);
	dim3 thread(max_thread, max_thread);
	// slice buffers: allocated once and reused (previously malloc'd/freed per iteration)
	unsigned char* d_ptr;
	unsigned char* ptr = (unsigned char*)malloc(num * sizeof(unsigned char));
	HANDLE_ERROR(cudaMalloc((void**)&d_ptr, num * sizeof(unsigned char)));
	// for every slice of image
	unsigned p = 0;											// percentage of progress
	for (unsigned i = 0; i < size_z; i++) {
		// zero the device slice: the kernels only write 255 where geometry is hit, so the
		// background must be cleared explicitly (cudaMalloc does not zero memory; the host
		// memset used previously was overwritten by the device-to-host copy)
		HANDLE_ERROR(cudaMemset(d_ptr, 0, num * sizeof(unsigned char)));
		find_near_sphere << <block, thread >> > (d_V, A.size(), d_R, d_S, d_ptr, i, size_z);
		HANDLE_ERROR(cudaGetLastError());					// catch launch-configuration errors
		// both kernels run on the default stream, so they are serialized automatically
		find_near_cylinder << <block, thread >> > (d_E, B.size(), d_R, d_S, d_ptr, i, size_z);
		HANDLE_ERROR(cudaGetLastError());
		// blocking copy: also synchronizes with the kernels above
		HANDLE_ERROR(cudaMemcpy(ptr, d_ptr, num * sizeof(unsigned char), cudaMemcpyDeviceToHost));
		I.set(ptr, i);										// store the finished slice
		// print progress bar
		p = (float)(i + 1) / (float)size_z * 100;
		rtsProgressBar(p);
	}
	// clear up
	free(ptr);
	free(R);
	free(S);
	HANDLE_ERROR(cudaFree(d_ptr));
	HANDLE_ERROR(cudaFree(d_R));
	HANDLE_ERROR(cudaFree(d_S));
	HANDLE_ERROR(cudaFree(d_V));
	HANDLE_ERROR(cudaFree(d_E));
	if (stackdir == "")
		I.save_images("image????.bmp");
	else
		I.save_images(stackdir + "/image????.bmp");
	std::cout << std::endl << "[-----SUCCEEDED-----]" << std::endl;
}
// preparation for making image stack
// Builds the manufacturing geometry: fills sphere list A and cylinder list B with the
// current network (vertices + edges) plus the inlet/outlet main feeders and their bridge
// envelopes, records where bridge geometry begins (bridge_start_index/feeder_start_index),
// tracks the largest radius seen (cur_max_radii), and derives the image stack dimensions.
// NOTE: push_back order matters -- the *_start_index values partition network geometry
// from bridge geometry for later rendering.
void preparation() {
	// clear result from last time
	A.clear();
	B.clear();
	// firstly push back the network
	sphere new_sphere;
	cylinder new_cylinder;
	// push back current network
	for (unsigned i = 0; i < num_vertex; i++) {
		new_sphere.c = V[i].c;
		new_sphere.r = V[i].r;
		A.push_back(new_sphere);
		if (V[i].r > cur_max_radii)		// track the largest radius for stack depth sizing
			cur_max_radii = V[i].r;
	}
	for (unsigned i = 0; i < num_edge; i++) {
		new_cylinder.c1 = V[E[i].p[0]].c;
		new_cylinder.c2 = V[E[i].p[1]].c;
		new_cylinder.r1 = V[E[i].p[0]].r;
		new_cylinder.r2 = V[E[i].p[1]].r;
		B.push_back(new_cylinder);
	}
	// everything appended after these indices belongs to the bridges/feeders
	bridge_start_index = B.size();
	feeder_start_index = A.size();
	// push back the inlet main feeder
	if (inlet_done) {
		new_sphere.c = inlet_port;
		new_sphere.r = main_feeder_radii;
		A.push_back(new_sphere);
		if (main_feeder_radii > cur_max_radii)
			cur_max_radii = main_feeder_radii;
	}
	// push back the outlet main feeder
	// NOTE(review): unlike the inlet branch above, this branch does not update
	// cur_max_radii -- harmless when both feeders exist (same radius), but if only the
	// outlet exists the stack depth may be undersized; confirm intended
	if (outlet_done) {
		new_sphere.c = outlet_port;
		new_sphere.r = main_feeder_radii;
		A.push_back(new_sphere);
	}
	// connect input port to inlet main feeder
	float mid_r;						// radius at the bridge's middle/corner point
	float p1;							// length proportions of the two bridge segments
	float p2;
	stim::vec3<float> center1;
	stim::vec3<float> center2;
	float r1;
	float r2;
	for (unsigned i = 0; i < inlet.size(); i++) {
		if (inlet[i].V.size() == 2) {	// straight connection
			mid_r = 2 * inlet[i].r - 1.0f / 2.0f * (V[inlet[i].v[0]].r + default_radii);	// mid_r = 2*ave_r - 1/2(r1 + r2), set proportion to be half
			if (mid_r > cur_max_radii)
				cur_max_radii = mid_r;
			// calculate the envelope along the inlet
			// first half
			center1 = (inlet[i].V[0] + inlet[i].V[1]) / 2;	// normally, the radii of middle point is the largest among those two
			center2 = inlet[i].V[0];
			r1 = mid_r;
			r2 = default_radii;
			// push back middle point
			new_sphere.c = center1;
			new_sphere.r = mid_r;
			A.push_back(new_sphere);
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
			//second half
			center2 = inlet[i].V[1];
			r2 = V[inlet[i].v[0]].r;
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
		}
		else {							// broken line connection
			p1 = (inlet[i].V[0] - inlet[i].V[1]).len() / inlet[i].l;	// calculate the two line segments length proportion
			p2 = (inlet[i].V[1] - inlet[i].V[2]).len() / inlet[i].l;
			// corner radius chosen so the length-weighted average radius equals inlet[i].r
			mid_r = (inlet[i].r - (p1 / 2 * default_radii + p2 / 2 * V[inlet[i].v[0]].r)) * 2;
			if (mid_r > cur_max_radii)
				cur_max_radii = mid_r;
			// first half
			center1 = inlet[i].V[1];
			center2 = inlet[i].V[0];
			r1 = mid_r;
			r2 = default_radii;
			// push back corner point
			new_sphere.c = center1;
			new_sphere.r = mid_r;
			A.push_back(new_sphere);
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
			// second half
			center2 = inlet[i].V[2];
			r2 = V[inlet[i].v[0]].r;
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
		}
	}
	// connect output port to outlet main feeder (mirror of the inlet loop above)
	for (unsigned i = 0; i < outlet.size(); i++) {
		if (outlet[i].V.size() == 2) {	// straight connection
			mid_r = 2 * outlet[i].r - 1.0f / 2.0f * (V[outlet[i].v[0]].r + default_radii);	// mid_r = 2*ave_r - 1/2(r1 + r2), set proportion to be half
			if (mid_r > cur_max_radii)
				cur_max_radii = mid_r;
			// calculate the envelope along the inlet
			// first half
			center1 = (outlet[i].V[0] + outlet[i].V[1]) / 2;	// normally, the radii of middle point is the largest of these two
			center2 = outlet[i].V[0];
			r1 = mid_r;
			r2 = default_radii;
			// push back middle point
			new_sphere.c = center1;
			new_sphere.r = mid_r;
			A.push_back(new_sphere);
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
			//second half
			center2 = outlet[i].V[1];
			r2 = V[outlet[i].v[0]].r;
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
		}
		else {							// broken line connection
			p1 = (outlet[i].V[0] - outlet[i].V[1]).len() / outlet[i].l;	// calculate the two line segments length proportion
			p2 = (outlet[i].V[1] - outlet[i].V[2]).len() / outlet[i].l;
			mid_r = (outlet[i].r - (p1 / 2 * default_radii + p2 / 2 * V[outlet[i].v[0]].r)) * 2;
			if (mid_r > cur_max_radii)
				cur_max_radii = mid_r;
			// first half
			center1 = outlet[i].V[1];
			center2 = outlet[i].V[0];
			r1 = mid_r;
			r2 = default_radii;
			// push back corner point
			new_sphere.c = center1;
			new_sphere.r = mid_r;
			A.push_back(new_sphere);
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
			// second half
			center2 = outlet[i].V[2];
			r2 = V[outlet[i].v[0]].r;
			// push back current cylinder
			new_cylinder.c1 = center1;
			new_cylinder.c2 = center2;
			new_cylinder.r1 = r1;
			new_cylinder.r2 = r2;
			B.push_back(new_cylinder);
		}
	}
	// get the size of image stack in pixel
	size_x = X / dx + 1;
	size_y = Y / dy + 1;
	size_z = 2.0f * cur_max_radii / dz;		// depth spans the fattest feature
	size_z += 5;							// expand a little bit
}
//*****************************glut functions*********************************
// dynamically set menu
// @param num: number of current menu options
// @param range: range of option to be set from menu_option list
// dynamically set menu
// @param num: number of current menu options
// @param range: range of option to be set from menu_option list
void glut_set_menu(int num, int range) {
	// GLUT reindexes entries after each removal, so deleting entry 1
	// `num` times clears the whole menu
	for (int j = 0; j < num; j++)
		glutRemoveMenuItem(1);
	// append the requested options from the menu_option list
	for (int k = 1; k <= range; k++) {
		std::string entry = menu_option[k - 1];
		glutAddMenuEntry(entry.c_str(), k);
	}
}
// glut projection setting, do squash transformation(from pyramid to cube)
// glut projection setting, do squash transformation(from pyramid to cube)
void glut_projection() {
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();
	// refresh the cached window dimensions
	X = glutGet(GLUT_WINDOW_WIDTH);
	Y = glutGet(GLUT_WINDOW_HEIGHT);
	glViewport(0, 0, X, Y);
	float ratio = (float)X / (float)Y;		// window aspect ratio
	gluPerspective(60, ratio, 0.1, 1000000);
	glPopMatrix();
}
// glut modelview setting, translate camera to origin
// glut modelview setting, translate camera to origin
void glut_modelview() {
	glMatrixMode(GL_MODELVIEW);		// all following matrix operations target the modelview stack
	glPushMatrix();
	glLoadIdentity();
	glPopMatrix();					// NOTE(review): this pop discards the identity just loaded, so
									// gluLookAt below multiplies onto the restored previous matrix
									// rather than a fresh identity -- confirm this is intended
	// position the camera according to the interactive camera object
	stim::vec3<float> eye = cam.getPosition();
	stim::vec3<float> focus = cam.getLookAt();
	stim::vec3<float> up = cam.getUp();
	gluLookAt(eye[0], eye[1], eye[2], focus[0], focus[1], focus[2], up[0], up[1], up[2]);
}
// render vertex as point
void glut_draw_point() {
stim::circle<float> tmp_c;
tmp_c.rotate(stim::vec3<float>(0.0, 0.0, -1.0)); // model circle waiting to be translated and scaled
for (unsigned i = 0; i < V.size(); i++) {
if (grow) {
if (i >= V.size() - new_num)
break;
}
if (!manufacture) { // in modes except manufacture mode
if (Flow.P.empty()) // if current vertex hasn't been set initial pressure
glColor3f(0.992f, 0.859f, 0.780f); // orange point
else
if (Flow.P[i] != 0) {
stim::vec3<float> new_color;
new_color[0] = (Flow.P[i] / max_pressure) > 0.5f ? 1.0f : 2.0f * Flow.P[i] / max_pressure; // red
new_color[1] = 0.0f; // green
new_color[2] = (Flow.P[i] / max_pressure) > 0.5f ? 1.0f - 2.0f * (Flow.P[i] / max_pressure - 0.5f) : 1.0f; // blue
glColor3f(new_color[0], new_color[1], new_color[2]);
}
else
glColor3f(0.5f, 0.5f, 0.5f); // gray point
stim::circle<float> c(V[i].c, V[i].r, stim::vec3<float>(0.0, 0.0, 1.0), tmp_c.U); // create a circle in order to draw the point
std::vector<typename stim::vec3<float> > cp = c.glpoints(20); // get points along the circle
glBegin(GL_TRIANGLE_FAN); // draw circle as bunch of triangles
glVertex2f(V[i].c[0], V[i].c[1]);
for (unsigned i = 0; i < cp.size(); i++) {
glVertex2f(cp[i][0], cp[i][1]);
}
glEnd();
glFlush();
}
}
if (!generate_network && !simulation && !manufacture) {
glColor3f(0.0f, 0.0f, 0.0f);
if (inlet.size() != 0) {
// draw the inlet main feeder
stim::circle<float> c(inlet_port, main_feeder_radii, stim::vec3<float>(0.0, 0.0, 1.0), tmp_c.U); // create a circle in order to draw the point
std::vector<typename stim::vec3<float> > cp = c.glpoints(20); // get points along the circle
glBegin(GL_TRIANGLE_FAN);
glVertex2f(inlet_port[0], inlet_port[1]);
for (unsigned i = 0; i < cp.size(); i++) {
glVertex2f(cp[i][0], cp[i][1]);
}
glEnd();
glFlush();
}
if (outlet.size() != 0) {
// draw the outlet main feeder
stim::circle<float> c(outlet_port, main_feeder_radii, stim::vec3<float>(0.0, 0.0, 1.0), tmp_c.U); // create a circle in order to draw the point
std::vector<typename stim::vec3<float> > cp = c.glpoints(20); // get points along the circle
glBegin(GL_TRIANGLE_FAN);
glVertex2f(outlet_port[0], outlet_port[1]);
for (unsigned i = 0; i < cp.size(); i++) {
glVertex2f(cp[i][0], cp[i][1]);
}
glEnd();
glFlush();
}
}
}
// render centerline(edge) as line
// render centerline(edge) as line
// Draws every edge as a radius-scaled quad, colored from the velocity color map when
// bound (otherwise semi-transparent per-line colors), and the inlet/outlet bridge
// polylines (black = feasible, red = infeasible) while building bridges.
void glut_draw_line() {
	stim::vec3<float> ori_v;		// direction vector of current edge
	stim::vec3<float> per_v;		// vector perpendicular to direction vector
	stim::vec3<float> v1;			// four vertices for drawing trapezoid
	stim::vec3<float> v2;
	stim::vec3<float> v3;
	stim::vec3<float> v4;
	for (unsigned i = 0; i < E.size(); i++) {	// for every edge
		ori_v = V[E[i].p[1]].c - V[E[i].p[0]].c;
		ori_v = ori_v.norm();
		per_v[0] = -ori_v[1];		// for x dot y = 0, the best solution is x1 = -y2, y1 = x2
		per_v[1] = ori_v[0];
		per_v[2] = ori_v[2];		// presumably z is 0 for the in-plane network, so per_v stays in-plane -- confirm
		v1 = V[E[i].p[0]].c + V[E[i].p[0]].r * per_v;
		v2 = V[E[i].p[0]].c - V[E[i].p[0]].r * per_v;
		v3 = V[E[i].p[1]].c + V[E[i].p[1]].r * per_v;
		v4 = V[E[i].p[1]].c - V[E[i].p[1]].r * per_v;
		if (!manufacture) {
			if (color_bound)		// get corresponding color from color map
				glColor3f((float)color[i * 3 + 0] / 255, (float)color[i * 3 + 1] / 255, (float)color[i * 3 + 2] / 255);
			glBegin(GL_QUAD_STRIP);
			if (!color_bound) {
				glEnable(GL_BLEND);	// enable color blend
				glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);	// set blend function
				glColor4f(JACKCP[color[i] * 3 + 0], JACKCP[color[i] * 3 + 1], JACKCP[color[i] * 3 + 2], 0.7f);
			}
			glVertex2f(v1[0], v1[1]);
			glVertex2f(v2[0], v2[1]);
			glVertex2f(v3[0], v3[1]);
			glVertex2f(v4[0], v4[1]);
			glEnd();
			if (!color_bound)
				glDisable(GL_BLEND);
		}
		glFlush();
	}
	if (!generate_network && !simulation && !manufacture) {
		glLineWidth(1);
		if (inlet.size() != 0) {
			for (unsigned i = 0; i < inlet.size(); i++) {
				if (inlet_feasibility[i])
					glColor3f(0.0f, 0.0f, 0.0f);	// black means feasible (comment previously said "white")
				else
					glColor3f(1.0f, 0.0f, 0.0f);	// red means nonfeasible
				glBegin(GL_LINE_STRIP);
				for (unsigned j = 0; j < inlet[i].V.size(); j++) {
					glVertex2f(inlet[i].V[j][0], inlet[i].V[j][1]);
				}
				glEnd();
			}
		}
		if (outlet.size() != 0) {
			for (unsigned i = 0; i < outlet.size(); i++) {
				if (outlet_feasibility[i])
					glColor3f(0.0f, 0.0f, 0.0f);	// black means feasible (comment previously said "white")
				else
					glColor3f(1.0f, 0.0f, 0.0f);	// red means nonfeasible
				glBegin(GL_LINE_STRIP);
				for (unsigned j = 0; j < outlet[i].V.size(); j++) {
					glVertex2f(outlet[i].V[j][0], outlet[i].V[j][1]);
				}
				glEnd();
			}
		}
		glFlush();
	}
}
// render flow rate as triangle
void glut_draw_triangle(float threshold = 0.01f) {
stim::vec3<float> ori_v; // edge direction vector
stim::vec3<float> per_v; // perpendicular vector of ori_v
stim::vec3<float> mid_p; // middle point of current edge
stim::vec3<float> left; // left point
stim::vec3<float> right; // right point
stim::vec3<float> top; // top point
for (unsigned i = 0; i < E.size(); i++) {
// find the perpendicular vector of current edge
ori_v = V[E[i].p[1]].c - V[E[i].p[0]].c;
ori_v = ori_v.norm();
per_v[0] = -ori_v[1];
per_v[1] = ori_v[0];
per_v[2] = ori_v[2];
mid_p = (V[E[i].p[0]].c + V[E[i].p[1]].c) / 2;
left = mid_p + per_v * default_radii / 2;
right = mid_p - per_v * default_radii / 2;
if (E[i].v > threshold)
top = mid_p + ori_v * default_radii * sqrt(3.0f);
else if(E[i].v < -threshold)
top = mid_p - ori_v * default_radii * sqrt(3.0f);
if (E[i].v > threshold || E[i].v < -threshold) {
glColor3f(0.600f, 0.847f, 0.788f); // lime color
glBegin(GL_TRIANGLES);
glVertex2f(left[0], left[1]);
glVertex2f(right[0], right[1]);
glVertex2f(top[0], top[1]);
glEnd();
glFlush();
}
}
}
// render inlet/outlet bridge as cylinder
void glut_draw_bridge() {
glColor3f(0.0f, 0.0f, 0.0f);
std::vector<typename stim::vec3<float> > cp1(subdivision + 1);
std::vector<typename stim::vec3<float> > cp2(subdivision + 1);
// draw spheres on the end/middle of bridge
for (unsigned i = feeder_start_index; i < A.size(); i++) {
glPushMatrix();
glTranslatef(A[i].c[0], A[i].c[1], A[i].c[2]);
glutSolidSphere(A[i].r, subdivision, subdivision);
glPopMatrix();
}
// draw inlet/outlet bridge
for (unsigned i = bridge_start_index; i < B.size(); i++) {
// calculate the envelope caps
find_envelope(cp1, cp2, B[i].c1, B[i].c2, B[i].r1, B[i].r2);
glBegin(GL_QUAD_STRIP);
for (unsigned j = 0; j < cp1.size(); j++) {
glVertex3f(cp1[j][0], cp1[j][1], cp1[j][2]);
glVertex3f(cp2[j][0], cp2[j][1], cp2[j][2]);
}
glEnd();
}
glFlush();
}
// render point as sphere
// render point as sphere
// Draws every network vertex, plus the inlet/outlet main feeders when present,
// as black solid spheres.
void glut_draw_sphere() {
	glColor3f(0.0f, 0.0f, 0.0f);
	// one solid sphere per network vertex
	for (unsigned v = 0; v < V.size(); v++) {
		glPushMatrix();
		glTranslatef(V[v].c[0], V[v].c[1], V[v].c[2]);
		glutSolidSphere(V[v].r, subdivision, subdivision);
		glPopMatrix();
	}
	if (!inlet.empty()) {
		// draw the inlet main feeder
		glPushMatrix();
		glTranslatef(inlet_port[0], inlet_port[1], inlet_port[2]);
		glutSolidSphere(main_feeder_radii, subdivision, subdivision);
		glPopMatrix();
	}
	if (!outlet.empty()) {
		// draw the outlet main feeder
		glPushMatrix();
		glTranslatef(outlet_port[0], outlet_port[1], outlet_port[2]);
		glutSolidSphere(main_feeder_radii, subdivision, subdivision);
		glPopMatrix();
	}
	glFlush();
}
// render line as cylinder
// render line as cylinder
// Draws every network edge as a quad-strip envelope between its two endpoint
// circles (radius-tapered cylinder). Removed the unused locals tmp_d/tmp_n.
void glut_draw_cylinder() {
	glColor3f(0.0f, 0.0f, 0.0f);
	stim::vec3<float> center1;		// endpoint centers of the current edge
	stim::vec3<float> center2;
	float r1;						// endpoint radii of the current edge
	float r2;
	std::vector<typename stim::vec3<float> > cp1(subdivision + 1);
	std::vector<typename stim::vec3<float> > cp2(subdivision + 1);
	for (unsigned i = 0; i < E.size(); i++) {
		center1 = V[E[i].p[0]].c;
		center2 = V[E[i].p[1]].c;
		r1 = V[E[i].p[0]].r;
		r2 = V[E[i].p[1]].r;
		// calculate the envelope caps
		find_envelope(cp1, cp2, center1, center2, r1, r2);
		glBegin(GL_QUAD_STRIP);
		for (unsigned j = 0; j < cp1.size(); j++) {
			glVertex3f(cp1[j][0], cp1[j][1], cp1[j][2]);
			glVertex3f(cp2[j][0], cp2[j][1], cp2[j][2]);
		}
		glEnd();
		glFlush();
	}
}
// main render function
void glut_render() {
glEnable(GL_SMOOTH);
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glut_draw_line(); // draw the edge as line
glut_draw_point(); // draw the vertex as point
if (!first_click && generate_network) { // render a transparent line to indicate your next click position
glEnable(GL_BLEND); // enable color blend
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // set blend function
glColor4f(JACKCP[color_index * 3 + 0], JACKCP[color_index * 3 + 1], JACKCP[color_index * 3 + 2], 0.2f);
stim::vec3<float> tmp_d;
stim::circle<float> tmp_c;
std::vector<typename stim::vec3<float> > cp1(subdivision + 1);
std::vector<typename stim::vec3<float> > cp2(subdivision + 1);
tmp_d = tmp_vertex.c - V[tmp_edge.p[0]].c;
tmp_d = tmp_d.norm();
tmp_c.rotate(tmp_d);
stim::circle<float> c1(V[tmp_edge.p[0]].c, V[tmp_edge.p[0]].r, tmp_d, tmp_c.U);
stim::circle<float> c2(tmp_vertex.c, tmp_vertex.r, tmp_d, tmp_c.U);
cp1 = c1.glpoints(subdivision);
cp2 = c2.glpoints(subdivision);
glBegin(GL_QUAD_STRIP);
for (unsigned j = 0; j < subdivision + 1; j++) {
glVertex3f(cp1[j][0], cp1[j][1], cp1[j][2]);
glVertex3f(cp2[j][0], cp2[j][1], cp2[j][2]);
}
glEnd();
glFlush();
glDisable(GL_BLEND);
}
if (grow) { // render a gray line to indicate grow edge
glColor3f(0.5f, 0.5f, 0.5f);
glBegin(GL_LINES);
glVertex2f(V[tmp_edge.p[0]].c[0], V[tmp_edge.p[0]].c[1]);
glVertex2f(tmp_vertex.c[0], tmp_vertex.c[1]);
glEnd();
// render the new edges and new vertex
for (unsigned i = num_vertex; i < V.size(); i++) {
glPointSize(10);
glBegin(GL_POINT);
glVertex2f(V[i].c[0], V[i].c[1]);
glEnd();
}
for (unsigned i = 0; i < tmp_E.size(); i++) {
glBegin(GL_LINES);
glVertex2f(V[tmp_E[i].p[0]].c[0], V[tmp_E[i].p[0]].c[1]);
glVertex2f(V[tmp_E[i].p[1]].c[0], V[tmp_E[i].p[1]].c[1]);
glEnd();
}
glFlush();
}
if (select_corner) {
glEnable(GL_BLEND); // enable color blend
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); // set blend function
glColor4f(0.0f, 0.0f, 0.0f, 0.4f);
// draw the joint position as a point
glBegin(GL_POINT);
glVertex2f(corner_vertex[0], corner_vertex[1]);
glEnd();
// draw the bridge
glBegin(GL_LINE_STRIP);
if (build_inlet) {
glVertex2f(inlet[bridge_index].V[0][0], inlet[bridge_index].V[0][1]);
glVertex2f(corner_vertex[0], corner_vertex[1]);
unsigned idx = inlet[bridge_index].V.size() - 1;
glVertex2f(inlet[bridge_index].V[idx][0], inlet[bridge_index].V[idx][1]);
}
else if (build_outlet) {
glVertex2f(outlet[bridge_index].V[0][0], outlet[bridge_index].V[0][1]);
glVertex2f(corner_vertex[0], corner_vertex[1]);
unsigned idx = outlet[bridge_index].V.size() - 1;
glVertex2f(outlet[bridge_index].V[idx][0], outlet[bridge_index].V[idx][1]);
}
glEnd();
glFlush();
glDisable(GL_BLEND);
}
if (!manufacture) {
if (simulation || build_inlet || build_outlet) {
glut_draw_triangle();
}
for (unsigned i = 0; i < V.size(); i++) {
glColor3f(0.0f, 0.0f, 0.0f);
glRasterPos2f(V[i].c[0], V[i].c[1] + 0.5f); // mark index right above the vertex
std::stringstream ss;
ss << i;
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss.str().c_str()));
}
// bring up a pressure bar on left
if (select_pressure) {
glLineWidth(100);
glBegin(GL_LINES);
glColor3f(0.0f, 0.0f, 1.0f); // blue to red
glVertex2f(border * X / vX, border * Y / vY);
glColor3f(1.0, 0.0, 0.0);
glVertex2f(border * X / vX, (vY - 2 * border) * Y / vY);
glEnd();
glFlush();
// pressure bar text
glColor3f(0.0f, 0.0f, 0.0f);
glRasterPos2f(0.0f, (vY - border) * Y / vY);
std::stringstream ss_p;
ss_p << "Pressure Bar";
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str()));
// pressure range text
float step = vY - 3 * border;
step /= 10;
for (unsigned i = 0; i < 11; i++) {
glRasterPos2f((border * 1.5f) * X / vX, (border + i * step) * Y / vY);
std::stringstream ss_n;
ss_n << i * max_pressure / 10;
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str()));
}
}
}
// print the velocity range bar
if (simulation && !select_pressure) {
if (dangle_vertex.size() == 2 && num_edge - num_vertex + 1 <= 0) {
// do nothing
}
else {
float step = (vY - 3 * border) * Y / vY;
step /= BREWER_CTRL_PTS - 1;
for (unsigned i = 0; i < BREWER_CTRL_PTS - 1; i++) {
glLineWidth(100);
glBegin(GL_LINES);
glColor3f(BREWERCP[i * 4 + 0], BREWERCP[i * 4 + 1], BREWERCP[i * 4 + 2]);
glVertex2f(border * X / vX, border * Y / vY + i * step);
glColor3f(BREWERCP[(i + 1) * 4 + 0], BREWERCP[(i + 1) * 4 + 1], BREWERCP[(i + 1) * 4 + 2]);
glVertex2f(border * X / vX, border * Y / vY + (i + 1) * step);
glEnd();
}
glFlush();
// pressure bar text
glColor3f(0.0f, 0.0f, 0.0f);
glRasterPos2f(0.0f, (vY - border) * Y / vY);
std::stringstream ss_p;
ss_p << "Velocity range";
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str()));
// pressure range text
step = vY - 3 * border;
step /= 10;
for (unsigned i = 0; i < 11; i++) {
glRasterPos2f((border * 1.5f) * X / vX, (border + i * step) * Y / vY);
std::stringstream ss_n;
ss_n << min_v + i * (max_v - min_v) / 10;
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str()));
}
}
}
if (manufacture) {
glut_draw_sphere();
glut_draw_cylinder();
glut_draw_bridge();
}
if (radii_changed) {
glColor3f(0.835f, 0.243f, 0.310f);
glRasterPos2f(V[radii_index].c[0], V[radii_index].c[1] - 1.0f);
std::stringstream ss_r;
ss_r << "r=" << V[radii_index].r;
glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_r.str().c_str()));
radii_changed = false;
}
glutSwapBuffers();
}
// register mouse click events
void glut_mouse(int button, int state, int x, int y) {
if (button == GLUT_RIGHT_BUTTON)
return;
mods = glutGetModifiers(); // get special keyboard input
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN) {
std::cout << "\r"; // clear up ERROR reminder
std::cout << "\t\t\t\t\t\t\t\t\t";
std::cout.flush();
}
// to generate a new network by mouse click
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && generate_network && mods == 0) {
mouse_x = x; // get the click position in the window coordinates
mouse_y = y;
unsigned idx = UINT_MAX; // stores the vertex's index
if (first_click) { // first click of one line of edge
flag = epsilon_vertex(x, y, idx); // find out whether current position appears a vertex
if (flag) {
new_edge.p[0] = idx; // store the geometry start vertex index
tmp_edge.p[0] = idx;
num++;
}
else {
new_vertex.c = stim::vec3<float>(x, (vY - y), 0); // make a new vertex
new_vertex.c[0] = new_vertex.c[0] * (float)X / vX;
new_vertex.c[1] = new_vertex.c[1] * (float)Y / vY;
new_edge.p[0] = iter; // make a new edge and set the starting vertex
tmp_edge.p[0] = iter;
V.push_back(new_vertex); // push a new vertex
iter++; // iterator + 1
num++; // added a vertex
}
first_click = false; // finished first click
}
else { // following click of one line of edge
flag = epsilon_vertex(x, y, idx);
if (flag) {
if (!is_edge(idx)) { // no edge between two vertices
if (idx != UINT_MAX) { // acceptable click
new_edge.p[1] = idx;
if (new_edge.p[0] != new_edge.p[1]) { // simple graph, no loop and parallel edge
E.push_back(new_edge);
color.push_back(color_index); // record the color scheme
first_click = true;
num = 0; // start a new line of edges
color_index = (color_index == JACK_CTRL_PTS - 1) ? 0 : color_index + 1; // update color scheme for new line of edges
}
else {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "You can't do that!";
std::cout.flush();
}
}
}
else {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "There exists an edge between these two vertices";
std::cout.flush();
}
}
else {
new_vertex.c = stim::vec3<float>(x, (vY - y), 0); // make a new vertex
new_vertex.c[0] = new_vertex.c[0] * (float)X / vX;
new_vertex.c[1] = new_vertex.c[1] * (float)Y / vY;
new_edge.p[1] = iter; // make a new edge and set the starting vertex to current
V.push_back(new_vertex); // push a new vertex
E.push_back(new_edge); // push a new edge
color.push_back(color_index); // record the color scheme
new_edge.p[0] = iter; // make a new edge and set the starting vertex to current
tmp_edge.p[0] = iter;
iter++; // iterator + 1
num++; // added a vertex
}
}
}
// modify pressure
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && simulation && mods == 0 && !grow) {
mouse_x = x;
mouse_y = y;
if (select_pressure) { // if a vertex had been selected to be modified pressure
if (vY - y < border || vY - y > vY - 2 * border) { // click outside the bar along y-axis
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "Click exceeds the range of pressure bar";
std::cout.flush();
}
else {
select_pressure = false; // finished setting the pressure of chose vertex
Flow.P[pressure_index] = (vY - mouse_y - border) / (vY - 3 * border) * max_pressure; // get the pressure value on pressure bar
system("CLS"); // clear up console box
std::cout << " ===================" << std::endl;
std::cout << "| SIMULATION MODE |" << std::endl;
std::cout << " ===================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Click dangle vertex to set pressure" << std::endl;
std::cout << " Move wheel to change radii of the vertex which the cursor meets" << std::endl;
// simulate again
find_stable_state();
show_stable_state();
}
}
else {
unsigned tmp_p = 0;
bool flag = epsilon_vertex(mouse_x, mouse_y, tmp_p);
if (flag) {
std::vector<unsigned>::iterator it = std::find(dangle_vertex.begin(), dangle_vertex.end(), tmp_p);
if (it == dangle_vertex.end()) { // if it is not dangle vertex
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "Only dangle vertex pressure need to be set";
std::cout.flush();
}
else { // if it is dangle vertex
select_pressure = true; // set flag to true
pressure_index = tmp_p; // stores the index of vertex
}
}
}
}
// build inlet and outlet
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && (build_inlet || build_outlet) && !select_bridge && !select_corner && mods == 0) {
mouse_x = x;
mouse_y = y;
select_bridge = true;
if (build_inlet) {
inlet_port = stim::vec3<float>(x, (vY - y), 0); // get the inlet port coordinates
inlet_port[0] = inlet_port[0] * (float)X / vX;
inlet_port[1] = inlet_port[1] * (float)Y / vY;
inlet_done = true;
float tmp_l;
for (unsigned i = 0; i < input.size(); i++) {
stim::bridge<float> b;
// push back vertices
b.V.push_back(inlet_port);
b.V.push_back(V[input[i].first].c);
// one direct line
tmp_l = (inlet_port - V[input[i].first].c).len();
b.Q = input[i].third;
b.l = tmp_l;
b.v.push_back(input[i].first); // only store the dangle vertex index information
inlet.push_back(b);
}
// check out current connection
is_acceptable();
}
else if (build_outlet) {
outlet_port = stim::vec3<float>(x, (vY - y), 0); // get the inlet port coordinates
outlet_port[0] = outlet_port[0] * (float)X / vX;
outlet_port[1] = outlet_port[1] * (float)Y / vY;
outlet_done = true;
float tmp_l;
for (unsigned i = 0; i < output.size(); i++) {
stim::bridge<float> b;
// push back vertices
b.V.push_back(outlet_port);
b.V.push_back(V[output[i].first].c);
// one direct line
tmp_l = (outlet_port - V[output[i].first].c).len();
b.Q = output[i].third;
b.l = tmp_l;
b.v.push_back(output[i].first);
outlet.push_back(b);
}
// check out current connection
is_acceptable();
}
}
// select a bridge to modify
else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && select_bridge && mods == 0) {
mouse_x = x;
mouse_y = y;
bool flag = epsilon_edge(mouse_x, mouse_y, bridge_index);
if (flag) {
select_bridge = false;
select_corner = true;
}
else {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "No bridge at where your click";
}
}
	// reconnect the selected inlet/outlet bridge through a new corner vertex
else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && select_corner && mods == 0) {
mouse_x = x;
mouse_y = y;
mask_done = false; // recalculate the connection
corner_vertex = stim::vec3<float>(x, (vY - y), 0); // get the corner vertex
corner_vertex[0] = corner_vertex[0] * (float)X / vX;
corner_vertex[1] = corner_vertex[1] * (float)Y / vY;
if (build_inlet) {
stim::bridge<float> tmp_b;
tmp_b.V.push_back(inlet_port); // push back the inlet port vertex
tmp_b.V.push_back(corner_vertex); // push back the corner vertex
unsigned idx = inlet[bridge_index].V.size() - 1; // get the dangle vertex index from the inlet
tmp_b.V.push_back(inlet[bridge_index].V[idx]); // push back the dangle vertex
tmp_b.l = (tmp_b.V[0] - tmp_b.V[1]).len() + (tmp_b.V[1] - tmp_b.V[2]).len();
tmp_b.Q = inlet[bridge_index].Q;
tmp_b.v.push_back(inlet[bridge_index].v[0]);
tmp_b.r = inlet[bridge_index].r;
inlet[bridge_index] = tmp_b;
}
else if (build_outlet) {
stim::bridge<float> tmp_b;
tmp_b.V.push_back(outlet_port); // push back the inlet port vertex
tmp_b.V.push_back(corner_vertex); // push back the corner vertex
unsigned idx = outlet[bridge_index].V.size() - 1; // get the dangle vertex index from the outlet
tmp_b.V.push_back(outlet[bridge_index].V[idx]); // push back the dangle vertex
tmp_b.l = (tmp_b.V[0] - tmp_b.V[1]).len() + (tmp_b.V[1] - tmp_b.V[2]).len();
tmp_b.Q = outlet[bridge_index].Q;
tmp_b.v.push_back(outlet[bridge_index].v[0]);
tmp_b.r = outlet[bridge_index].r;
outlet[bridge_index] = tmp_b;
}
// check out current connection
is_acceptable();
select_corner = false;
select_bridge = true;
}
	// left CTRL + left mouse to grow a new line of edges from any vertex
if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && simulation && mods == GLUT_ACTIVE_CTRL && grow) {
mouse_x = x;
mouse_y = y;
unsigned i;
bool flag = epsilon_edge(mouse_x, mouse_y, edge_index, i);
if (flag) {
for (unsigned j = 0; j < tmp_E.size(); j++)
E.push_back(tmp_E[j]);
new_vertex = V[E[edge_index].p[i]];
new_edge.p[1] = E[edge_index].p[i];
E.push_back(new_edge);
get_background(); // get network basic information
flow_initialize(); // initialize flow
find_stable_state(); // main function of solving the linear system
show_stable_state(); // output results as csv files
grow = false;
}
else {
new_vertex.c = stim::vec3<float>(x, (vY - y), 0); // make a new vertex
new_vertex.c[0] = new_vertex.c[0] * (float)X / vX;
new_vertex.c[1] = new_vertex.c[1] * (float)Y / vY;
unsigned num = V.size(); // get the new vertex index
V.push_back(new_vertex);
new_edge.p[1] = num;
tmp_E.push_back(new_edge);
new_edge.p[0] = num;
tmp_edge.p[0] = num;
new_num++;
}
}
else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && simulation && mods == GLUT_ACTIVE_CTRL && !grow) {
mouse_x = x;
mouse_y = y;
// new point information
unsigned i;
new_num = 0;
bool flag = epsilon_edge(mouse_x, mouse_y, edge_index, i);
if (flag) {
grow = true;
new_vertex = V[E[edge_index].p[i]];
new_edge.p[0] = E[edge_index].p[i];
tmp_edge.p[0] = E[edge_index].p[i];
}
else {
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "No vertex at where your click";
}
}
}
// register mouse move events
void glut_motion(int x, int y) {
tmp_vertex.c = stim::vec3<float>(x, (vY - y), 0);
tmp_vertex.c[0] = tmp_vertex.c[0] * (float)X / vX;
tmp_vertex.c[1] = tmp_vertex.c[1] * (float)Y / vY;
corner_vertex[0] = tmp_vertex.c[0];
corner_vertex[1] = tmp_vertex.c[1];
glutPostRedisplay();
}
// register wheel events
void glut_wheel(int wheel, int direction, int x, int y) {
std::cout << "\r"; // clear up ERROR reminder
std::cout << "\t\t\t\t\t\t\t\t\t";
std::cout.flush();
if (simulation) {
flag = epsilon_vertex(x, y, radii_index);
if (flag) {
radii_changed = true;
if (direction > 0) // increase radii
V[radii_index].r += radii_factor;
else {
V[radii_index].r -= radii_factor;
if (V[radii_index].r <= 0) { // degenerate case where radii less than 0
Sleep(100); // make flash effect
std::cout << "\r";
std::cout << "[ ERROR ] ";
std::cout << "Radii is less than 0, reset to default radii";
V[radii_index].r = default_radii;
}
}
}
system("CLS"); // clear up console box
std::cout << " ===================" << std::endl;
std::cout << "| SIMULATION MODE |" << std::endl;
std::cout << " ===================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Click dangle vertex to set pressure" << std::endl;
std::cout << " Move wheel to change radii of the vertex which the cursor meets" << std::endl;
// simulate again
find_stable_state();
show_stable_state();
}
glutPostRedisplay();
}
// register keyboard inputs
void glut_keyboard(unsigned char key, int x, int y) {
switch (key) {
// press space to start a new line of edges
case 32:
first_click = true;
num = 0;
color_index = (color_index == JACK_CTRL_PTS - 1) ? 0 : color_index + 1; // update color scheme for new line of edges
break;
// reset main feeder position
case 'c':
if (build_inlet || build_outlet) {
select_bridge = false;
select_corner = false;
if (build_inlet) {
inlet_done = false;
inlet.clear();
}
else if (build_outlet) {
outlet_done = false;
outlet.clear();
}
mask_done = false;
}
break;
// output the image stack
case 'm':
if (manufacture) {
#ifdef __CUDACC__
make_image_stack();
#else
std::cout << "You need to have a gpu to make image stack, sorry." << std::endl;
#endif
}
break;
// output the drawn network
case 's':
{
stringstream output_ss;
output_ss << name << "_" << sub_name << "_net" << ".obj";
std::string output_filename = output_ss.str();
std::ofstream output_file;
output_file.open(output_filename.c_str());
for (unsigned i = 0; i < V.size(); i++)
output_file << "v" << " " << V[i].c[0] << " " << V[i].c[1] << " " << V[i].c[2] << std::endl;
for (unsigned i = 0; i < V.size(); i++)
output_file << "vt" << " " << V[i].r << std::endl;
for (unsigned i = 0; i < E.size(); i++)
output_file << "l" << " " << E[i].p[0] + 1 << "/" << E[i].p[0] + 1 << " " << E[i].p[1] + 1 << "/" << E[i].p[1] + 1 << std::endl;
output_file.close();
sub_name++; // sub name change
break;
}
// undo
case 'u': {
// first vertex on a new line of edges
if (num == 1) {
bool flag = false; // check whether current vertex belongs to another edge
for (unsigned i = 0; i < E.size(); i++) {
if (new_edge.p[0] == E[i].p[0] || new_edge.p[0] == E[i].p[1]) {
flag = true;
break;
}
}
if (new_edge.p[0] == V.size() - 1 && !flag) { // new vertex
V.pop_back(); // pop back new vertex
iter--;
}
first_click = true;
num = 0;
}
// not first vertex
else if (num > 1) {
new_edge.p[0] = E[E.size() - 1].p[0];
tmp_edge.p[0] = new_edge.p[0];
E.pop_back(); // pop back new "things"
color.pop_back();
V.pop_back();
iter--;
num--;
}
break;
}
// close window and exit application
case 27: // if keyboard 'ESC' is pressed, then exit
std::exit(1);
}
glutPostRedisplay();
}
// register glut menu options
void glut_menu(int value) {
cur_menu_num = glutGet(GLUT_MENU_NUM_ITEMS);
if (value == 1) { // generation mode
system("CLS"); // clear up console
std::cout << " ==================" << std::endl;
std::cout << "| GENERATOR MODE |" << std::endl;
std::cout << " ==================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Click to draw a network. (press SPACE to start a new line of edges)" << std::endl;
// clear up previous work
glClear(GL_COLOR_BUFFER_BIT);
V.clear();
E.clear();
iter = 0;
num = 0;
// set up flags
generate_network = true;
simulation = false;
manufacture = false;
first_click = true;
build_inlet = false;
build_outlet = false;
select_bridge = false;
mask_done = false;
color_bound = false;
name++; // name sequence increments
new_menu_num = 2; // set new menu option number
}
if (value == 2) { // simulation mode
// clear previous drawn buffer
glClear(GL_COLOR_BUFFER_BIT);
iter = 0;
num = 0;
system("CLS"); // clear up console box
std::cout << " ===================" << std::endl;
std::cout << "| SIMULATION MODE |" << std::endl;
std::cout << " ===================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Click dangle vertex to set pressure" << std::endl;
std::cout << " Move wheel to change radii of the vertex which the cursor meets" << std::endl;
// set up flags
generate_network = false;
simulation = true;
manufacture = false;
build_inlet = false;
build_outlet = false;
select_bridge = false;
mask_done = false;
if (first_simulation) {
get_background(); // get network basic information
flow_initialize(); // initialize flow
first_simulation = false;
}
// set other initial information then solve the network
find_stable_state(); // main function of solving the linear system
show_stable_state(); // output results as csv files
// set the camera object
stim::vec3<float> c = (L + U) * 0.5f; // get the center of the bounding box
stim::vec3<float> size = (U - L); // get the size of the bounding box
// place the camera along the z-axis at a distance determined by the network size along x and y
cam.setPosition(c + stim::vec<float>(0, 0, camera_factor * std::max(size[0], size[1])));
cam.LookAt(c[0], c[1], c[2]);
new_menu_num = 5; // set new menu option number
}
if (value == 3) { // building inlet mode
system("CLS"); // clear up console
std::cout << " ====================" << std::endl;
std::cout << "| BUILD INLET MODE |" << std::endl;
std::cout << " ====================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Firstly, click any position to set inlet main feeder" << std::endl;
std::cout << " Then, click any bridge to translocate" << std::endl;
std::cout << " System will check and print current bridge status :)" << std::endl;
std::cout << " Press c to delete inlet main feeder and bridges" << std::endl;
std::cout << " If current bridge is not acceptable, you can either do:" << std::endl;
std::cout << " [*1. increase the pressure at the vertex which is pointed out" << std::endl;
std::cout << " 2. increase the length of connection at that vertex" << std::endl;
std::cout << " 3. use more advance manufacture machine]" << std::endl;
std::cout << "[ NOTE ] ";
std::cout << "Delete main feeder before modify if you want to change input ports" << std::endl << std::endl;
// set up flags
if (!inlet_done) { // first time need to set main feeder position
generate_network = false;
simulation = false;
manufacture = false;
build_inlet = true;
build_outlet = false;
select_pressure = false;
select_bridge = false;
select_corner = false;
mask_done = false;
}
else { // already set the inlet main feeder position
generate_network = false;
simulation = false;
manufacture = false;
build_inlet = true;
build_outlet = false;
select_pressure = false;
select_bridge = true;
// check out current connection
is_acceptable();
}
new_menu_num = 5; // set new menu option number
}
if (value == 4) { // building outlet mode
system("CLS"); // clear up console box
std::cout << " =====================" << std::endl;
std::cout << "| BUILD OUTLET MODE |" << std::endl;
std::cout << " =====================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Firstly, click any position to set inlet main feeder" << std::endl;
std::cout << " Then, click any bridge to translocate" << std::endl;
std::cout << " System will check and print current bridge status :)" << std::endl;
std::cout << " Press c to delete outlet main feeder and bridges" << std::endl;
std::cout << " If current bridge is not acceptable, you can either do:" << std::endl;
std::cout << " [*1. decrease the pressure at the vertex which is pointed out" << std::endl;
std::cout << " 2. increase the length of connection at that vertex" << std::endl;
std::cout << " 3. use more advance manufacture machine]" << std::endl;
std::cout << "[ NOTE ] ";
std::cout << "Delete main feeder before modify if you want to change output ports" << std::endl << std::endl;
// set up flags
if (!outlet_done) { // first time need to set main feeder position
generate_network = false;
simulation = false;
manufacture = false;
build_inlet = false;
build_outlet = true;
select_pressure = false;
select_bridge = false;
select_corner = false;
mask_done = false;
}
else { // already set the outlet main feeder position
generate_network = false;
simulation = false;
manufacture = false;
build_inlet = false;
build_outlet = true;
select_bridge = true;
select_pressure = false;
select_corner = false;
// check out current connection
is_acceptable();
}
new_menu_num = 5; // set new menu option number
}
if (value == 5) { // manufacture mode
system("CLS"); // clear up console box
std::cout << " ====================" << std::endl;
std::cout << "| MANUFACTURE MODE |" << std::endl;
std::cout << " ====================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Press m to make and save image stack" << std::endl;
// set up flags
generate_network = false;
simulation = false;
manufacture = true;
build_inlet = false;
build_outlet = false;
select_bridge = false;
if (!mask_done) {
// calculate the inlet connection radii
unsigned midx;
for (unsigned i = 0; i < inlet.size(); i++) {
if (inlet[i].v[0] == min_input_index) {
midx = i;
break;
}
}
for (unsigned i = 0; i < inlet.size(); i++) {
unsigned idx = inlet[i].v[0];
if (idx == min_input_index) {
inlet[i].r = minimum_radii; // set the maximum pressure connection to minimum radii
}
else { // P1 + deltaP1 = P2 + deltaP2
float tmp_r;
if (mode == 2) {
tmp_r = (Flow.pressure[min_input_index] + ((12 * u * inlet[midx].l * inlet[midx].Q) / (std::pow(h, 3) * 2 * minimum_radii)) - Flow.pressure[idx]) * (std::pow(h, 3)) / (12 * u * inlet[i].l * inlet[i].Q);
tmp_r = (1 / tmp_r) / 2;
}
else if (mode == 3) {
tmp_r = (Flow.pressure[min_input_index] + ((8 * u * inlet[midx].l * inlet[midx].Q) / (std::pow(minimum_radii, 4) * (float)stim::PI)) - Flow.pressure[idx]) * (float)stim::PI / (8 * u * inlet[i].l * inlet[i].Q);
tmp_r = std::pow(1 / tmp_r, 1.0f / 4);
}
inlet[i].r = tmp_r;
}
}
// calculate the outlet connection radii
for (unsigned i = 0; i < outlet.size(); i++) {
if (outlet[i].v[0] == max_output_index) {
midx = i;
break;
}
}
for (unsigned i = 0; i < outlet.size(); i++) {
unsigned idx = outlet[i].v[0];
if (idx == max_output_index) {
outlet[i].r = minimum_radii; // set the maximum pressure connection to minimum radii
}
else { // P1 - deltaP1 = P2 - deltaP2
float tmp_r;
if (mode == 2) {
tmp_r = (Flow.pressure[idx] - (Flow.pressure[max_output_index] - (12 * u * outlet[midx].l * outlet[midx].Q) / (std::pow(h, 3) * 2 * minimum_radii))) * (std::pow(h, 3)) / (12 * u * outlet[i].l * outlet[i].Q);
tmp_r = (1 / tmp_r) / 2;
}
else if (mode == 3) {
tmp_r = (Flow.pressure[idx] - (Flow.pressure[max_output_index] - (8 * u * outlet[midx].l * outlet[midx].Q) / (std::pow(minimum_radii, 4) * (float)stim::PI))) * (float)stim::PI / (8 * u * outlet[i].l * outlet[i].Q);
tmp_r = std::pow(1 / tmp_r, 1.0f / 4);
}
outlet[i].r = tmp_r;
}
}
}
inlet_flow_rate = outlet_flow_rate = 0.0f;
// calculate the main feeder flow rate and pressure
for (unsigned i = 0; i < inlet.size(); i++) {
inlet_flow_rate += fabsf(inlet[i].Q);
}
for (unsigned i = 0; i < outlet.size(); i++) {
outlet_flow_rate += fabsf(outlet[i].Q);
}
for (unsigned i = 0; i < inlet.size(); i++) {
unsigned idx = inlet[i].v[0];
if (mode == 2)
inlet_pressure = Flow.pressure[idx] + (12 * u * inlet[i].l * inlet[i].Q) / (2 * inlet[i].r * std::pow(h, 3));
else if (mode == 3)
inlet_pressure = Flow.pressure[idx] + (8 * u * inlet[i].l * inlet[i].Q) / ((float)stim::PI * std::pow(inlet[i].r, 4));
}
for (unsigned i = 0; i < outlet.size(); i++) {
unsigned idx = outlet[i].v[0];
if (mode == 2)
outlet_pressure = Flow.pressure[idx] - (12 * u * outlet[i].l * outlet[i].Q) / (2 * inlet[i].r * std::pow(h, 3));
else if (mode == 3)
outlet_pressure = Flow.pressure[idx] - (8 * u * outlet[i].l * outlet[i].Q) / ((float)stim::PI * std::pow(outlet[i].r, 4));
}
mask_done = true;
preparation(); // preparation for making image stack
new_menu_num = 5; // set new menu option number
}
// set up new menu
glut_set_menu(cur_menu_num, new_menu_num);
glutPostRedisplay();
}
// window reshape function
void glut_reshape(int x, int y) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
vX = glutGet(GLUT_WINDOW_WIDTH);
vY = glutGet(GLUT_WINDOW_HEIGHT);
glViewport(0, 0, vX, vY);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, X, 0.0, Y, -50.0, 50.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
// glut initialization
void glut_initialize() {
int myargc = 1;
char* myargv[1];
myargv[0] = strdup("generate_network_network");
glutInit(&myargc, myargv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowPosition(800, 0);
glutInitWindowSize(1000, 1000);
glutCreateWindow("Generate Simple 2D network");
glutDisplayFunc(glut_render);
glutMouseFunc(glut_mouse);
glutPassiveMotionFunc(glut_motion);
glutMouseWheelFunc(glut_wheel);
glutKeyboardFunc(glut_keyboard);
glutReshapeFunc(glut_reshape);
// initilize menu
glutCreateMenu(glut_menu); // create a menu object
glut_set_menu(0, 2);
glutAttachMenu(GLUT_RIGHT_BUTTON); // register right mouse to open menu option
}
// output an advertisement for the lab, authors and usage information
void advertise() {
std::cout << std::endl << std::endl;
std::cout << " =======================================================================================" << std::endl;
std::cout << "|Thank you for using the synthetic microvascular model generator for microfluidics tool!|" << std::endl;
std::cout << "|Scalable Tissue Imaging and Modeling (STIM) Lab, University of Houston |" << std::endl;
std::cout << "|Developers: Jiaming Guo, David Mayerich |" << std::endl;
std::cout << "|Source: https://git.stim.ee.uh.edu/instrumentation/Microfluidics |" << std::endl;
std::cout << " =======================================================================================" << std::endl << std::endl;
std::cout << "usage: flow2" << std::endl;
std::cout << "--2d -> activate 2d mode to treat the cross-section as rectangular" << std::endl;
std::cout << "--units units-> string indicating output units (ex. um)" << std::endl;
std::cout << "--maxpress 2 -> maximal pressure for simulation" << std::endl;
std::cout << "--minradii 10 -> minimal manufacuture radius" << std::endl;
std::cout << "--fradii 15 -> main feeder radius" << std::endl;
std::cout << "--viscosity 0.00001 -> constant viscosity value" << std::endl;
std::cout << "--workspace 450 -> workspace size in terms of units" << std::endl;
std::cout << "--stackres 0.6 0.6 1.0 -> voxel size" << std::endl;
std::cout << "--stackdir /home/network/image_stack -> image stack saving directory" << std::endl;
}
// argument and main loop
int main(int argc, char* argv[]) {
HWND Window = GetConsoleWindow(); // set the window default window
SetWindowPos(Window, 0, 0, 200, 0, 0, SWP_NOSIZE | SWP_NOZORDER); // position might value based on the screen resolution
stim::arglist args; // create an instance of arglist
// add arguments
args.add("help", "prints this help");
args.add("2d", "activate 2d mode and set the height of microvascular channel (in units), default is 3d mode (circle cross section)");
args.add("units", "string indicating units of length for output measurements (ex. velocity)", "um", "text string");
args.add("maxpress", "maximum allowed pressure in g / units / s^2, default 2 is for blood when units = um", "2", "real value > 0");
args.add("minradii", "minimum radii allowed for manufacture, default 5 is for blood when units = um", "5", "real value > 5");
args.add("fradii", "radii of main feeders, default is 10 when units = um", "10", "real value > 5");
args.add("viscosity", "set the viscosity of the fluid (in g / units / s), default .00001 is for blood when units = um", ".00001", "real value > 0");
args.add("workspace", "sets the size of the workspace (in units)", "400", "real value > 0");
args.add("stackres", "spacing between pixel samples in each dimension(in units/pixel)", ".184 .184 1", "real value > 0");
args.add("stackdir", "set the directory of the output image stack", "", "any existing directory (ex. /home/name/network)");
args.parse(argc, argv); // parse the command line
// set up initial inputs
if (args["help"].is_set()) { // test for help
advertise(); // advertise here
std::cout << args.str(); // output arguments
std::exit(1);
}
// get the units to work on
units = args["units"].as_string();
// set the mode, default is 10 in um
if (args["2d"].is_set()) {
mode = 2;
h = args["2d"].as_float();
}
else { // default mode is 3d
mode = 3;
}
// get the workspace size
X = Y = args["workspace"].as_float();
// get the vexel and image stack size
dx = args["stackres"].as_float(0);
dy = args["stackres"].as_float(1);
dz = args["stackres"].as_float(2);
// get the save directory of image stack
if (args["stackdir"].is_set())
stackdir = args["stackdir"].as_string();
// blood pressure in capillaries range from 15 - 35 torr
// 1 torr = 133.3 Pa
max_pressure = args["maxpress"].as_float();
// normal blood viscosity range from 4 - 15 mPa·s(cP)
// 1 Pa·s = 1 g / mm / s
u = args["viscosity"].as_float(); // g / units / s
// get minimum radii for building bridge
default_radii = minimum_radii = args["minradii"].as_float();
new_vertex.r = default_radii;
// get the main feeder radius
main_feeder_radii = args["fradii"].as_float();
// draw a network
generate_network = true; // begin draw a new network
std::cout << " ==================" << std::endl;
std::cout << "| GENERATOR MODE |" << std::endl;
std::cout << " ==================" << std::endl << std::endl;
std::cout << "[ TIP ] ";
std::cout << "Click to draw a new network. (press SPACE to start a new line of edges)" << std::endl;
std::cout << "[ NOTE ] ";
std::cout << "Press s to save the network and r to load the save" << std::endl;
// glut main loop
glut_initialize();
glutMainLoop();
} |
12badcc5c2ae4e8aeb8cfb45e73852e93e438feb.hip | // !!! This is a file automatically generated by hipify!!!
// This is used to Test the adaptive Spmspv/spmv performance in applications.
#include <iostream>
#include <string>
#include <float.h>
#include <typeinfo>
#include <limits>
#include <algorithm>
#include <vector>
#include <random>
#include <dirent.h>
#include <sys/stat.h>
#include <omp.h>
#include <hipsparse.h>
#include <cusparse_v2.h>
#include <hip/hip_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include "spmspv/csc-spmspv/spmspv.h"
#include "spmspv/class.hpp"
#include "spmspv/mtx.hpp"
#include "spmspv/readsvmdata.hpp"
#include "spmspv/config.h"
//#define BFS
//#define CORRECT
#ifndef VALUE_TYPE
#define VALUE_TYPE float
#endif
#ifndef NUM_RUN
#define NUM_RUN 10
#endif
#define IS_DOUBLE_ZERO(d) (abs(d) < DBL_EPSILON)
#define IS_FLOAT_ZERO(d) (abs(d) < FLT_EPSILON)
void TestCoo2Csr(int m, int mat_nnz,
int* coo_row, int* csr_row) {
int* d_csr_row = NULL;
int* d_coo_row = NULL;
cudaErrCheck(hipMalloc((void** )&d_csr_row, (m + 1) * sizeof(int)));
cudaErrCheck(hipMalloc((void** )&d_coo_row, mat_nnz * sizeof(int)));
cudaErrCheck(hipMemcpy(d_coo_row, coo_row, mat_nnz * sizeof(int),
hipMemcpyHostToDevice));
hipsparseHandle_t sparse_handle;
CUSP_CALL(hipsparseCreate(&sparse_handle));
hipsparseMatDescr_t descr = 0;
CUSP_CALL(hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
cudaErrCheck(hipDeviceSynchronize());
hipEvent_t st, ed;
float tm = 0;
hipEventCreate(&st);
hipEventCreate(&ed);
hipEventRecord(st, 0);
CUSP_CALL(hipsparseXcoo2csr(sparse_handle,
d_coo_row, mat_nnz, m, d_csr_row,
HIPSPARSE_INDEX_BASE_ZERO));
hipEventRecord(ed, 0);
hipEventSynchronize(ed);
//unit: ms.
hipEventElapsedTime(&tm, st, ed);
hipEventDestroy(st);
hipEventDestroy(ed);
cudaErrCheck(hipMemcpy(csr_row, d_csr_row, (m + 1) * sizeof(int),
hipMemcpyDeviceToHost));
cudaErrCheck(hipDeviceSynchronize());
cudaErrCheck(hipFree(d_csr_row));
cudaErrCheck(hipFree(d_coo_row));
}
// Compare a CPU reference vector against a GPU result element by element,
// reporting every mismatching index; prints "RESULT OK" when all elements agree.
template<typename T>
void CheckVector(T* cpu, T* gpu, int len) {
	bool all_equal = true;
	for (int i = 0; i < len; ++i) {
		if (cpu[i] == gpu[i]) continue;	// element matches, nothing to report
		std::cout << "Err at " << i << ", cpu[i] = " << cpu[i] <<", gpu[i] = " << gpu[i] << std::endl;
		all_equal = false;
	}
	if (all_equal)
		std::cout <<"RESULT OK" <<std::endl;
}
#ifndef VALUE_TYPE
#define VALUE_TYPE float
#endif
// Single-threaded reference SpMV over a CSR matrix: y_ref = alpha * A * x_dense.
// Used to validate the GPU kernels. n and mat_nnz are accepted for interface
// symmetry but are not needed by the computation.
void serialspmv(int m, int n, int mat_nnz,
	int* csr_row, int* csr_col, VALUE_TYPE* csr_val,
	VALUE_TYPE* x_dense, VALUE_TYPE* y_ref, VALUE_TYPE alpha) {
	for (int row = 0; row < m; ++row) {
		VALUE_TYPE acc = 0;
		for (int k = csr_row[row]; k < csr_row[row + 1]; ++k) {
			acc += x_dense[csr_col[k]] * csr_val[k] * alpha;
		}
		y_ref[row] = acc;
	}
}
// Placeholder predictor for the SpMSpV solution strategy: always selects
// strategy 0. The features are accepted but not yet used by the stub.
int predictSolution(float nnz, float x_sparsity){
	(void)nnz;		// unused by the stub
	(void)x_sparsity;	// unused by the stub
	return 0;
}
// Placeholder predictor for the workload-distribution scheme: always selects
// scheme 0. The row-statistics features are accepted but not yet used.
int predictWorkloadDistribution(float maxRow, float avgRow, float stdRow){
	(void)maxRow;	// unused by the stub
	(void)avgRow;	// unused by the stub
	(void)stdRow;	// unused by the stub
	return 0;
}
// Placeholder predictor for the result write-back strategy: always selects
// strategy 0. The features are accepted but not yet used by the stub.
int predictWritingBack(float m, float x_sparisity){
	(void)m;		// unused by the stub
	(void)x_sparisity;	// unused by the stub
	return 0;
}
int Run(std::string file_name, std::string matrix_name, int iter, float alpha, float beta,
int m, int n, int mat_nnz,
int* csr_row, int* csr_col, VALUE_TYPE* csr_val,
int* x_sparse_key, VALUE_TYPE* x_sparse_val, VALUE_TYPE* x_dense,
VALUE_TYPE* y_dense) {
int err = 0;
hipError_t err_cuda = hipSuccess;
int device_id = 0;
hipSetDevice(device_id);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device_id);
std::cout << "Device [" << device_id << "] " << deviceProp.name << ", "
<< " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl;
// write to file.
std::ofstream fout;
fout.open(file_name.c_str(), std::ofstream::app);
if (!fout.is_open()) {
std::cout << "can't open output file\n" << file_name << std::endl;
exit(1);
}
fout << "mat_name, m, n, nnz, max, min, avg, m_range, std, equlity, gini, x_nnz, sparse2dense, sparse2bitarray, bin_len, max, min, xnnz/n, bin_len/nnz, xnnz_range, m1_id, m2_id, m3_id, time_r, time_c, single_atomic_col_kernel, single_sort_col_kernel, single_spmv, framework\n";
fout << matrix_name << " ";
int* d_csr_row = NULL;
int* d_csr_col = NULL;
VALUE_TYPE* d_csr_val = NULL;
cudaErrCheck(hipMalloc((void **)&d_csr_row, (m+1) * sizeof(int)));
cudaErrCheck(hipMalloc((void **)&d_csr_col, mat_nnz * sizeof(int)));
cudaErrCheck(hipMalloc((void **)&d_csr_val, mat_nnz * sizeof(VALUE_TYPE)));
cudaErrCheck(hipMemcpy(d_csr_row, csr_row, (m+1) * sizeof(int),
hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(d_csr_col, csr_col, mat_nnz * sizeof(int),
hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(d_csr_val, csr_val, mat_nnz * sizeof(VALUE_TYPE),
hipMemcpyHostToDevice));
VALUE_TYPE* d_x = NULL;
cudaErrCheck(hipMalloc((void** )&d_x, n * sizeof(VALUE_TYPE)));
cudaErrCheck(hipMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE),
hipMemcpyHostToDevice));
int num_ints = (n + sizeof(int) * 8 - 1) / (sizeof(int) * 8);
int* d_bit_vector = NULL;
cudaErrCheck(hipMalloc((void** )&d_bit_vector, num_ints * sizeof(int)));
SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, mat_nnz);
err = A.InputCSR(mat_nnz, d_csr_row, d_csr_col, d_csr_val);
err = A.set_vector_type(1);//current vector type is densetype.
err = A.set_x(d_x); //
err = A.set_bitvector(d_bit_vector, num_ints);//
VALUE_TYPE* d_y = NULL; //result vec.
cudaErrCheck(hipMalloc((void** )&d_y, m * sizeof(VALUE_TYPE)));
cudaErrCheck(hipMemset(d_y, 0, m * sizeof(VALUE_TYPE))); //initialized to zero.
int* d_y_key = NULL; //result vec.
VALUE_TYPE* d_y_val = NULL;
cudaErrCheck(hipMalloc((void** )&d_y_key, m * sizeof(int)));
cudaErrCheck(hipMalloc((void** )&d_y_val, m * sizeof(VALUE_TYPE)));
VALUE_TYPE alpha_i = 1.0;
SpmspvTimer timer;
/*********select spmv: use holaspmv directly***********/
err = A.holaPreprocess();
timer.Start();
for (int i = 0; i < NUM_RUN; i++)
err = A.holaspmv(alpha_i, d_y);
err_cuda = hipDeviceSynchronize();
double holaspmv_time = timer.Stop()/ (double)NUM_RUN;
//double holaspmv_time = timer.Stop();
std::cout << "holaspmv time " << holaspmv_time << std::endl;
//fout << holaspmv_time << " ";
double csr5spmv_time = 0;
//fout << csr5spmv_time << " ";
int spmv_type = 0;
spmv_type = 0;
A.set_spmv_type(0);
//fout << spmv_type << " ";
//fout << "alpha = " << alpha << ", beta = " << beta << std::endl;
#ifdef CORRECT
//for serial spmv.
VALUE_TYPE* hres = (VALUE_TYPE* )malloc(m * sizeof(VALUE_TYPE));
CHECK_MALLOC(hres);
#endif
err = A.ToCSC();
#ifdef BFS
std::string file_prex = "/home/*/bfs_xnnz_";
#else
std::string file_prex = "/home/*/pr_xnnz_";
#endif
std::string file_suffix = ".txt";
std::string file_all = file_prex + matrix_name + file_suffix;
std::cout << "reading xnnz from file: " << file_all << std::endl;
readNNZXFromFile(file_all.c_str(), &iter);
std::cout << "iter = " << iter << std::endl;
int* xnnz_vec = (int* )malloc(iter * sizeof(int));
CHECK_MALLOC(xnnz_vec);
readSparseXInxFromFile(file_all.c_str(), iter, xnnz_vec);
int* d_x_key = NULL;
VALUE_TYPE* d_x_val = NULL;
cudaErrCheck(hipMalloc((void** )&d_x_key, (n) * sizeof(int)));
cudaErrCheck(hipMalloc((void** )&d_x_val, (n) * sizeof(VALUE_TYPE)));
//fout << "x_nnz, sparse2dense, sparse2bitarray, bin_len, max, min, xnnz/n, bin_len/nnz, xnnz_range, GM1, GM2, GM3, GM1/GM2, GM2/GM3, GM1/GM3, naive-col, lb-col, naive-rspmspv, naive-rspmspv+s2a, lb-rspmspv, lb-rspmspv+s2a, naive-spmv, naive-spmv+s2d, lb-spmv, lb-spmv+s2d \n";
double all_time = 0.0;
int y_nnz = 0;
int quit = 0;
//malloc for d_col_len[] and d_pre_alloc_buffer (csc spmspv preprocess and binlen) .
A.allocPreBuffer();
double time_sa=0, time_ss=0, time_sspmv=0;
double time_r, time_c;
double time_single_atomic_col_spmspv = 0.0;
double time_single_sort_col_spmspv = 0.0;
double time_single_spmv = 0.0;
double time_rule = 0, time_spmv = 0;
int mat_max_elems, mat_min_elems;
float mat_avg_elems, mat_x_range, mat_standard_row, mat_equlity, mat_gini;
A.computeMatFeture_serial(m, n, mat_nnz, csr_row, &mat_max_elems, &mat_min_elems, &mat_avg_elems,
&mat_x_range, &mat_standard_row, &mat_equlity, &mat_gini);
fout << m << " ";
fout << n << " ";
fout << mat_nnz << " ";
fout << mat_max_elems << " ";
fout << mat_min_elems << " ";
fout << mat_avg_elems << " ";
fout << mat_x_range << " ";//relative range of degree
fout << mat_standard_row<< " ";
fout << mat_equlity << " ";
fout << mat_gini << std::endl;
for (int i = 0; i < iter; i++) {
int x_nnz = xnnz_vec[i];
if (quit) break;
if(x_nnz >= n) {
x_nnz = n;
}
printf("x_nnz = %d\n", x_nnz);
fout << x_nnz << " ";
memset(x_sparse_key, 0, n * sizeof(int));
memset(x_sparse_val, 0, n * sizeof(VALUE_TYPE));
memset(x_dense, 0, n * sizeof(VALUE_TYPE));
#ifdef BFS
std::string in_file_prex = "/home/*/bfs_x_";
#else
std::string in_file_prex = "/home/*/pr_x_";
#endif
std::string in_file_suffix = ".txt";
std::string in_file_name = in_file_prex + matrix_name + in_file_suffix;
std::cout << "reading sparse x from file: " << in_file_name << std::endl;
extractSparseXfromFile(in_file_name, i, x_nnz, x_sparse_key, x_sparse_val, x_dense);
#ifdef CORRECT
//serial spmv computation.
memset(hres, 0, m * sizeof(VALUE_TYPE));//
serialspmv(m, n, mat_nnz, csr_row, csr_col, csr_val, x_dense, hres, alpha_i);
#endif
cudaErrCheck(hipMemcpy(d_x_key, x_sparse_key, x_nnz * sizeof(int),
hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(d_x_val, x_sparse_val, x_nnz * sizeof(VALUE_TYPE),
hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE),
hipMemcpyHostToDevice));
err = A.set_vector_type(0);//current vector type is sparse type.
err = A.set_sparse_x(d_x_key, d_x_val, x_nnz);//
err = A.set_x(d_x);//
timer.Start();
A.sparse2dense();//generate values in dense vector.
double s2dtime = timer.Stop();
std::cout << "DEBUG: sparse2dense time = " << s2dtime << std::endl;
//fout << "s2dtime = " << s2dtime << " ";
fout << s2dtime << " ";
timer.Start();
A.sparse2bitarray();// generate values in bitvector.
double s2atime = timer.Stop();
std::cout << "DEBUG: sparse2bitarray time = " << s2atime << std::endl;
//fout << " s2atime = " << s2atime << " ";
fout << s2atime << " ";
timer.Start();
int bin_len = A.computeBinlenVer2();
double time = timer.Stop();
std::cout << "DEBUG: compute bin_len time = " << time << "ms." << std::endl;
std::cout << "DEBUG: bin_len = " << bin_len << std::endl;
int max_elems = 0;
int min_elems = 0;
A.computeVecFeture_serial(x_nnz, x_sparse_key, &bin_len, &max_elems, &min_elems);
fout << bin_len << " ";
fout << max_elems << " ";
fout << min_elems << " ";
fout << 1.0*x_nnz/n << " ";
fout << 1.0*bin_len/mat_nnz << " ";
fout << (max_elems - min_elems)/(1.0*n) << " ";//relative range of degree
float x_sparsity = 1.0*x_nnz/n;
//rules judgement
int solution_id = 2;//0,1,2
int workloadDistribution_id = 1;//0,1
int writingBack_id = 1;//0,1(sort, atomic)
timer.Start();
//int predictSolution(int nnz, int x_sparsity){
solution_id = predictSolution((float)mat_nnz, x_sparsity);
//int predictWorkloadDistribution(int maxRow, int avgRow, int stdRow){
workloadDistribution_id = predictWorkloadDistribution((float)mat_max_elems, mat_avg_elems, mat_standard_row);
if(solution_id == 0){
//int predictWritingBack(int m, int x_sparisity){
writingBack_id = predictWritingBack((float)m, x_sparsity);
}
time_r = timer.Stop();
time_rule += time_r;
fout << " " << solution_id << " " << workloadDistribution_id << " " << writingBack_id << " ";
#if 1
//execute sinle kernel
hipMemset(d_y, 0, m * sizeof(VALUE_TYPE));
timer.Start();
err = A.CscBasedNoSortMergeSpmspv_keyval(true, alpha_i, &y_nnz, d_y, d_y_key, d_y_val);
time_sa = timer.Stop();
time_single_atomic_col_spmspv += time_sa;
hipError_t err_r = hipGetLastError();
if ( hipSuccess != err_r) {
printf("cscspmspv() invocate error.\n");
std::cout << "err code: " << hipGetErrorString(err_r) << std::endl;
exit(1);
}
#endif
#if 0
//execute sinle kernel
hipMemset(d_y, 0, m * sizeof(VALUE_TYPE));
timer.Start();
err = A.CscBasedSortMergeSpmspv(false, alpha, &y_nnz, d_y, d_y_key, d_y_val);
time_ss = timer.Stop();
time_single_sort_col_spmspv += time_ss;
err_r = hipGetLastError();
if ( hipSuccess != err_r) {
printf("cscspmspv() invocate error.\n");
std::cout << "err code: " << hipGetErrorString(err_r) << std::endl;
exit(1);
}
#endif
hipMemset(d_y, 0, m * sizeof(VALUE_TYPE));
timer.Start();
err = A.spmv(alpha_i, d_y);
time_sspmv = timer.Stop();
time_single_spmv += time_sspmv;
#if 0
//execute adaptive framework
//SpMV
hipMemset(d_y, 0, m * sizeof(VALUE_TYPE));
if(solution_id == 2){
if(workloadDistribution_id == 0){
timer.Start();
err = A.naivespmv(alpha, d_y);
time_c = timer.Stop();
}else{
timer.Start();
err = A.spmv(alpha_i, d_y);
time_c = timer.Stop();
}
}else if(solution_id == 1){
if(workloadDistribution_id == 0){
timer.Start();
err = A.naivespmspv(alpha, d_y);
time_c = timer.Stop();
}else{
timer.Start();
err = A.spmspv(alpha_i, d_y);
time_c = timer.Stop();
}
}else if(solution_id == 0){
//naive
if(workloadDistribution_id == 0){
if(writingBack_id == 0){
//sort
timer.Start();
err = A.CscBasedSortNaiveSpmspv(alpha, &y_nnz, d_y_key, d_y_val, 0);
time_c = timer.Stop();
}else{
//atomic
timer.Start();
err = A.CscBasedNoSortNaiveSpmspv_keyval(alpha, &y_nnz, d_y, d_y_key, d_y_val);
time_c = timer.Stop();
}
}else{
//load-balanced
if(writingBack_id == 0){
//sort
timer.Start();
err = A.CscBasedSortMergeSpmspv(false, alpha, &y_nnz, d_y, d_y_key, d_y_val);
time_c = timer.Stop();
}else{
//atomic
timer.Start();
err = A.CscBasedNoSortMergeSpmspv_keyval(true, alpha_i, &y_nnz, d_y, d_y_key, d_y_val);
time_c = timer.Stop();
}
}
}
hipError_t err_r = hipGetLastError();
if ( hipSuccess != err_r) {
printf("framework: cscspmspv() invocate error.\n");
printf("solution_id = %d, workloadDistribution_id =%d, writingBack_id=%d.\n",
solution_id, workloadDistribution_id, writingBack_id);
std::cout << "err code: " << hipGetErrorString(err_r) << std::endl;
exit(1);
}
#endif
time_spmv += time_c;
fout << time_sa << " " << time_ss << " " << time_sspmv << " " << time_r << " " << time_c << std::endl;
}
fout << time_single_atomic_col_spmspv << " ";
fout << time_single_sort_col_spmspv << " " << time_single_spmv << " ";
fout << time_rule << " " << time_spmv << std::endl;
A.deallocPreBuffer();
if(spmv_type == 0) {
A.holaPostprocess();
}
A.Destroy();
#ifdef CORRECT
if (hres) free(hres);
#endif
if (d_csr_row) cudaErrCheck(hipFree(d_csr_row));
if (d_csr_col) cudaErrCheck(hipFree(d_csr_col));
if (d_csr_val) cudaErrCheck(hipFree(d_csr_val));
if (d_x) hipFree(d_x);
if (d_y) hipFree(d_y);
if (d_y_key) hipFree(d_y_key);
if (d_y_val) hipFree(d_y_val);
if (d_x_key) hipFree(d_x_key);
if (d_x_val) hipFree(d_x_val);
if (d_bit_vector) hipFree(d_bit_vector);
if(xnnz_vec) free(xnnz_vec);
fout.close();
return err;
}
int doThis(std::string file_name, int iter, float alpha, float beta) {
const char* real_file_name = file_name.c_str();
std::cout << "file path = " << real_file_name << std::endl;
int m, n, mat_nnz;
int* csr_row;
int* csr_col;
VALUE_TYPE* csr_val;
// report precision of floating-point
std::cout << "-------------------------" << std::endl;
std::string precision;
if (sizeof(VALUE_TYPE) == 4) {
precision = "32-bit Single Precision (float)";
} else if (sizeof(VALUE_TYPE) == 8) {
precision = "64-bit Double Precision (double)";
} else {
std::cout << "Wrong precision. Program exit!" << std::endl;
return 0;
}
std::cout << "PRECISION = " << precision << std::endl;
std::cout << "-------------------------" << std::endl;
MTX<VALUE_TYPE> mtx;
std::string csr_name = std::string(real_file_name) + "_" + ".csr";
try
{
std::cout << "trying to load csr file \"" << csr_name << "\"\n";
loadCSR_header(csr_name.c_str(), &m, &n, &mat_nnz);
csr_row = (int* )malloc((m + 1) * sizeof(int));
CHECK_MALLOC(csr_row);
csr_col = (int* )malloc((mat_nnz) * sizeof(int));
CHECK_MALLOC(csr_col);
csr_val = (VALUE_TYPE* )malloc((mat_nnz) * sizeof(VALUE_TYPE));
CHECK_MALLOC(csr_val);
loadCSR(csr_name.c_str(), m, n, mat_nnz, csr_row, csr_col, csr_val);
}
catch (std::exception& ex){
std::cout << "could not load csr file:\n\t" << ex.what() << "\n";
fileToMtxCoo<VALUE_TYPE>(real_file_name, &mtx, true);
m = mtx.rows;
n = mtx.cols;
mat_nnz = mtx.nnz;
//coo2csr: attention memory alloc and free.
csr_row = (int* )malloc((m + 1) * sizeof(int));
CHECK_MALLOC(csr_row);
TestCoo2Csr(m, mat_nnz, mtx.row, csr_row);
csr_col = mtx.col;
csr_val = mtx.data;
try
{
storeCSR(m, n, mat_nnz, csr_row, csr_col, csr_val,csr_name.c_str());
}
catch (std::exception& ex)
{
std::cout << ex.what() << std::endl;
}
}
// easy for test correctness.
for (int i = 0; i < mat_nnz; i++) {
csr_val[i] = 1.0;
}
// SparseVec* x_sparse = (SparseVec* )malloc(n * sizeof(SparseVec));
// CHECK_MALLOC(x_sparse);
int* x_sparse_key = (int* )malloc(n * sizeof(int));
CHECK_MALLOC(x_sparse_key);
VALUE_TYPE* x_sparse_val = (VALUE_TYPE* )malloc(n *
sizeof(VALUE_TYPE));
CHECK_MALLOC(x_sparse_val);
VALUE_TYPE* x_dense = (VALUE_TYPE* )malloc(n * sizeof(VALUE_TYPE));
CHECK_MALLOC(x_dense);
for(int i=0; i<n; i++) {
x_dense[i] = (VALUE_TYPE)i;
}
VALUE_TYPE* y_dense = (VALUE_TYPE* )malloc(m * sizeof(VALUE_TYPE));
CHECK_MALLOC(y_dense);
// get matrix name.
std::string matrix_name;
int nPos1 = file_name.find_last_of(".");
int nPos2 = file_name.find_last_of("/", nPos1 - 1);
if(nPos1 != -1 && nPos2 != -1) {
matrix_name = file_name.substr(nPos2 + 1, nPos1 - nPos2 - 1);
}
std::cout << "matrix_name = " << matrix_name << std::endl;
std::cout << "m = " << m << ", n = " << n << ", nnz = " << mat_nnz << std::endl;
#ifdef BFS
std::string out_file = "/home/*/framework-time/bfs/"
+ matrix_name + "_bfs_perf.info";
#else
std::string out_file = "/home/*/framework-time/pagerank/"
+ matrix_name + "_pr_perf.info";
#endif
std::cout << out_file << std::endl;
Run(out_file, matrix_name, iter, alpha, beta, m, n, mat_nnz, csr_row, csr_col, csr_val,
x_sparse_key, x_sparse_val, x_dense, y_dense);
if (x_sparse_key) free(x_sparse_key);
if (x_sparse_val) free(x_sparse_val);
if (x_dense) free(x_dense);
if (y_dense) free(y_dense);
#if 0
if (mtx.row) free(mtx.row);
if (mtx.col) free(mtx.col);
if (mtx.data) free(mtx.data);
#endif
if(csr_row) free(csr_row);
if(csr_col) free(csr_col);
if(csr_val) free(csr_val);
return 0;
}
int main(int argc, char** argv) {
std::string file_name;
int iter = 0;
float alpha = 0, beta = 0;
if (argc == 2) {
file_name = argv[1];
// iter = atoi(argv[2]);
// alpha = atof(argv[3]);
// beta = atof(argv[4]);
std::cout << "---------file_name: " << file_name << "---------" << std::endl;
// std::cout << "---------iter: " << iter << "---------" << std::endl;
// std::cout << "---------alpha: " << alpha << "---------" << std::endl;
// std::cout << "---------beta: " << beta << "---------" << std::endl;
} else {
std::cout << "Usage: matrix_file_name " << std::endl;
exit(1);
}
//std::cout << "i am new-test-app.cu" << std::endl;
doThis(file_name, iter, alpha, beta);
return 0;
}
| 12badcc5c2ae4e8aeb8cfb45e73852e93e438feb.cu | // This is used to Test the adaptive Spmspv/spmv performance in applications.
#include <iostream>
#include <string>
#include <float.h>
#include <typeinfo>
#include <limits>
#include <algorithm>
#include <vector>
#include <random>
#include <dirent.h>
#include <sys/stat.h>
#include <omp.h>
#include <cusparse.h>
#include <cusparse_v2.h>
#include <cuda_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include "spmspv/csc-spmspv/spmspv.h"
#include "spmspv/class.hpp"
#include "spmspv/mtx.hpp"
#include "spmspv/readsvmdata.hpp"
#include "spmspv/config.h"
//#define BFS
//#define CORRECT
#ifndef VALUE_TYPE
#define VALUE_TYPE float
#endif
#ifndef NUM_RUN
#define NUM_RUN 10
#endif
#define IS_DOUBLE_ZERO(d) (abs(d) < DBL_EPSILON)
#define IS_FLOAT_ZERO(d) (abs(d) < FLT_EPSILON)
void TestCoo2Csr(int m, int mat_nnz,
int* coo_row, int* csr_row) {
int* d_csr_row = NULL;
int* d_coo_row = NULL;
cudaErrCheck(cudaMalloc((void** )&d_csr_row, (m + 1) * sizeof(int)));
cudaErrCheck(cudaMalloc((void** )&d_coo_row, mat_nnz * sizeof(int)));
cudaErrCheck(cudaMemcpy(d_coo_row, coo_row, mat_nnz * sizeof(int),
cudaMemcpyHostToDevice));
cusparseHandle_t sparse_handle;
CUSP_CALL(cusparseCreate(&sparse_handle));
cusparseMatDescr_t descr = 0;
CUSP_CALL(cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
cudaErrCheck(cudaDeviceSynchronize());
cudaEvent_t st, ed;
float tm = 0;
cudaEventCreate(&st);
cudaEventCreate(&ed);
cudaEventRecord(st, 0);
CUSP_CALL(cusparseXcoo2csr(sparse_handle,
d_coo_row, mat_nnz, m, d_csr_row,
CUSPARSE_INDEX_BASE_ZERO));
cudaEventRecord(ed, 0);
cudaEventSynchronize(ed);
//unit: ms.
cudaEventElapsedTime(&tm, st, ed);
cudaEventDestroy(st);
cudaEventDestroy(ed);
cudaErrCheck(cudaMemcpy(csr_row, d_csr_row, (m + 1) * sizeof(int),
cudaMemcpyDeviceToHost));
cudaErrCheck(cudaDeviceSynchronize());
cudaErrCheck(cudaFree(d_csr_row));
cudaErrCheck(cudaFree(d_coo_row));
}
template<typename T>
void CheckVector(T* cpu, T* gpu, int len) {
int flag = 1;
for(int i = 0; i < len; i++) {
if(cpu[i] != gpu[i]) {
std::cout << "Err at " << i << ", cpu[i] = " << cpu[i] <<", gpu[i] = " << gpu[i] << std::endl;
flag = 0;
}
}
if(flag == 1)
std::cout <<"RESULT OK" <<std::endl;
}
void serialspmv(int m, int n, int mat_nnz,
int* csr_row, int* csr_col, VALUE_TYPE* csr_val,
VALUE_TYPE* x_dense, VALUE_TYPE* y_ref, VALUE_TYPE alpha) {
for (int i = 0; i < m; i++) {
VALUE_TYPE sum = 0;
for (int j = csr_row[i]; j < csr_row[i+1]; j++)
sum += x_dense[csr_col[j]] * csr_val[j] * alpha;
y_ref[i] = sum;
}
}
int predictSolution(float nnz, float x_sparsity){
return 0;
}
int predictWorkloadDistribution(float maxRow, float avgRow, float stdRow){
return 0;
}
int predictWritingBack(float m, float x_sparisity){
return 0;
}
int Run(std::string file_name, std::string matrix_name, int iter, float alpha, float beta,
int m, int n, int mat_nnz,
int* csr_row, int* csr_col, VALUE_TYPE* csr_val,
int* x_sparse_key, VALUE_TYPE* x_sparse_val, VALUE_TYPE* x_dense,
VALUE_TYPE* y_dense) {
int err = 0;
cudaError_t err_cuda = cudaSuccess;
int device_id = 0;
cudaSetDevice(device_id);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device_id);
std::cout << "Device [" << device_id << "] " << deviceProp.name << ", "
<< " @ " << deviceProp.clockRate * 1e-3f << "MHz. " << std::endl;
// write to file.
std::ofstream fout;
fout.open(file_name.c_str(), std::ofstream::app);
if (!fout.is_open()) {
std::cout << "can't open output file\n" << file_name << std::endl;
exit(1);
}
fout << "mat_name, m, n, nnz, max, min, avg, m_range, std, equlity, gini, x_nnz, sparse2dense, sparse2bitarray, bin_len, max, min, xnnz/n, bin_len/nnz, xnnz_range, m1_id, m2_id, m3_id, time_r, time_c, single_atomic_col_kernel, single_sort_col_kernel, single_spmv, framework\n";
fout << matrix_name << " ";
int* d_csr_row = NULL;
int* d_csr_col = NULL;
VALUE_TYPE* d_csr_val = NULL;
cudaErrCheck(cudaMalloc((void **)&d_csr_row, (m+1) * sizeof(int)));
cudaErrCheck(cudaMalloc((void **)&d_csr_col, mat_nnz * sizeof(int)));
cudaErrCheck(cudaMalloc((void **)&d_csr_val, mat_nnz * sizeof(VALUE_TYPE)));
cudaErrCheck(cudaMemcpy(d_csr_row, csr_row, (m+1) * sizeof(int),
cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(d_csr_col, csr_col, mat_nnz * sizeof(int),
cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(d_csr_val, csr_val, mat_nnz * sizeof(VALUE_TYPE),
cudaMemcpyHostToDevice));
VALUE_TYPE* d_x = NULL;
cudaErrCheck(cudaMalloc((void** )&d_x, n * sizeof(VALUE_TYPE)));
cudaErrCheck(cudaMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE),
cudaMemcpyHostToDevice));
int num_ints = (n + sizeof(int) * 8 - 1) / (sizeof(int) * 8);
int* d_bit_vector = NULL;
cudaErrCheck(cudaMalloc((void** )&d_bit_vector, num_ints * sizeof(int)));
SpmspvHandle<int, unsigned int, VALUE_TYPE> A(m, n, mat_nnz);
err = A.InputCSR(mat_nnz, d_csr_row, d_csr_col, d_csr_val);
err = A.set_vector_type(1);//current vector type is densetype.
err = A.set_x(d_x); //
err = A.set_bitvector(d_bit_vector, num_ints);//
VALUE_TYPE* d_y = NULL; //result vec.
cudaErrCheck(cudaMalloc((void** )&d_y, m * sizeof(VALUE_TYPE)));
cudaErrCheck(cudaMemset(d_y, 0, m * sizeof(VALUE_TYPE))); //initialized to zero.
int* d_y_key = NULL; //result vec.
VALUE_TYPE* d_y_val = NULL;
cudaErrCheck(cudaMalloc((void** )&d_y_key, m * sizeof(int)));
cudaErrCheck(cudaMalloc((void** )&d_y_val, m * sizeof(VALUE_TYPE)));
VALUE_TYPE alpha_i = 1.0;
SpmspvTimer timer;
/*********select spmv: use holaspmv directly***********/
err = A.holaPreprocess();
timer.Start();
for (int i = 0; i < NUM_RUN; i++)
err = A.holaspmv(alpha_i, d_y);
err_cuda = cudaDeviceSynchronize();
double holaspmv_time = timer.Stop()/ (double)NUM_RUN;
//double holaspmv_time = timer.Stop();
std::cout << "holaspmv time " << holaspmv_time << std::endl;
//fout << holaspmv_time << " ";
double csr5spmv_time = 0;
//fout << csr5spmv_time << " ";
int spmv_type = 0;
spmv_type = 0;
A.set_spmv_type(0);
//fout << spmv_type << " ";
//fout << "alpha = " << alpha << ", beta = " << beta << std::endl;
#ifdef CORRECT
//for serial spmv.
VALUE_TYPE* hres = (VALUE_TYPE* )malloc(m * sizeof(VALUE_TYPE));
CHECK_MALLOC(hres);
#endif
err = A.ToCSC();
#ifdef BFS
std::string file_prex = "/home/*/bfs_xnnz_";
#else
std::string file_prex = "/home/*/pr_xnnz_";
#endif
std::string file_suffix = ".txt";
std::string file_all = file_prex + matrix_name + file_suffix;
std::cout << "reading xnnz from file: " << file_all << std::endl;
readNNZXFromFile(file_all.c_str(), &iter);
std::cout << "iter = " << iter << std::endl;
int* xnnz_vec = (int* )malloc(iter * sizeof(int));
CHECK_MALLOC(xnnz_vec);
readSparseXInxFromFile(file_all.c_str(), iter, xnnz_vec);
int* d_x_key = NULL;
VALUE_TYPE* d_x_val = NULL;
cudaErrCheck(cudaMalloc((void** )&d_x_key, (n) * sizeof(int)));
cudaErrCheck(cudaMalloc((void** )&d_x_val, (n) * sizeof(VALUE_TYPE)));
//fout << "x_nnz, sparse2dense, sparse2bitarray, bin_len, max, min, xnnz/n, bin_len/nnz, xnnz_range, GM1, GM2, GM3, GM1/GM2, GM2/GM3, GM1/GM3, naive-col, lb-col, naive-rspmspv, naive-rspmspv+s2a, lb-rspmspv, lb-rspmspv+s2a, naive-spmv, naive-spmv+s2d, lb-spmv, lb-spmv+s2d \n";
double all_time = 0.0;
int y_nnz = 0;
int quit = 0;
//malloc for d_col_len[] and d_pre_alloc_buffer (csc spmspv preprocess and binlen) .
A.allocPreBuffer();
double time_sa=0, time_ss=0, time_sspmv=0;
double time_r, time_c;
double time_single_atomic_col_spmspv = 0.0;
double time_single_sort_col_spmspv = 0.0;
double time_single_spmv = 0.0;
double time_rule = 0, time_spmv = 0;
int mat_max_elems, mat_min_elems;
float mat_avg_elems, mat_x_range, mat_standard_row, mat_equlity, mat_gini;
A.computeMatFeture_serial(m, n, mat_nnz, csr_row, &mat_max_elems, &mat_min_elems, &mat_avg_elems,
&mat_x_range, &mat_standard_row, &mat_equlity, &mat_gini);
fout << m << " ";
fout << n << " ";
fout << mat_nnz << " ";
fout << mat_max_elems << " ";
fout << mat_min_elems << " ";
fout << mat_avg_elems << " ";
fout << mat_x_range << " ";//relative range of degree
fout << mat_standard_row<< " ";
fout << mat_equlity << " ";
fout << mat_gini << std::endl;
for (int i = 0; i < iter; i++) {
int x_nnz = xnnz_vec[i];
if (quit) break;
if(x_nnz >= n) {
x_nnz = n;
}
printf("x_nnz = %d\n", x_nnz);
fout << x_nnz << " ";
memset(x_sparse_key, 0, n * sizeof(int));
memset(x_sparse_val, 0, n * sizeof(VALUE_TYPE));
memset(x_dense, 0, n * sizeof(VALUE_TYPE));
#ifdef BFS
std::string in_file_prex = "/home/*/bfs_x_";
#else
std::string in_file_prex = "/home/*/pr_x_";
#endif
std::string in_file_suffix = ".txt";
std::string in_file_name = in_file_prex + matrix_name + in_file_suffix;
std::cout << "reading sparse x from file: " << in_file_name << std::endl;
extractSparseXfromFile(in_file_name, i, x_nnz, x_sparse_key, x_sparse_val, x_dense);
#ifdef CORRECT
//serial spmv computation.
memset(hres, 0, m * sizeof(VALUE_TYPE));//
serialspmv(m, n, mat_nnz, csr_row, csr_col, csr_val, x_dense, hres, alpha_i);
#endif
cudaErrCheck(cudaMemcpy(d_x_key, x_sparse_key, x_nnz * sizeof(int),
cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(d_x_val, x_sparse_val, x_nnz * sizeof(VALUE_TYPE),
cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(d_x, x_dense, n * sizeof(VALUE_TYPE),
cudaMemcpyHostToDevice));
err = A.set_vector_type(0);//current vector type is sparse type.
err = A.set_sparse_x(d_x_key, d_x_val, x_nnz);//
err = A.set_x(d_x);//
timer.Start();
A.sparse2dense();//generate values in dense vector.
double s2dtime = timer.Stop();
std::cout << "DEBUG: sparse2dense time = " << s2dtime << std::endl;
//fout << "s2dtime = " << s2dtime << " ";
fout << s2dtime << " ";
timer.Start();
A.sparse2bitarray();// generate values in bitvector.
double s2atime = timer.Stop();
std::cout << "DEBUG: sparse2bitarray time = " << s2atime << std::endl;
//fout << " s2atime = " << s2atime << " ";
fout << s2atime << " ";
timer.Start();
int bin_len = A.computeBinlenVer2();
double time = timer.Stop();
std::cout << "DEBUG: compute bin_len time = " << time << "ms." << std::endl;
std::cout << "DEBUG: bin_len = " << bin_len << std::endl;
int max_elems = 0;
int min_elems = 0;
A.computeVecFeture_serial(x_nnz, x_sparse_key, &bin_len, &max_elems, &min_elems);
fout << bin_len << " ";
fout << max_elems << " ";
fout << min_elems << " ";
fout << 1.0*x_nnz/n << " ";
fout << 1.0*bin_len/mat_nnz << " ";
fout << (max_elems - min_elems)/(1.0*n) << " ";//relative range of degree
float x_sparsity = 1.0*x_nnz/n;
//rules judgement
int solution_id = 2;//0,1,2
int workloadDistribution_id = 1;//0,1
int writingBack_id = 1;//0,1(sort, atomic)
timer.Start();
//int predictSolution(int nnz, int x_sparsity){
solution_id = predictSolution((float)mat_nnz, x_sparsity);
//int predictWorkloadDistribution(int maxRow, int avgRow, int stdRow){
workloadDistribution_id = predictWorkloadDistribution((float)mat_max_elems, mat_avg_elems, mat_standard_row);
if(solution_id == 0){
//int predictWritingBack(int m, int x_sparisity){
writingBack_id = predictWritingBack((float)m, x_sparsity);
}
time_r = timer.Stop();
time_rule += time_r;
fout << " " << solution_id << " " << workloadDistribution_id << " " << writingBack_id << " ";
#if 1
//execute sinle kernel
cudaMemset(d_y, 0, m * sizeof(VALUE_TYPE));
timer.Start();
err = A.CscBasedNoSortMergeSpmspv_keyval(true, alpha_i, &y_nnz, d_y, d_y_key, d_y_val);
time_sa = timer.Stop();
time_single_atomic_col_spmspv += time_sa;
cudaError_t err_r = cudaGetLastError();
if ( cudaSuccess != err_r) {
printf("cscspmspv() invocate error.\n");
std::cout << "err code: " << cudaGetErrorString(err_r) << std::endl;
exit(1);
}
#endif
#if 0
//execute sinle kernel
cudaMemset(d_y, 0, m * sizeof(VALUE_TYPE));
timer.Start();
err = A.CscBasedSortMergeSpmspv(false, alpha, &y_nnz, d_y, d_y_key, d_y_val);
time_ss = timer.Stop();
time_single_sort_col_spmspv += time_ss;
err_r = cudaGetLastError();
if ( cudaSuccess != err_r) {
printf("cscspmspv() invocate error.\n");
std::cout << "err code: " << cudaGetErrorString(err_r) << std::endl;
exit(1);
}
#endif
cudaMemset(d_y, 0, m * sizeof(VALUE_TYPE));
timer.Start();
err = A.spmv(alpha_i, d_y);
time_sspmv = timer.Stop();
time_single_spmv += time_sspmv;
#if 0
//execute adaptive framework
//SpMV
cudaMemset(d_y, 0, m * sizeof(VALUE_TYPE));
if(solution_id == 2){
if(workloadDistribution_id == 0){
timer.Start();
err = A.naivespmv(alpha, d_y);
time_c = timer.Stop();
}else{
timer.Start();
err = A.spmv(alpha_i, d_y);
time_c = timer.Stop();
}
}else if(solution_id == 1){
if(workloadDistribution_id == 0){
timer.Start();
err = A.naivespmspv(alpha, d_y);
time_c = timer.Stop();
}else{
timer.Start();
err = A.spmspv(alpha_i, d_y);
time_c = timer.Stop();
}
}else if(solution_id == 0){
//naive
if(workloadDistribution_id == 0){
if(writingBack_id == 0){
//sort
timer.Start();
err = A.CscBasedSortNaiveSpmspv(alpha, &y_nnz, d_y_key, d_y_val, 0);
time_c = timer.Stop();
}else{
//atomic
timer.Start();
err = A.CscBasedNoSortNaiveSpmspv_keyval(alpha, &y_nnz, d_y, d_y_key, d_y_val);
time_c = timer.Stop();
}
}else{
//load-balanced
if(writingBack_id == 0){
//sort
timer.Start();
err = A.CscBasedSortMergeSpmspv(false, alpha, &y_nnz, d_y, d_y_key, d_y_val);
time_c = timer.Stop();
}else{
//atomic
timer.Start();
err = A.CscBasedNoSortMergeSpmspv_keyval(true, alpha_i, &y_nnz, d_y, d_y_key, d_y_val);
time_c = timer.Stop();
}
}
}
cudaError_t err_r = cudaGetLastError();
if ( cudaSuccess != err_r) {
printf("framework: cscspmspv() invocate error.\n");
printf("solution_id = %d, workloadDistribution_id =%d, writingBack_id=%d.\n",
solution_id, workloadDistribution_id, writingBack_id);
std::cout << "err code: " << cudaGetErrorString(err_r) << std::endl;
exit(1);
}
#endif
time_spmv += time_c;
fout << time_sa << " " << time_ss << " " << time_sspmv << " " << time_r << " " << time_c << std::endl;
}
fout << time_single_atomic_col_spmspv << " ";
fout << time_single_sort_col_spmspv << " " << time_single_spmv << " ";
fout << time_rule << " " << time_spmv << std::endl;
A.deallocPreBuffer();
if(spmv_type == 0) {
A.holaPostprocess();
}
A.Destroy();
#ifdef CORRECT
if (hres) free(hres);
#endif
if (d_csr_row) cudaErrCheck(cudaFree(d_csr_row));
if (d_csr_col) cudaErrCheck(cudaFree(d_csr_col));
if (d_csr_val) cudaErrCheck(cudaFree(d_csr_val));
if (d_x) cudaFree(d_x);
if (d_y) cudaFree(d_y);
if (d_y_key) cudaFree(d_y_key);
if (d_y_val) cudaFree(d_y_val);
if (d_x_key) cudaFree(d_x_key);
if (d_x_val) cudaFree(d_x_val);
if (d_bit_vector) cudaFree(d_bit_vector);
if(xnnz_vec) free(xnnz_vec);
fout.close();
return err;
}
int doThis(std::string file_name, int iter, float alpha, float beta) {
const char* real_file_name = file_name.c_str();
std::cout << "file path = " << real_file_name << std::endl;
int m, n, mat_nnz;
int* csr_row;
int* csr_col;
VALUE_TYPE* csr_val;
// report precision of floating-point
std::cout << "-------------------------" << std::endl;
std::string precision;
if (sizeof(VALUE_TYPE) == 4) {
precision = "32-bit Single Precision (float)";
} else if (sizeof(VALUE_TYPE) == 8) {
precision = "64-bit Double Precision (double)";
} else {
std::cout << "Wrong precision. Program exit!" << std::endl;
return 0;
}
std::cout << "PRECISION = " << precision << std::endl;
std::cout << "-------------------------" << std::endl;
MTX<VALUE_TYPE> mtx;
std::string csr_name = std::string(real_file_name) + "_" + ".csr";
try
{
std::cout << "trying to load csr file \"" << csr_name << "\"\n";
loadCSR_header(csr_name.c_str(), &m, &n, &mat_nnz);
csr_row = (int* )malloc((m + 1) * sizeof(int));
CHECK_MALLOC(csr_row);
csr_col = (int* )malloc((mat_nnz) * sizeof(int));
CHECK_MALLOC(csr_col);
csr_val = (VALUE_TYPE* )malloc((mat_nnz) * sizeof(VALUE_TYPE));
CHECK_MALLOC(csr_val);
loadCSR(csr_name.c_str(), m, n, mat_nnz, csr_row, csr_col, csr_val);
}
catch (std::exception& ex){
std::cout << "could not load csr file:\n\t" << ex.what() << "\n";
fileToMtxCoo<VALUE_TYPE>(real_file_name, &mtx, true);
m = mtx.rows;
n = mtx.cols;
mat_nnz = mtx.nnz;
//coo2csr: attention memory alloc and free.
csr_row = (int* )malloc((m + 1) * sizeof(int));
CHECK_MALLOC(csr_row);
TestCoo2Csr(m, mat_nnz, mtx.row, csr_row);
csr_col = mtx.col;
csr_val = mtx.data;
try
{
storeCSR(m, n, mat_nnz, csr_row, csr_col, csr_val,csr_name.c_str());
}
catch (std::exception& ex)
{
std::cout << ex.what() << std::endl;
}
}
// easy for test correctness.
for (int i = 0; i < mat_nnz; i++) {
csr_val[i] = 1.0;
}
// SparseVec* x_sparse = (SparseVec* )malloc(n * sizeof(SparseVec));
// CHECK_MALLOC(x_sparse);
int* x_sparse_key = (int* )malloc(n * sizeof(int));
CHECK_MALLOC(x_sparse_key);
VALUE_TYPE* x_sparse_val = (VALUE_TYPE* )malloc(n *
sizeof(VALUE_TYPE));
CHECK_MALLOC(x_sparse_val);
VALUE_TYPE* x_dense = (VALUE_TYPE* )malloc(n * sizeof(VALUE_TYPE));
CHECK_MALLOC(x_dense);
for(int i=0; i<n; i++) {
x_dense[i] = (VALUE_TYPE)i;
}
VALUE_TYPE* y_dense = (VALUE_TYPE* )malloc(m * sizeof(VALUE_TYPE));
CHECK_MALLOC(y_dense);
// get matrix name.
std::string matrix_name;
int nPos1 = file_name.find_last_of(".");
int nPos2 = file_name.find_last_of("/", nPos1 - 1);
if(nPos1 != -1 && nPos2 != -1) {
matrix_name = file_name.substr(nPos2 + 1, nPos1 - nPos2 - 1);
}
std::cout << "matrix_name = " << matrix_name << std::endl;
std::cout << "m = " << m << ", n = " << n << ", nnz = " << mat_nnz << std::endl;
#ifdef BFS
std::string out_file = "/home/*/framework-time/bfs/"
+ matrix_name + "_bfs_perf.info";
#else
std::string out_file = "/home/*/framework-time/pagerank/"
+ matrix_name + "_pr_perf.info";
#endif
std::cout << out_file << std::endl;
Run(out_file, matrix_name, iter, alpha, beta, m, n, mat_nnz, csr_row, csr_col, csr_val,
x_sparse_key, x_sparse_val, x_dense, y_dense);
if (x_sparse_key) free(x_sparse_key);
if (x_sparse_val) free(x_sparse_val);
if (x_dense) free(x_dense);
if (y_dense) free(y_dense);
#if 0
if (mtx.row) free(mtx.row);
if (mtx.col) free(mtx.col);
if (mtx.data) free(mtx.data);
#endif
if(csr_row) free(csr_row);
if(csr_col) free(csr_col);
if(csr_val) free(csr_val);
return 0;
}
int main(int argc, char** argv) {
std::string file_name;
int iter = 0;
float alpha = 0, beta = 0;
if (argc == 2) {
file_name = argv[1];
// iter = atoi(argv[2]);
// alpha = atof(argv[3]);
// beta = atof(argv[4]);
std::cout << "---------file_name: " << file_name << "---------" << std::endl;
// std::cout << "---------iter: " << iter << "---------" << std::endl;
// std::cout << "---------alpha: " << alpha << "---------" << std::endl;
// std::cout << "---------beta: " << beta << "---------" << std::endl;
} else {
std::cout << "Usage: matrix_file_name " << std::endl;
exit(1);
}
//std::cout << "i am new-test-app.cu" << std::endl;
doThis(file_name, iter, alpha, beta);
return 0;
}
|
957718a4859da3d98c09a02b52ec117051a44bf6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
// @author [email protected]
// @author [email protected]
//
#include <ops/declarable/helpers/transforms.h>
#include <helpers/ShapeUtils.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void clipByNormCuda(const void* vClipNorm, const void* vNorm, const Nd4jLong* normShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int* dimensions, const int dimsLen, const bool useAverage) {
const T clipNorm = *reinterpret_cast<const T*>(vClipNorm);
const T* norm = reinterpret_cast<const T*>(vNorm);
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong zLen, tadLen, totalThreads;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
tadLen = zLen / shape::length(normShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
int zCoords[MAX_RANK], normCoords[MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoords);
// deduce norm coords
for (int j = 0; j < dimsLen; ++j)
normCoords[j] = zCoords[dimensions[j]];
const T actualNorm = useAverage ? norm[shape::getOffset(normShapeInfo, normCoords)] / tadLen : norm[shape::getOffset(normShapeInfo, normCoords)];
if(actualNorm > clipNorm)
z[shape::getOffset(zShapeInfo, zCoords)] *= clipNorm / actualNorm;
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void clipByNormCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
const void* vClipNorm, const void* vNorm, const Nd4jLong* normShapeInfo, void* vz, const Nd4jLong* zShapeInfo,
const int* dimensions, const int dimsLen, const bool useAverage) {
hipLaunchKernelGGL(( clipByNormCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 512, *stream, vClipNorm, vNorm, normShapeInfo, vz, zShapeInfo, dimensions, dimsLen, useAverage);
}
//////////////////////////////////////////////////////////////////////////
// Clips `input` (in place when isInplace, otherwise into `output`) by L2 norm.
// dims empty  -> one norm over the whole array, handled on the host path;
// dims given  -> per-TAD norms, clipping done by the clipByNormCuda kernel.
void clipByNorm(sd::LaunchContext* context, NDArray& input, NDArray& output, const std::vector<int>& dims, const NDArray& clipNorm, const bool isInplace, const bool useAverage) {
NDArray* z = nullptr;
if(isInplace) {
z = &input;
}
else {
output.assign(input);
z = &output;
}
if(dims.empty()) {
// whole-array norm; optionally averaged over the number of elements
const NDArray actualNorm = useAverage ? z->reduceAlongDimension(reduce::Norm2, {}) / z->lengthOf() : z->reduceAlongDimension(reduce::Norm2, {});
if(actualNorm.e<float>(0) > clipNorm.e<float>(0))
*z *= clipNorm / actualNorm;
}
else {
const NDArray actualNorms = z->reduceAlongDimension(reduce::Norm2, dims);
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(z->rankOf(), dims);
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (z->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "clipByNorm");
// copy the dimension mapping to device memory for the kernel
const int* dimensions = reinterpret_cast<const int*>(manager.replicatePointer(dimsToExclude.data(), dimsToExclude.size() * sizeof(int)));
NDArray::prepareSpecialUse({z}, {z, &actualNorms, &clipNorm});
BUILD_SINGLE_SELECTOR(z->dataType(), clipByNormCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), clipNorm.specialBuffer(), actualNorms.specialBuffer(), actualNorms.specialShapeInfo(), z->specialBuffer(), z->specialShapeInfo(), dimensions, (int)dimsToExclude.size(), useAverage), FLOAT_TYPES);
NDArray::registerSpecialUse({z}, {z, &actualNorms, &clipNorm});
manager.synchronize();
}
}
//////////////////////////////////////////////////////////////////////////
// Backward pass of clipByNorm. For each element of gradI (z):
//   norm >  clipNorm : z = (clipNorm/norm) * gradO * (1 - x*sum/(norm*norm))
//   norm <= clipNorm : z = gradO (pass-through)
// where x is the forward input, sum the per-TAD sum of x, norm the per-TAD
// L2 norm (optionally averaged over the TAD length).
template<typename T>
__global__ static void clipByNormBpCuda(const void* vClipNorm,
const void* vx, const Nd4jLong* xShapeInfo, // input
const void* vy, const Nd4jLong* yShapeInfo, // gradO
const void* vNorm, const Nd4jLong* normShapeInfo,
const void* vSum, const Nd4jLong* sumShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, // gradI
const int* dimensions, const int dimsLen, const bool useAverage) {
const T clipNorm = *reinterpret_cast<const T*>(vClipNorm);
const T* norm = reinterpret_cast<const T*>(vNorm);
const T* sum = reinterpret_cast<const T*>(vSum);
const T* x = reinterpret_cast<const T*>(vx);
const T* y = reinterpret_cast<const T*>(vy);
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong zLen, tadLen, totalThreads;
__shared__ bool sameOffsets;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
tadLen = zLen / shape::length(normShapeInfo);
totalThreads = gridDim.x * blockDim.x;
// when x, y, z share shape+strides, one offset serves all three
sameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo, zShapeInfo);
}
__syncthreads();
int zCoords[MAX_RANK], normCoords[MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoords);
const auto zOffset = shape::getOffset(zShapeInfo, zCoords);
const auto yOffset = sameOffsets ? zOffset : shape::getOffset(yShapeInfo, zCoords);
// deduce norm coords
for (int j = 0; j < dimsLen; ++j)
normCoords[j] = zCoords[dimensions[j]];
const T actualNorm = useAverage ? norm[shape::getOffset(normShapeInfo, normCoords)] / tadLen : norm[shape::getOffset(normShapeInfo, normCoords)];
if(actualNorm > clipNorm) {
const T sumVal = sum[shape::getOffset(sumShapeInfo, normCoords)];
const auto xOffset = sameOffsets ? zOffset : shape::getOffset(xShapeInfo, zCoords);
z[zOffset] = (clipNorm / actualNorm) * y[yOffset] * (static_cast<T>(1.f) - (x[xOffset] * sumVal) / (actualNorm * actualNorm));
}
else
z[zOffset] = y[yOffset];
}
}
//////////////////////////////////////////////////////////////////////////
// Backprop for clipByNorm: fills gradI given forward input, gradO and clipNorm.
// Scalar-norm case (dims covers the whole array) is handled on the host via a
// pairwise lambda; the per-TAD case launches clipByNormBpCuda.
// Fix: the else-branch used to recompute the Norm2 reduction into a shadowing
// local `actualNorms`; the value computed at the top is reused instead.
template<typename T>
void clipByNormBp_(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const std::vector<int>& dims, const NDArray& clipNorm, const bool useAverage) {
    // per-TAD L2 norms of the forward input (single scalar when dims selects all)
    auto actualNorms = input.reduceAlongDimension(reduce::Norm2, dims);
    if(actualNorms.lengthOf() == 1) {
        const T norm = useAverage ? actualNorms.e<T>(0) / static_cast<T>(input.lengthOf()) : actualNorms.e<T>(0);
        auto clipVal = clipNorm.e<T>(0);
        if(norm > clipVal) {
            const T sum = input.reduceNumber(reduce::Sum).e<T>(0); // reduce to scalar
            const T factor1 = clipVal / norm;
            const T factor2 = static_cast<T>(1.f) / (norm * norm); // 1 / (norm*norm)
            // gradI = factor1 * gradO * (1 - factor2 * x * sum)
            auto lambda = LAMBDA_TT(x, y, sum, factor1, factor2) {
                return factor1 * y * (static_cast<T>(1.f) - factor2 * x * sum);
            };
            const_cast<NDArray&>(input).applyPairwiseLambda(const_cast<NDArray&>(gradO), lambda, gradI);
        }
        else
            gradI.assign(gradO);
    }
    else {
        const NDArray sums = input.reduceAlongDimension(reduce::Sum, dims);
        std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(gradI.rankOf(), dims);
        const int threadsPerBlock = MAX_NUM_THREADS / 2;
        const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
        PointersManager manager(context, "clipByNormBp");
        const int* dimensions = reinterpret_cast<const int*>(manager.replicatePointer(dimsToExclude.data(), dimsToExclude.size() * sizeof(int)));
        NDArray::prepareSpecialUse({&gradI}, {&actualNorms, &sums, &clipNorm, &input, &gradO});
        hipLaunchKernelGGL(( clipByNormBpCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 512, *context->getCudaStream(), clipNorm.specialBuffer(), input.specialBuffer(), input.specialShapeInfo(), gradO.specialBuffer(), gradO.specialShapeInfo(), actualNorms.specialBuffer(), actualNorms.specialShapeInfo(), sums.specialBuffer(), sums.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), dimensions, (int)dimsToExclude.size(), useAverage);
        NDArray::registerSpecialUse({&gradI}, {&actualNorms, &sums, &clipNorm, &input, &gradO});
        manager.synchronize();
    }
}
BUILD_SINGLE_TEMPLATE(template void clipByNormBp_, (sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool useAverage), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// Entry point for the clipByNorm backward pass: casts the forward input to the
// gradient's dtype when they differ, then dispatches to the typed implementation.
void clipByNormBp(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool useAverage) {
const NDArray& castedInput = gradI.dataType() == input.dataType() ? input : input.cast(gradI.dataType());
BUILD_SINGLE_SELECTOR(gradI.dataType(), clipByNormBp_, (context, castedInput, gradO, gradI, dimensions, clipNorm, useAverage), FLOAT_TYPES);
}
// Clips a list of tensors by their joint (global) L2 norm:
// globalNorm = sqrt(sum_i ||inputs[i]||_2^2). When globalNorm exceeds clipNorm,
// every tensor is scaled by clipNorm/globalNorm; otherwise copied unchanged.
// The extra slot outputs[inputs.size()] receives the global-norm scalar.
template <typename T>
void clipByGlobalNorm_(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
NDArray globalNorm = NDArrayFactory::create<T>(0, inputs[0]->getContext()); //sqrt(sum([l2norm(t)**2 for t in t_list]))
for (auto i = 0; i < inputs.size(); i++) {
auto input = inputs[i];
auto l2norm = input->reduceNumber(reduce::Norm2);
globalNorm += l2norm * l2norm;
}
globalNorm.applyTransform(transform::Sqrt, globalNorm); // = sd::math::nd4j_sqrt(globalNorm)
outputs[inputs.size()]->p(0, globalNorm);
globalNorm.syncToHost();
// NOTE(review): factor is computed before the <= clipNorm check below; if
// globalNorm were 0 this divides by zero, though the quotient is only used
// on the scaling path where globalNorm > clipNorm -- confirm inputs can't
// make this NaN-propagate.
const T factor = static_cast<T>(clipNorm) / globalNorm.e<T>(0);
for (size_t e = 0; e < inputs.size(); e++) {
// all-reduce
auto input = inputs[e];
auto output = outputs[e];
if (globalNorm.e<double>(0) <= clipNorm) {
output->assign(input);
}
else {
auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
input->applyLambda(lambda, *output);
}
}
}
// Dtype dispatcher for clipByGlobalNorm_.
void clipByGlobalNorm(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_, (context, inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES);
// Elementwise clamp of `input` into [leftBound, rightBound], written to `output`.
// When both buffers have unit element-wise stride the flat index is used
// directly; otherwise offsets are resolved through the shape infos.
// Fix: the non-linear pass-through branch read inputBuf[outputOffset] instead of
// inputBuf[inputOffset], copying the wrong element whenever strides differ.
template <typename T>
static void __global__ clipByValueKernel(void* input, const Nd4jLong* inputShape, void* output, const Nd4jLong* outputShape, double leftBound, double rightBound) {
    __shared__ T* outputBuf;
    __shared__ T* inputBuf;
    __shared__ Nd4jLong length;
    __shared__ bool linearBuffers;
    if (threadIdx.x == 0) {
        outputBuf = reinterpret_cast<T *>(output);
        inputBuf = reinterpret_cast<T *>(input);
        length = shape::length(inputShape);
        linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) && shape::elementWiseStride(inputShape) == 1;
    }
    __syncthreads();
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;
    // grid-stride loop over all elements
    for (Nd4jLong e = tid; e < length; e += step) {
        if (linearBuffers) {
            if (inputBuf[e] > rightBound) outputBuf[e] = (T) rightBound;
            else if (inputBuf[e] < leftBound) outputBuf[e] = (T) leftBound;
            else outputBuf[e] = inputBuf[e];
        }
        else {
            auto inputOffset = shape::getIndexOffset(e, inputShape);
            auto outputOffset = shape::getIndexOffset(e, outputShape);
            if (inputBuf[inputOffset] > rightBound) outputBuf[outputOffset] = (T) rightBound;
            else if (inputBuf[inputOffset] < leftBound) outputBuf[outputOffset] = (T) leftBound;
            else outputBuf[outputOffset] = inputBuf[inputOffset]; // BUG FIX: was inputBuf[outputOffset]
        }
    }
}
// Typed host wrapper: syncs `input` to device if needed and launches
// clipByValueKernel with a fixed 256x512 configuration and 8 KB shared memory.
template <typename T>
static void clipByValue_(sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
auto stream = context->getCudaStream();
if (!input.isActualOnDeviceSide())
input.syncToDevice();
NDArray::prepareSpecialUse({&output}, {&input});
hipLaunchKernelGGL(( clipByValueKernel<T>), dim3(256), dim3(512), 8192, *stream, input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftBound, rightBound);
NDArray::registerSpecialUse({&output}, {&input});
}
// Dtype dispatcher for clipByValue_.
void clipByValue(sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (context, input, leftBound, rightBound, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByValue_, (sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output);, FLOAT_TYPES);
}
}
}
| 957718a4859da3d98c09a02b52ec117051a44bf6.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
// @author [email protected]
// @author [email protected]
//
#include <ops/declarable/helpers/transforms.h>
#include <helpers/ShapeUtils.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
// CUDA counterpart of the HIP kernel above: scales each TAD of z by
// clipNorm/norm whenever its (optionally averaged) L2 norm exceeds clipNorm.
template<typename T>
__global__ static void clipByNormCuda(const void* vClipNorm, const void* vNorm, const Nd4jLong* normShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int* dimensions, const int dimsLen, const bool useAverage) {
const T clipNorm = *reinterpret_cast<const T*>(vClipNorm);
const T* norm = reinterpret_cast<const T*>(vNorm);
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong zLen, tadLen, totalThreads;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
tadLen = zLen / shape::length(normShapeInfo); // elements per TAD
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
int zCoords[MAX_RANK], normCoords[MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
// grid-stride loop over elements of z
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoords);
// deduce norm coords
for (int j = 0; j < dimsLen; ++j)
normCoords[j] = zCoords[dimensions[j]];
const T actualNorm = useAverage ? norm[shape::getOffset(normShapeInfo, normCoords)] / tadLen : norm[shape::getOffset(normShapeInfo, normCoords)];
if(actualNorm > clipNorm)
z[shape::getOffset(zShapeInfo, zCoords)] *= clipNorm / actualNorm;
}
}
//////////////////////////////////////////////////////////////////////////
// Host-side launcher for clipByNormCuda<T> on the given stream
// (512 bytes of dynamic shared memory requested; kernel uses static __shared__).
template<typename T>
__host__ static void clipByNormCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
const void* vClipNorm, const void* vNorm, const Nd4jLong* normShapeInfo, void* vz, const Nd4jLong* zShapeInfo,
const int* dimensions, const int dimsLen, const bool useAverage) {
clipByNormCuda<T><<<blocksPerGrid, threadsPerBlock, 512, *stream>>>(vClipNorm, vNorm, normShapeInfo, vz, zShapeInfo, dimensions, dimsLen, useAverage);
}
//////////////////////////////////////////////////////////////////////////
// Clips `input` (in place when isInplace, otherwise into `output`) by L2 norm;
// whole-array case on the host, per-TAD case via the clipByNormCuda kernel.
void clipByNorm(sd::LaunchContext* context, NDArray& input, NDArray& output, const std::vector<int>& dims, const NDArray& clipNorm, const bool isInplace, const bool useAverage) {
NDArray* z = nullptr;
if(isInplace) {
z = &input;
}
else {
output.assign(input);
z = &output;
}
if(dims.empty()) {
// single norm over the whole array, optionally averaged over its length
const NDArray actualNorm = useAverage ? z->reduceAlongDimension(reduce::Norm2, {}) / z->lengthOf() : z->reduceAlongDimension(reduce::Norm2, {});
if(actualNorm.e<float>(0) > clipNorm.e<float>(0))
*z *= clipNorm / actualNorm;
}
else {
const NDArray actualNorms = z->reduceAlongDimension(reduce::Norm2, dims);
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(z->rankOf(), dims);
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (z->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "clipByNorm");
// copy the dimension mapping to device memory for the kernel
const int* dimensions = reinterpret_cast<const int*>(manager.replicatePointer(dimsToExclude.data(), dimsToExclude.size() * sizeof(int)));
NDArray::prepareSpecialUse({z}, {z, &actualNorms, &clipNorm});
BUILD_SINGLE_SELECTOR(z->dataType(), clipByNormCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), clipNorm.specialBuffer(), actualNorms.specialBuffer(), actualNorms.specialShapeInfo(), z->specialBuffer(), z->specialShapeInfo(), dimensions, (int)dimsToExclude.size(), useAverage), FLOAT_TYPES);
NDArray::registerSpecialUse({z}, {z, &actualNorms, &clipNorm});
manager.synchronize();
}
}
//////////////////////////////////////////////////////////////////////////
// Backward kernel of clipByNorm (CUDA counterpart):
//   norm >  clipNorm : z = (clipNorm/norm) * gradO * (1 - x*sum/(norm*norm))
//   norm <= clipNorm : z = gradO
template<typename T>
__global__ static void clipByNormBpCuda(const void* vClipNorm,
const void* vx, const Nd4jLong* xShapeInfo, // input
const void* vy, const Nd4jLong* yShapeInfo, // gradO
const void* vNorm, const Nd4jLong* normShapeInfo,
const void* vSum, const Nd4jLong* sumShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, // gradI
const int* dimensions, const int dimsLen, const bool useAverage) {
const T clipNorm = *reinterpret_cast<const T*>(vClipNorm);
const T* norm = reinterpret_cast<const T*>(vNorm);
const T* sum = reinterpret_cast<const T*>(vSum);
const T* x = reinterpret_cast<const T*>(vx);
const T* y = reinterpret_cast<const T*>(vy);
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong zLen, tadLen, totalThreads;
__shared__ bool sameOffsets;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
tadLen = zLen / shape::length(normShapeInfo);
totalThreads = gridDim.x * blockDim.x;
// x, y, z can share one offset when shapes and strides all match
sameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo, zShapeInfo);
}
__syncthreads();
int zCoords[MAX_RANK], normCoords[MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoords);
const auto zOffset = shape::getOffset(zShapeInfo, zCoords);
const auto yOffset = sameOffsets ? zOffset : shape::getOffset(yShapeInfo, zCoords);
// deduce norm coords
for (int j = 0; j < dimsLen; ++j)
normCoords[j] = zCoords[dimensions[j]];
const T actualNorm = useAverage ? norm[shape::getOffset(normShapeInfo, normCoords)] / tadLen : norm[shape::getOffset(normShapeInfo, normCoords)];
if(actualNorm > clipNorm) {
const T sumVal = sum[shape::getOffset(sumShapeInfo, normCoords)];
const auto xOffset = sameOffsets ? zOffset : shape::getOffset(xShapeInfo, zCoords);
z[zOffset] = (clipNorm / actualNorm) * y[yOffset] * (static_cast<T>(1.f) - (x[xOffset] * sumVal) / (actualNorm * actualNorm));
}
else
z[zOffset] = y[yOffset];
}
}
//////////////////////////////////////////////////////////////////////////
// Backprop for clipByNorm (CUDA counterpart). Scalar-norm case handled on the
// host; per-TAD case launches clipByNormBpCuda.
// Fix: the else-branch used to recompute the Norm2 reduction into a shadowing
// local `actualNorms`; the value computed at the top is reused instead.
template<typename T>
void clipByNormBp_(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const std::vector<int>& dims, const NDArray& clipNorm, const bool useAverage) {
    // per-TAD L2 norms of the forward input (single scalar when dims selects all)
    auto actualNorms = input.reduceAlongDimension(reduce::Norm2, dims);
    if(actualNorms.lengthOf() == 1) {
        const T norm = useAverage ? actualNorms.e<T>(0) / static_cast<T>(input.lengthOf()) : actualNorms.e<T>(0);
        auto clipVal = clipNorm.e<T>(0);
        if(norm > clipVal) {
            const T sum = input.reduceNumber(reduce::Sum).e<T>(0); // reduce to scalar
            const T factor1 = clipVal / norm;
            const T factor2 = static_cast<T>(1.f) / (norm * norm); // 1 / (norm*norm)
            // gradI = factor1 * gradO * (1 - factor2 * x * sum)
            auto lambda = LAMBDA_TT(x, y, sum, factor1, factor2) {
                return factor1 * y * (static_cast<T>(1.f) - factor2 * x * sum);
            };
            const_cast<NDArray&>(input).applyPairwiseLambda(const_cast<NDArray&>(gradO), lambda, gradI);
        }
        else
            gradI.assign(gradO);
    }
    else {
        const NDArray sums = input.reduceAlongDimension(reduce::Sum, dims);
        std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(gradI.rankOf(), dims);
        const int threadsPerBlock = MAX_NUM_THREADS / 2;
        const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
        PointersManager manager(context, "clipByNormBp");
        const int* dimensions = reinterpret_cast<const int*>(manager.replicatePointer(dimsToExclude.data(), dimsToExclude.size() * sizeof(int)));
        NDArray::prepareSpecialUse({&gradI}, {&actualNorms, &sums, &clipNorm, &input, &gradO});
        clipByNormBpCuda<T><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(clipNorm.specialBuffer(), input.specialBuffer(), input.specialShapeInfo(), gradO.specialBuffer(), gradO.specialShapeInfo(), actualNorms.specialBuffer(), actualNorms.specialShapeInfo(), sums.specialBuffer(), sums.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), dimensions, (int)dimsToExclude.size(), useAverage);
        NDArray::registerSpecialUse({&gradI}, {&actualNorms, &sums, &clipNorm, &input, &gradO});
        manager.synchronize();
    }
}
BUILD_SINGLE_TEMPLATE(template void clipByNormBp_, (sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool useAverage), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
// Entry point for the clipByNorm backward pass: casts the forward input to the
// gradient's dtype when they differ, then dispatches to the typed implementation.
void clipByNormBp(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool useAverage) {
const NDArray& castedInput = gradI.dataType() == input.dataType() ? input : input.cast(gradI.dataType());
BUILD_SINGLE_SELECTOR(gradI.dataType(), clipByNormBp_, (context, castedInput, gradO, gradI, dimensions, clipNorm, useAverage), FLOAT_TYPES);
}
// Clips a list of tensors by their joint L2 norm (CUDA counterpart); the extra
// slot outputs[inputs.size()] receives the global-norm scalar.
template <typename T>
void clipByGlobalNorm_(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
NDArray globalNorm = NDArrayFactory::create<T>(0, inputs[0]->getContext()); //sqrt(sum([l2norm(t)**2 for t in t_list]))
for (auto i = 0; i < inputs.size(); i++) {
auto input = inputs[i];
auto l2norm = input->reduceNumber(reduce::Norm2);
globalNorm += l2norm * l2norm;
}
globalNorm.applyTransform(transform::Sqrt, globalNorm); // = sd::math::nd4j_sqrt(globalNorm)
outputs[inputs.size()]->p(0, globalNorm);
globalNorm.syncToHost();
// NOTE(review): computed before the <= clipNorm check; divides by zero when
// globalNorm is 0, though only used on the globalNorm > clipNorm path.
const T factor = static_cast<T>(clipNorm) / globalNorm.e<T>(0);
for (size_t e = 0; e < inputs.size(); e++) {
// all-reduce
auto input = inputs[e];
auto output = outputs[e];
if (globalNorm.e<double>(0) <= clipNorm) {
output->assign(input);
}
else {
auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
input->applyLambda(lambda, *output);
}
}
}
// Dtype dispatcher for clipByGlobalNorm_.
void clipByGlobalNorm(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_, (context, inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES);
// Elementwise clamp of `input` into [leftBound, rightBound] (CUDA counterpart).
// Fix: the non-linear pass-through branch read inputBuf[outputOffset] instead of
// inputBuf[inputOffset], copying the wrong element whenever strides differ.
template <typename T>
static void __global__ clipByValueKernel(void* input, const Nd4jLong* inputShape, void* output, const Nd4jLong* outputShape, double leftBound, double rightBound) {
    __shared__ T* outputBuf;
    __shared__ T* inputBuf;
    __shared__ Nd4jLong length;
    __shared__ bool linearBuffers;
    if (threadIdx.x == 0) {
        outputBuf = reinterpret_cast<T *>(output);
        inputBuf = reinterpret_cast<T *>(input);
        length = shape::length(inputShape);
        linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) && shape::elementWiseStride(inputShape) == 1;
    }
    __syncthreads();
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto step = gridDim.x * blockDim.x;
    // grid-stride loop over all elements
    for (Nd4jLong e = tid; e < length; e += step) {
        if (linearBuffers) {
            if (inputBuf[e] > rightBound) outputBuf[e] = (T) rightBound;
            else if (inputBuf[e] < leftBound) outputBuf[e] = (T) leftBound;
            else outputBuf[e] = inputBuf[e];
        }
        else {
            auto inputOffset = shape::getIndexOffset(e, inputShape);
            auto outputOffset = shape::getIndexOffset(e, outputShape);
            if (inputBuf[inputOffset] > rightBound) outputBuf[outputOffset] = (T) rightBound;
            else if (inputBuf[inputOffset] < leftBound) outputBuf[outputOffset] = (T) leftBound;
            else outputBuf[outputOffset] = inputBuf[inputOffset]; // BUG FIX: was inputBuf[outputOffset]
        }
    }
}
// Typed host wrapper: syncs `input` to device if needed and launches
// clipByValueKernel with a fixed 256x512 configuration and 8 KB shared memory.
template <typename T>
static void clipByValue_(sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
auto stream = context->getCudaStream();
if (!input.isActualOnDeviceSide())
input.syncToDevice();
NDArray::prepareSpecialUse({&output}, {&input});
clipByValueKernel<T><<<256, 512, 8192, *stream>>>(input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftBound, rightBound);
NDArray::registerSpecialUse({&output}, {&input});
}
// Dtype dispatcher for clipByValue_.
void clipByValue(sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (context, input, leftBound, rightBound, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByValue_, (sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output);, FLOAT_TYPES);
}
}
}
|
939efdab78238dd6ab798ac3dd84d81777478b1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// max_pool_{gpu, cpu}
// --------------------------------------------------------------------------
// max-pooling bottom3d (C x H x W) -> top3d (C x H' x W')
// given (c, h', w'),
// top3d[c][h'][w'] = max_{h, w} bottom3d[c][h][w]
// argmax3d[c][h'][w'] = argmax_{h, w} bottom3d[c][h][w]
// for
// h = (-pad_h + stride_h * h') + { 0, 1, ..., kernel_h - 1 }
// w = (-pad_w + stride_w * w') + { 0, 1, ..., kernel_w - 1 }
#ifdef GPU
// Max-pooling kernel: one thread computes one output element top3d[c][h'][w']
// as the max over its (clipped) kernel_h x kernel_w window in bottom3d, and
// records the flat argmax index within the channel plane (-1 for an empty window).
// Fix: the empty-window branch used to write 0/-1 and then FALL THROUGH into the
// max search, reading p_bottom3d out of bounds; it now returns early.
__global__
void max_pool_gpu(const real* const bottom3d,
                  real* const top3d,
                  int* const argmax3d,
                  const int C, const int bottom_H, const int bottom_W,
                  const int top_H, const int top_W,
                  const int kernel_h, const int kernel_w,
                  const int pad_h, const int pad_w,
                  const int stride_h, const int stride_w)
{
  // thread index: (c, h', w') = c*H'*W' + h'*W' + w'
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < C * top_H * top_W) {
    // parse thread index -> (c, h', w')
    const int c = index / top_H / top_W;
    const int ht = (index / top_W) % top_H;
    const int wt = index % top_W;
    // pooling range in bottom, clipped to [0, bottom_H) x [0, bottom_W)
    //   h = (-pad_h + stride_h * h') + { 0, 1, ..., kernel_h - 1}
    //   w = (-pad_w + stride_w * w') + { 0, 1, ..., kernel_w - 1}
    const int h_start = MAX(0, -pad_h + stride_h * ht);
    const int w_start = MAX(0, -pad_w + stride_w * wt);
    const int h_end = MIN(-pad_h + stride_h * ht + kernel_h, bottom_H);
    const int w_end = MIN(-pad_w + stride_w * wt + kernel_w, bottom_W);
    // empty pooling region: write 0 / -1 and stop (BUG FIX: early return
    // prevents the out-of-bounds read of p_bottom3d[maxidx] below)
    if (h_start >= h_end || w_start >= w_end) {
      top3d[index] = 0;
      argmax3d[index] = -1;
      return;
    }
    // find maximum in the (guaranteed non-empty) pooling region
    const real* const p_bottom3d = bottom3d + c * bottom_H * bottom_W;
    int maxidx = h_start * bottom_W + w_start;
    real maxval = p_bottom3d[maxidx];
    for (int h = h_start; h < h_end; ++h) {
      for (int w = w_start; w < w_end; ++w) {
        if (p_bottom3d[h * bottom_W + w] > maxval) {
          maxidx = h * bottom_W + w;
          maxval = p_bottom3d[maxidx];
        }
      }
    }
    // top3d[c][h'][w'] = "max in the region"
    top3d[index] = maxval;
    argmax3d[index] = maxidx;
  }
}
#else
// CPU reference of max_pool_gpu: iterates over all output elements serially,
// producing identical top3d / argmax3d results.
// Fix: the empty-window branch used to write 0/-1 and then FALL THROUGH into
// the max search, reading p_bottom3d out of bounds; it now continues.
void max_pool_cpu(const real* const bottom3d,
                  real* const top3d,
                  int* const argmax3d,
                  const int C, const int bottom_H, const int bottom_W,
                  const int top_H, const int top_W,
                  const int kernel_h, const int kernel_w,
                  const int pad_h, const int pad_w,
                  const int stride_h, const int stride_w)
{
  // flat index: (c, h', w') = c*H'*W' + h'*W' + w'
  for (int index = 0; index < C * top_H * top_W; ++index) {
    // parse index -> (c, h', w')
    const int c = index / top_H / top_W;
    const int ht = (index / top_W) % top_H;
    const int wt = index % top_W;
    // pooling range in bottom, clipped to [0, bottom_H) x [0, bottom_W)
    //   h = (-pad_h + stride_h * h') + { 0, 1, ..., kernel_h - 1}
    //   w = (-pad_w + stride_w * w') + { 0, 1, ..., kernel_w - 1}
    const int h_start = MAX(0, -pad_h + stride_h * ht);
    const int w_start = MAX(0, -pad_w + stride_w * wt);
    const int h_end = MIN(-pad_h + stride_h * ht + kernel_h, bottom_H);
    const int w_end = MIN(-pad_w + stride_w * wt + kernel_w, bottom_W);
    // empty pooling region: write 0 / -1 and move on (BUG FIX: continue
    // prevents the out-of-bounds read of p_bottom3d[maxidx] below)
    if (h_start >= h_end || w_start >= w_end) {
      top3d[index] = 0;
      argmax3d[index] = -1;
      continue;
    }
    // find maximum in the (guaranteed non-empty) pooling region
    const real* const p_bottom3d = bottom3d + c * bottom_H * bottom_W;
    int maxidx = h_start * bottom_W + w_start;
    real maxval = p_bottom3d[maxidx];
    for (int h = h_start; h < h_end; ++h) {
      for (int w = w_start; w < w_end; ++w) {
        if (p_bottom3d[h * bottom_W + w] > maxval) {
          maxidx = h * bottom_W + w;
          maxval = p_bottom3d[maxidx];
        }
      }
    }
    // top3d[c][h'][w'] = "max in the region"
    top3d[index] = maxval;
    argmax3d[index] = maxidx;
  }
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// pool_forward
// --------------------------------------------------------------------------
// max-pooling: bottom -> top
// bottom: C x H x W
// top: C x H' x W'
// argmax: C x H' x W' array
// Runs max-pooling for every item in the batch: bottom3d (C x H x W) ->
// top3d (C x H' x W'), writing per-element argmax indices into argmax_data.
// Output spatial sizes use ceil division:
//   H' = 1 + ceil((H + 2*pad_h - kernel_h) / stride_h), likewise for W'.
void pool_forward(const Tensor* const bottom3d,
Tensor* const top3d,
int* const argmax_data,
const PoolOption* const option)
{
// kernel size, padding size & stride size
const int kernel_h = option->kernel_h;
const int kernel_w = option->kernel_w;
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// do forward-pass for each item in the batch
const real* p_bottom_item = bottom3d->data;
real* p_top_item = top3d->data;
int* p_argmax_item = argmax_data;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: C x H x W
const int C = bottom3d->shape[n][0]; // C
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// set top shape: C x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H
= 1 + DIV_THEN_CEIL(bottom_H + 2 * pad_h - kernel_h, stride_h);
const int top_W
= 1 + DIV_THEN_CEIL(bottom_W + 2 * pad_w - kernel_w, stride_w);
top3d->shape[n][0] = C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// max-pooling
// bottom3d (C x H x W) -> top3d (C x H' x W')
#ifdef GPU
{
// one thread per output element
const int num_threads = C * top_H * top_W;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( max_pool_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
p_bottom_item, p_top_item, p_argmax_item,
C, bottom_H, bottom_W, top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
}
#else
{
max_pool_cpu(
p_bottom_item, p_top_item, p_argmax_item,
C, bottom_H, bottom_W, top_H, top_W,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w);
}
#endif
// locate next item
{
const int bottom_size = C * bottom_H * bottom_W;
const int top_size = C * top_H * top_W;
p_bottom_item += bottom_size;
p_top_item += top_size;
p_argmax_item += top_size;
}
} // endfor batch
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
// Computes the output shapes and per-item start offsets for pooling without
// touching any data; also reports the argmax buffer size (= total top size).
// Must agree with the shape arithmetic in pool_forward.
void pool_shape(const Tensor* const bottom3d,
Tensor* const top3d,
int* const argmax_size,
const PoolOption* const option)
{
const int kernel_h = option->kernel_h;
const int kernel_w = option->kernel_w;
const int pad_h = option->pad_h;
const int pad_w = option->pad_w;
const int stride_h = option->stride_h;
const int stride_w = option->stride_w;
// calculate shape for each item in the batch
int total_size = 0;
for (int n = 0; n < bottom3d->num_items; ++n) {
// bottom shape: C x H x W
const int C = bottom3d->shape[n][0]; // C
const int bottom_H = bottom3d->shape[n][1]; // H
const int bottom_W = bottom3d->shape[n][2]; // W
// top shape: C x H' x W'
// H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
// W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
const int top_H
= 1 + DIV_THEN_CEIL(bottom_H + 2 * pad_h - kernel_h, stride_h);
const int top_W
= 1 + DIV_THEN_CEIL(bottom_W + 2 * pad_w - kernel_w, stride_w);
top3d->shape[n][0] = C;
top3d->shape[n][1] = top_H;
top3d->shape[n][2] = top_W;
// start position for n-th item in top3d->data
top3d->start[n] = total_size;
total_size += C * top_H * top_W;
}
top3d->ndim = 3;
top3d->num_items = bottom3d->num_items;
// argmax data size = top size
*argmax_size = total_size;
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
#include <stdio.h>
// Standalone test driver: loads a pooling input and a reference output from
// disk, runs pool_forward (GPU or CPU build), and compares element by element.
// Fixes: (1) the verification loop only advanced `i` on mismatches, so the
// index never moved past a matching element; (2) the async device->host copy
// was not synchronized before the host read Y_data.
int main(int argc, char* argv[])
{
  // variable declaration & memory allocation
  Tensor X, Y;
  real *X_data = NULL, *Y_data = NULL, *Y_true_data = NULL;
  int* p_argmax_data = NULL;
  PoolOption option;
  int argmax_size;

  // set option
  {
    option.kernel_h = 3;
    option.kernel_w = 3;
    option.pad_h = 0;
    option.pad_w = 0;
    option.stride_h = 2;
    option.stride_w = 2;
  }

  // load input and reference output; derive Y's shape from X
  {
    int ndim;
    int shape[g_max_ndim];
    int total_size;
    X_data = load_data("../data/temp/pool_bottom0.bin",
                       &ndim, shape, NULL);
    X.num_items = shape[0];
    X.ndim = ndim - 1;
    total_size = 0;
    for (int n = 0; n < X.num_items; ++n) {
      int size_n = 1;
      for (int i = 0; i < X.ndim; ++i) {
        X.shape[n][i] = shape[i + 1];
        size_n *= shape[i + 1];
      }
      X.start[n] = total_size;
      total_size += size_n;
    }
    pool_shape(&X, &Y, &argmax_size, &option);
    Y_true_data = load_data("../data/temp/pool_top0.bin",
                            &ndim, shape, NULL);
    Y_data = (real*)malloc(flatten_size(&Y) * sizeof(real));
  }

  // CUDA initialization
#ifdef GPU
  {
    printf("set device\n");
    hipSetDevice(0);
  }
#endif

  // bind loaded data to corresponding tensors
#ifdef GPU
  {
    const int X_size = flatten_size(&X);
    const int Y_size = flatten_size(&Y);
    printf("gpu malloc\n");
    hipMalloc(&X.data, X_size * sizeof(real));
    hipMalloc(&Y.data, Y_size * sizeof(real));
    hipMalloc(&p_argmax_data, argmax_size * sizeof(int));
    printf("memcpy: cpu -> gpu\n");
    hipMemcpyAsync(X.data, X_data, X_size * sizeof(real),
                   hipMemcpyHostToDevice);
  }
#else
  {
    // CPU build: operate directly on the host buffers
    X.data = X_data;
    Y.data = Y_data;
    p_argmax_data = (int*)malloc(argmax_size * sizeof(int));
  }
#endif

  // do forward operation
  {
    printf("do forward\n");
    pool_forward(&X, &Y, p_argmax_data, &option);
  }

  // copy GPU data to main memory
#ifdef GPU
  {
    const int Y_size = flatten_size(&Y);
    printf("memcpy: cpu <- gpu\n");
    hipMemcpyAsync(Y_data, Y.data, Y_size * sizeof(real),
                   hipMemcpyDeviceToHost);
    // BUG FIX: the copy above is asynchronous; wait for it to complete
    // before the host reads Y_data in the verification step below
    hipDeviceSynchronize();
  }
#endif

  // verify results against the reference output
  {
    int i = 0;
    printf("verification\n");
    for (int n = 0; n < Y.num_items; ++n) {
      for (int c = 0; c < Y.shape[n][0]; ++c) {
        for (int h = 0; h < Y.shape[n][1]; ++h) {
          for (int w = 0; w < Y.shape[n][2]; ++w) {
            if (Y_data[i] != Y_true_data[i]) {
              printf("Y[%d,%d,%d,%d] = %.6f Y_true[%d,%d,%d,%d] = %.6f\n",
                     n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
            }
            // BUG FIX: advance the flat index for every element,
            // not only on mismatches (++i was inside the if-block)
            ++i;
          } // endfor w
        } // endfor h
      } // endfor c
    } // endfor n
  }

  // memory deallocation
  {
    printf("free\n");
    free(X_data);
    free(Y_data);
    free(Y_true_data);
  }
#ifdef GPU
  {
    printf("gpu free\n");
    hipFree(X.data);
    hipFree(Y.data);
    hipFree(p_argmax_data);
  }
#else
  {
    free(p_argmax_data);
  }
#endif
  return 0;
}
#endif // endifdef TEST
| 939efdab78238dd6ab798ac3dd84d81777478b1e.cu | #include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// max_pool_{gpu, cpu}
// --------------------------------------------------------------------------
// max-pooling bottom3d (C x H x W) -> top3d (C x H' x W')
// given (c, h', w'),
// top3d[c][h'][w'] = max_{h, w} bottom3d[c][h][w]
// argmax3d[c][h'][w'] = argmax_{h, w} bottom3d[c][h][w]
// for
// h = (-pad_h + stride_h * h') + { 0, 1, ..., kernel_h - 1 }
// w = (-pad_w + stride_w * w') + { 0, 1, ..., kernel_w - 1 }
#ifdef GPU
// Max-pooling kernel: one thread per top element (launch with at least
// C*H'*W' threads total). argmax3d records, per output, the flat
// (h * bottom_W + w) offset of the chosen maximum within the channel,
// or -1 when the pooling window is empty.
__global__
void max_pool_gpu(const real* const bottom3d,
                  real* const top3d,
                  int* const argmax3d,
                  const int C, const int bottom_H, const int bottom_W,
                  const int top_H, const int top_W,
                  const int kernel_h, const int kernel_w,
                  const int pad_h, const int pad_w,
                  const int stride_h, const int stride_w)
{
  // thread index: (c, h', w') = c*H'*W' + h'*W' + w'
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < C * top_H * top_W) {
    // parse thread index -> (c, h', w')
    const int c = index / top_H / top_W;
    const int ht = (index / top_W) % top_H;
    const int wt = index % top_W;
    // pooling range in bottom, clipped to the input extent
    //   h = (-pad_h + stride_h * h') + { 0, 1, ..., kernel_h - 1}
    //   w = (-pad_w + stride_w * w') + { 0, 1, ..., kernel_w - 1}
    const int h_start = MAX(0, -pad_h + stride_h * ht);
    const int w_start = MAX(0, -pad_w + stride_w * wt);
    const int h_end = MIN(-pad_h + stride_h * ht + kernel_h, bottom_H);
    const int w_end = MIN(-pad_w + stride_w * wt + kernel_w, bottom_W);
    // empty pooling window: write sentinels and stop here - the original
    // fell through and read p_bottom3d[maxidx] with a potentially
    // out-of-bounds maxidx before discarding the value
    if (h_start >= h_end || w_start >= w_end) {
      top3d[index] = 0;
      argmax3d[index] = -1;
      return;
    }
    // find maximum in the (non-empty) pooling region
    const real* const p_bottom3d = bottom3d + c * bottom_H * bottom_W;
    int maxidx = h_start * bottom_W + w_start;
    real maxval = p_bottom3d[maxidx];
    for (int h = h_start; h < h_end; ++h) {
      for (int w = w_start; w < w_end; ++w) {
        if (p_bottom3d[h * bottom_W + w] > maxval) {
          maxidx = h * bottom_W + w;
          maxval = p_bottom3d[maxidx];
        }
      }
    }
    // region is guaranteed non-empty here
    top3d[index] = maxval;
    argmax3d[index] = maxidx;
  }
}
#else
// CPU reference implementation of max_pool_gpu: same index mapping, one
// loop iteration per top element instead of one thread.
void max_pool_cpu(const real* const bottom3d,
                  real* const top3d,
                  int* const argmax3d,
                  const int C, const int bottom_H, const int bottom_W,
                  const int top_H, const int top_W,
                  const int kernel_h, const int kernel_w,
                  const int pad_h, const int pad_w,
                  const int stride_h, const int stride_w)
{
  // thread index: (c, h', w') = c*H'*W' + h'*W' + w'
  for (int index = 0; index < C * top_H * top_W; ++index) {
    // parse thread index -> (c, h', w')
    const int c = index / top_H / top_W;
    const int ht = (index / top_W) % top_H;
    const int wt = index % top_W;
    // pooling range in bottom, clipped to the input extent
    //   h = (-pad_h + stride_h * h') + { 0, 1, ..., kernel_h - 1}
    //   w = (-pad_w + stride_w * w') + { 0, 1, ..., kernel_w - 1}
    const int h_start = MAX(0, -pad_h + stride_h * ht);
    const int w_start = MAX(0, -pad_w + stride_w * wt);
    const int h_end = MIN(-pad_h + stride_h * ht + kernel_h, bottom_H);
    const int w_end = MIN(-pad_w + stride_w * wt + kernel_w, bottom_W);
    // empty pooling window: write sentinels and move on - the original
    // fell through and read p_bottom3d[maxidx] with a potentially
    // out-of-bounds maxidx before discarding the value
    if (h_start >= h_end || w_start >= w_end) {
      top3d[index] = 0;
      argmax3d[index] = -1;
      continue;
    }
    // find maximum in the (non-empty) pooling region
    const real* const p_bottom3d = bottom3d + c * bottom_H * bottom_W;
    int maxidx = h_start * bottom_W + w_start;
    real maxval = p_bottom3d[maxidx];
    for (int h = h_start; h < h_end; ++h) {
      for (int w = w_start; w < w_end; ++w) {
        if (p_bottom3d[h * bottom_W + w] > maxval) {
          maxidx = h * bottom_W + w;
          maxval = p_bottom3d[maxidx];
        }
      }
    }
    // region is guaranteed non-empty here
    top3d[index] = maxval;
    argmax3d[index] = maxidx;
  }
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// pool_forward
// --------------------------------------------------------------------------
// max-pooling: bottom -> top
//   bottom: C x H x W
//   top:    C x H' x W' with H' = 1 + DIV_THEN_CEIL(H + 2*pad_h - kernel_h, stride_h)
//   argmax: C x H' x W' array (one winning-bottom-offset per top element)
// Walks the batch item by item, writes the per-item top shape, and runs the
// GPU kernel or CPU loop on each item's data slice.
void pool_forward(const Tensor* const bottom3d,
                  Tensor* const top3d,
                  int* const argmax_data,
                  const PoolOption* const option)
{
  // kernel size, padding size & stride size
  const int kernel_h = option->kernel_h;
  const int kernel_w = option->kernel_w;
  const int pad_h = option->pad_h;
  const int pad_w = option->pad_w;
  const int stride_h = option->stride_h;
  const int stride_w = option->stride_w;
  // running pointers into the flat data buffers; advanced per batch item
  const real* p_bottom_item = bottom3d->data;
  real* p_top_item = top3d->data;
  int* p_argmax_item = argmax_data;
  for (int n = 0; n < bottom3d->num_items; ++n) {
    // bottom shape: C x H x W
    const int C = bottom3d->shape[n][0];  // C
    const int bottom_H = bottom3d->shape[n][1];  // H
    const int bottom_W = bottom3d->shape[n][2];  // W
    // set top shape: C x H' x W'
    //   H' = 1 + (H + 2*pad_h - kernel_h) / stride_h
    //   W' = 1 + (W + 2*pad_w - kernel_w) / stride_w
    const int top_H
        = 1 + DIV_THEN_CEIL(bottom_H + 2 * pad_h - kernel_h,  stride_h);
    const int top_W
        = 1 + DIV_THEN_CEIL(bottom_W + 2 * pad_w - kernel_w,  stride_w);
    top3d->shape[n][0] = C;
    top3d->shape[n][1] = top_H;
    top3d->shape[n][2] = top_W;
    // max-pooling
    //   bottom3d (C x H x W) -> top3d (C x H' x W')
    #ifdef GPU
    {
      // one thread per top element, rounded up to full 512-thread blocks;
      // the kernel bounds-checks the tail itself
      const int num_threads = C * top_H * top_W;
      const int threads_per_block = 512;
      const int num_blocks = DIV_THEN_CEIL(num_threads,  threads_per_block);
      // NOTE(review): launch errors are not checked here (no
      // cudaGetLastError / sync); a failed launch surfaces only later
      max_pool_gpu<<<num_blocks, threads_per_block>>>(
          p_bottom_item,  p_top_item,  p_argmax_item,
          C,  bottom_H,  bottom_W,  top_H,  top_W,
          kernel_h,  kernel_w,  pad_h,  pad_w,  stride_h,  stride_w);
    }
    #else
    {
      max_pool_cpu(
          p_bottom_item,  p_top_item,  p_argmax_item,
          C,  bottom_H,  bottom_W,  top_H,  top_W,
          kernel_h,  kernel_w,  pad_h,  pad_w,  stride_h,  stride_w);
    }
    #endif
    // locate next item: advance the running pointers past this item's data
    {
      const int bottom_size = C * bottom_H * bottom_W;
      const int top_size = C * top_H * top_W;
      p_bottom_item += bottom_size;
      p_top_item += top_size;
      p_argmax_item += top_size;
    }
  } // endfor batch
  top3d->ndim = 3;
  top3d->num_items = bottom3d->num_items;
}
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
// Compute the top-blob shape produced by max-pooling and the argmax buffer
// size (= total number of top elements across the batch).
//   H' = 1 + DIV_THEN_CEIL(H + 2*pad_h - kernel_h, stride_h), likewise for W'
void pool_shape(const Tensor* const bottom3d,
                Tensor* const top3d,
                int* const argmax_size,
                const PoolOption* const option)
{
  int accum_size = 0;

  // per-item output shape: C x H' x W'
  for (int n = 0; n < bottom3d->num_items; ++n) {
    const int num_channels = bottom3d->shape[n][0];
    const int in_h = bottom3d->shape[n][1];
    const int in_w = bottom3d->shape[n][2];
    const int out_h
        = 1 + DIV_THEN_CEIL(in_h + 2 * option->pad_h - option->kernel_h,
                            option->stride_h);
    const int out_w
        = 1 + DIV_THEN_CEIL(in_w + 2 * option->pad_w - option->kernel_w,
                            option->stride_w);

    top3d->shape[n][0] = num_channels;
    top3d->shape[n][1] = out_h;
    top3d->shape[n][2] = out_w;

    // offset of the n-th item inside top3d->data
    top3d->start[n] = accum_size;
    accum_size += num_channels * out_h * out_w;
  }

  top3d->ndim = 3;
  top3d->num_items = bottom3d->num_items;

  // the argmax array mirrors the top blob element-for-element
  *argmax_size = accum_size;
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
#include <stdio.h>
// Test driver: loads a bottom blob, runs pool_forward (GPU or CPU build),
// and compares the result against a reference blob from disk.
// Fixes vs. original:
//   * verification loop only advanced "i" inside the mismatch branch, so the
//     flat index never tracked (n, c, h, w) - "++i" now runs for every element
//   * host read Y_data after an async device->host copy without a sync;
//     cudaDeviceSynchronize() added before verification
int main(int argc, char* argv[])
{
  // variable declaration & memory allocation
  Tensor X, Y;
  real *X_data = NULL, *Y_data = NULL, *Y_true_data = NULL;
  int* p_argmax_data = NULL;
  PoolOption option;
  int argmax_size;
  // set option
  {
    option.kernel_h = 3;
    option.kernel_w = 3;
    option.pad_h = 0;
    option.pad_w = 0;
    option.stride_h = 2;
    option.stride_w = 2;
  }
  // load data
  {
    int ndim;
    int shape[g_max_ndim];
    int total_size;
    X_data = load_data("../data/temp/pool_bottom0.bin",
                       &ndim, shape, NULL);
    X.num_items = shape[0];
    X.ndim = ndim - 1;
    total_size = 0;
    for (int n = 0; n < X.num_items; ++n) {
      int size_n = 1;
      for (int i = 0; i < X.ndim; ++i) {
        X.shape[n][i] = shape[i + 1];
        size_n *= shape[i + 1];
      }
      X.start[n] = total_size;
      total_size += size_n;
    }
    pool_shape(&X, &Y, &argmax_size, &option);
    Y_true_data = load_data("../data/temp/pool_top0.bin",
                            &ndim, shape, NULL);
    Y_data = (real*)malloc(flatten_size(&Y) * sizeof(real));
  }
  // CUDA initialization
  #ifdef GPU
  {
    printf("set device\n");
    cudaSetDevice(0);
  }
  #endif
  // bind loaded data to corresponding tensors
  #ifdef GPU
  {
    const int X_size = flatten_size(&X);
    const int Y_size = flatten_size(&Y);
    printf("gpu malloc\n");
    cudaMalloc(&X.data, X_size * sizeof(real));
    cudaMalloc(&Y.data, Y_size * sizeof(real));
    cudaMalloc(&p_argmax_data, argmax_size * sizeof(int));
    printf("memcpy: cpu -> gpu\n");
    cudaMemcpyAsync(X.data, X_data, X_size * sizeof(real),
                    cudaMemcpyHostToDevice);
  }
  #else
  {
    X.data = X_data;
    Y.data = Y_data;
    p_argmax_data = (int*)malloc(argmax_size * sizeof(int));
  }
  #endif
  // do forward operation
  {
    printf("do forward\n");
    pool_forward(&X, &Y, p_argmax_data, &option);
  }
  // copy GPU data to main memory
  #ifdef GPU
  {
    const int Y_size = flatten_size(&Y);
    printf("memcpy: cpu <- gpu\n");
    cudaMemcpyAsync(Y_data, Y.data, Y_size * sizeof(real),
                    cudaMemcpyDeviceToHost);
    // the copy above is asynchronous; wait for it before reading Y_data
    cudaDeviceSynchronize();
  }
  #endif
  // verify results
  {
    int i = 0;
    printf("verification\n");
    for (int n = 0; n < Y.num_items; ++n) {
      for (int c = 0; c < Y.shape[n][0]; ++c) {
        for (int h = 0; h < Y.shape[n][1]; ++h) {
          for (int w = 0; w < Y.shape[n][2]; ++w) {
            if (Y_data[i] != Y_true_data[i]) {
              printf("Y[%d,%d,%d,%d] = %.6f  Y_true[%d,%d,%d,%d] = %.6f\n",
                     n, c, h, w, Y_data[i], n, c, h, w, Y_true_data[i]);
            }
            // advance the flat index for EVERY element (the original only
            // incremented inside the mismatch branch, desynchronizing i
            // from (n, c, h, w))
            ++i;
          } // endfor w
        } // endfor h
      } // endfor c
    } // endfor n
  }
  // memory deallocation
  {
    printf("free\n");
    free(X_data);
    free(Y_data);
    free(Y_true_data);
  }
  #ifdef GPU
  {
    printf("gpu free\n");
    cudaFree(X.data);
    cudaFree(Y.data);
    cudaFree(p_argmax_data);
  }
  #else
  {
    free(p_argmax_data);
  }
  #endif
  return 0;
}
|
ece9388fc1f9c24dc5290579e72a6726bbabf490.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file cuda_dijkstra.c
* \brief Dokumentirana datoteka.
*
* Datoteka u kojoj su dokumentirane funkcije.
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <iostream>
#include <time.h>
#define PRINT_THRESHOLD 10 // Number of vertices threshold to stop printing data
#define MAX_WEIGHT 100 // Max edge weight ([0, MAX_WEIGHT])
/**
 * \brief Returns true when the vertex count is below PRINT_THRESHOLD,
 *        i.e. when the matrices are small enough to print to the console.
 *
 * \param V number of vertices
 * \return true if printing is allowed, false otherwise
 */
bool print_threshold_check(int V)
{
    // single comparison replaces the original if/else ladder
    return V < PRINT_THRESHOLD;
}
/**
 * \brief Fills the adjacency-matrix graph with random symmetric edge weights
 *        in [0, MAX_WEIGHT - 1]; the diagonal (self-loops) is set to 0.
 *
 * \param graph array representation of a graph (row-major, dim x dim)
 * \param N total number of array elements (dim * dim); must be a perfect square
 */
void createGraph(float *graph, int N)
{
    // hoist the loop-invariant sqrt(N): the original recomputed it in every
    // loop test and inside every index expression
    const int dim = (int)sqrt((double)N);
    srand((unsigned)time(0));
    for (int col = 0; col < dim; col++)
    {
        for (int row = 0; row < dim; row++)
        {
            if (col != row)
            {
                graph[row * dim + col] = rand() % MAX_WEIGHT; // assign random weight
                // mirror the value so the matrix stays symmetric (undirected graph)
                graph[col * dim + row] = graph[row * dim + col];
            }
            else
            {
                graph[col * dim + row] = 0; // no self-loops allowed
            }
        }
    }
}
/**
 * \brief Prints the graph (array representation) to the console as a matrix.
 *
 * \param graph array representation of a graph
 * \param size size of the array (numOfVertices^2)
 */
void printGraph(float *graph, int size)
{
    // hoist the loop-invariant row length of the square matrix
    const int dim = (int)sqrt((double)size);
    // fix: the original literal was "\Graf" - '\G' is an invalid escape
    // sequence; a newline was clearly intended
    printf("\nGraf:\n");
    for (int index = 0; index < size; index++)
    {
        // break the line after each complete matrix row
        if (((index + 1) % dim) == 0)
        {
            printf("%5.1f\n", graph[index]);
        }
        else
        {
            printf("%5.1f ", graph[index]);
        }
    }
    printf("\n");
}
/**
 * \brief Finds the not-yet-processed vertex with the smallest tentative distance.
 *
 * \param dist array of tentative distances from the source
 * \param sptSet per-vertex flags; true if the vertex is already in the SPT
 * \param V number of vertices
 * \return index of the closest unprocessed vertex, or -1 if every vertex is processed
 */
int min_distance_cpu(float *dist, bool *sptSet, int V)
{
    float min = INT_MAX;
    // fix: the index is an int - the original declared it float (relying on an
    // implicit float->int conversion at return) and left it uninitialized when
    // every vertex was already processed
    int min_index = -1;
    // Find minimum distance (ties resolve to the highest index because of <=)
    for (int v = 0; v < V; v++)
    {
        if (!sptSet[v] && dist[v] <= min)
        {
            min = dist[v];
            min_index = v;
        }
    }
    return min_index;
}
/**
 * \brief Runs Dijkstra's algorithm from one source vertex on the CPU.
 *
 * \param graph (array) V x V adjacency matrix with weighted edges and no loops
 * \param src (int) index of the source vertex
 * \param V number of vertices
 * \param result (array) receives the shortest distance from src to every vertex
 */
void dijkstra_cpu(float *graph, int src, int V, float *result)
{
    // sptSet[i] is true once vertex i's shortest distance is finalized
    bool* sptSet = (bool*)malloc(V * sizeof(bool));
    // Initialize all distances as INFINITE and sptSet[] as false
    for (int i = 0; i < V; i++)
    {
        result[i] = INT_MAX;
        sptSet[i] = false;
    }
    // Distance of source vertex from itself is always 0
    result[src] = 0;
    // Finalize one vertex per iteration
    for (int count = 0; count < V-1; count++)
    {
        // Pick the minimum distance vertex from the set of vertices not
        // yet processed. "u" equals "src" in the first iteration.
        int u = min_distance_cpu(result, sptSet, V);
        // Mark the picked vertex as processed
        sptSet[u] = true;
        // Relax all edges (u, v)
        for (int v = 0; v < V; v++)
        {
            // Update result[v] only if v is unprocessed, the edge (u, v)
            // exists, u is reachable, and the path through u is shorter
            if (
                !sptSet[v]
                && graph[(u * V) + v] && result[u] != INT_MAX
                && result[u] + graph[(u * V) + v] < result[v]
                )
            {
                result[v] = result[u] + graph[(u * V) + v];
            }
        }
    }
    // fix: the original leaked this scratch buffer on every call
    free(sptSet);
}
/**
 * \brief Prints the shortest distance from source vertex "src" to every vertex.
 *
 * \param src (int) index of the source vertex
 * \param dist (array) distances from src, one entry per vertex
 * \param V number of vertices
 */
void print_solution(int src, float *dist, int V)
{
    printf("\n Vrh Udaljenost of izvora %d\n", src);
    // one output line per vertex
    int v = 0;
    while (v < V)
    {
        printf("%d \t\t %.1f\n", v, dist[v]);
        ++v;
    }
}
/**
 * \brief Prints one source's distances out of the full V x V result array by
 *        striding through it with step V, starting at index "src".
 *
 * \param src (int) index of the source vertex
 * \param dist (array) full result array holding all sources' distances
 * \param V number of vertices (= stride between consecutive entries)
 * \param size size of the array (numOfVertices^2)
 */
void print_solution_interval(int src, float* dist, int V, int size)
{
    printf("\n Vrh Udaljenost of izvora %d\n", src);
    // walk every V-th element belonging to this source
    int idx = src;
    while (idx < size)
    {
        printf("%d \t\t %.1f\n", idx, dist[idx]);
        idx += V;
    }
}
/**
 * \brief Scatters one vertex's Dijkstra distances into the full result array:
 *        dist[v] is stored at result[v * V + src].
 *
 * \param src (int) index of the source vertex
 * \param dist (array) per-vertex distances produced for this source
 * \param result (array) full V x V result array being filled in
 * \param V number of vertices
 * \param size size of the array (numOfVertices^2); kept for interface
 *             compatibility, not used
 */
void populate_result_array(int src, float *dist, float *result, int V, int size)
{
    for (int v = 0; v < V; ++v)
    {
        result[v * V + src] = dist[v];
    }
}
/**
 * \brief GPU: initializes the Dijkstra working buffers.
 *
 * Expected launch: <<<V, V>>> so that the global thread index maps onto one
 * cell of the V x V matrices (blockIdx.x = source vertex row, threadIdx.x =
 * target vertex column). No bounds check is done, so the launch must cover
 * exactly V*V threads.
 *
 * \param result V*V distance matrix; set to INT_MAX everywhere except the
 *               diagonal (distance of a vertex to itself), which becomes 0
 * \param visited V*V flags; all cleared to false (no vertex is in any SPT yet)
 */
__global__ void gpu_setUpGraph(float* result, bool* visited) {
    // global thread id == flat index into the V x V matrices
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // Initially set all vertex not to have been visited
    visited[index] = false;
    // (blockDim.x * blockIdx.x) + blockIdx.x is the diagonal cell of the row
    // this block handles, i.e. the condition holds when threadIdx.x == blockIdx.x
    if (index == ((blockDim.x * blockIdx.x) + blockIdx.x))
        result[index] = 0; // distance to itself is always 0
    else result[index] = INT_MAX;
}
/**
 * \brief GPU: runs Dijkstra for every source vertex, one thread per source.
 *
 * Expected launch: <<<1, V>>>; thread t computes row t of the V x V result
 * matrix, i.e. the shortest distances from source vertex t. Buffers must be
 * pre-initialized by gpu_setUpGraph.
 *
 * \param graph V*V adjacency matrix with edge weights (0 = self/no edge)
 * \param result V*V distance matrix (row per source)
 * \param visited V*V per-source visited flags
 * \param V number of vertices
 */
__global__ void gpu_dijkstra_threads(float* graph, float* result, bool* visited, int V) {
    // Relax V-1 times: each pass finalizes one more vertex for this source
    for (int count = 0; count < V - 1; count++)
    {
        // Pick the unvisited vertex with the smallest tentative distance.
        // fix: "min" is float now - the original declared it int, silently
        // truncating the float distances on assignment (the CPU version uses
        // float); "u" is also initialized instead of left indeterminate.
        float min = INT_MAX;
        int u = 0;
        for (int v = 0; v < V; v++)
            if (visited[(V * threadIdx.x) + v] == false && result[(V * threadIdx.x) + v] <= min)
                min = result[(V * threadIdx.x) + v], u = v;
        // Mark the picked vertex as processed
        visited[(V * threadIdx.x) + u] = true;
        // Relax all edges (u, v)
        for (int v = 0; v < V; v++) {
            // Update only if v is unvisited, the edge exists, u is reachable,
            // and the path through u is shorter than the current value
            if (!visited[(V * threadIdx.x) + v] && graph[(u * V) + v] && result[(V * threadIdx.x) + u] != INT_MAX
                && result[(V * threadIdx.x) + u] + graph[(u * V) + v] < result[(V * threadIdx.x) + v])
                result[(V * threadIdx.x) + v] = result[(V * threadIdx.x) + u] + graph[(u * V) + v];
        }
    }
}
/**
 * \brief Compares two arrays element-wise and reports the outcome.
 *
 * Prints every mismatching index. fix: the success message is now printed
 * only when the arrays are identical - the original printed it
 * unconditionally, even right after reporting mismatches.
 *
 * \param graph1 Graph 1
 * \param graph2 Graph 2
 * \param size size of arrays
 */
void compare_results(float *graph1, float *graph2, int size)
{
    bool identical = true;
    for (int i = 0; i < size; i++)
    {
        if (graph1[i] != graph2[i])
        {
            identical = false;
            printf("\n\n GRESKA:\n");
            printf("Vrijednost grafa na poziciji %d se ne podudaraju! : [%5.1f, %5.1f]", i, graph1[i], graph2[i]);
        }
    }
    if (identical)
    {
        printf("CPU i GPU matice se podudaraju.");
    }
}
/**
 * \brief Returns the difference t1 - t2 between two timings (positive when
 *        t1 is the larger, i.e. slower, measurement).
 *
 * \param t1 first timing
 * \param t2 second timing
 * \return t1 - t2, widened to double
 */
double compare_times(float t1, float t2)
{
    // subtraction stays in float (as in the original); the result is
    // widened to double on return
    const float diff = t1 - t2;
    return diff;
}
/**
 * \brief Main: builds a random graph, runs all-sources Dijkstra on CPU and
 *        GPU, compares the resulting matrices, and reports timings.
 *
 * Fixes vs. original:
 *   * the int literal 0 was passed for a %f conversion (undefined behavior) -
 *     now 0.0f
 *   * hipEventElapsedTime reports milliseconds, so seconds = ms * 0.001; the
 *     original multiplied by 0.0001 (off by a factor of 10)
 *   * events are destroyed and print_req is freed (both leaked before)
 */
int main()
{
    // NOTE:
    // All printing threshold checks are performed here so that the printing
    // helpers stay reusable.
    /**************************** TAKE USER INPUT *****************************/
    int* numOfVertices = (int*)std::malloc(sizeof(int));
    int* arrayLength = (int*) std::malloc(sizeof(int));
    // PROMPT USER FOR # OF VERTICES
    printf("\nUnesite broj vrhova: ");
    scanf("%d", numOfVertices);
    // the graph is a flat array representation of a V x V matrix
    *arrayLength = *numOfVertices * *numOfVertices;
    printf("Broj vrhova je %d, pa je velicina listne repreznetacije grafa %d\n", *numOfVertices, *arrayLength);
    // store whether matrices are small enough to print
    bool* print_req = (bool*)std::malloc(sizeof(bool));
    *print_req = print_threshold_check(*numOfVertices);
    if (*print_req)
    {
        printf("\nBroj vrhova je manji od limita printanja pa printam sve podatke matrica u konzolu.\n\n");
    }
    else
    {
        printf("\nBroj vrhova je veci od limita printanja pa ne printam podatke matrica u konzolu.\n\n");
    }
    // ============================== CPU ==============================
    clock_t cpu_start, cpu_end;   // timestamps for one dijkstra run
    double cpu_total_time = 0;    // accumulated CPU time in seconds
    // initial graph, one-source scratch result, and full V x V result matrix
    float* graph = (float*)malloc(*arrayLength * sizeof(float));
    float* result = (float*)malloc(*arrayLength * sizeof(float));
    float* result_graph = (float*)malloc(*arrayLength * sizeof(float));
    createGraph(graph, *arrayLength);
    printf("\nGraf inicijaliziran i popunjen nasuminim vrijednostima.");
    // fix: 0.0f instead of int 0 - %f requires a floating-point argument
    printf("\nTezine veza u grafu su u intervalu [%5.1f, %5.1f].\n", 0.0f, (float)MAX_WEIGHT);
    if (*print_req)
    {
        printGraph(graph, *arrayLength);
    }
    if (*print_req)
    {
        printf("\nPrintam udaljenost od svakog vrha (CPU):\n");
    }
    // run dijkstra from every source vertex and collect the results
    for (int vertex = 0; vertex < *numOfVertices; vertex++)
    {
        cpu_start = clock();
        dijkstra_cpu(graph, vertex, *numOfVertices, result);
        cpu_end = clock();
        // add this iteration's time to the total CPU time count
        cpu_total_time += (((double)cpu_end - (double)cpu_start) / CLOCKS_PER_SEC);
        if (*print_req)
        {
            print_solution(vertex, result, *numOfVertices);
        }
        populate_result_array(vertex, result, result_graph, *numOfVertices, *arrayLength);
    }
    // ============================== GPU ==============================
    // CUDA events to measure GPU kernel time
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // host-side buffer for the GPU result
    float* gpu_result = (float*)malloc(*arrayLength * sizeof(float));
    float* dev_graph;
    float* dev_result;
    bool* dev_visited;
    hipMalloc( (void**)&dev_graph, (*arrayLength * sizeof(float)) );
    hipMalloc( (void**)&dev_result, (*arrayLength * sizeof(float)) );
    hipMalloc( (void**)&dev_visited, (*arrayLength * sizeof(bool)) );
    // copy the CPU-generated graph to the device
    hipMemcpy(dev_graph, graph, (*arrayLength * sizeof(float)), hipMemcpyHostToDevice);
    // initialize result/visited buffers on the GPU
    gpu_setUpGraph << < *numOfVertices, *numOfVertices >> > (dev_result, dev_visited);
    hipEventRecord(start); // start of GPU calculations
    // perform dijkstra with ALL vertices as src, one thread each
    gpu_dijkstra_threads << < 1, *numOfVertices >> > (dev_graph, dev_result, dev_visited, *numOfVertices);
    hipEventRecord(stop); // end of GPU calculations
    hipEventSynchronize(stop);
    float gpu_total_time = 0; // elapsed GPU time, reported in milliseconds
    hipEventElapsedTime(&gpu_total_time, start, stop);
    // copy the GPU result back to the host
    hipMemcpy(gpu_result, dev_result, (*arrayLength * sizeof(float)), hipMemcpyDeviceToHost);
    if (*print_req)
    {
        // printing by-vertex solutions
        printf("\nPrintam udaljenost od svakog vrha (GPU):\n");
        for (int v = 0; v < *numOfVertices; v++)
        {
            print_solution_interval(v, gpu_result, *numOfVertices, *arrayLength);
        }
    }
    if (*print_req)
    {
        // printing resulting graph of CPU calculations
        printf("\nIspisujem rezultantnu matricu sa CPU (host):\n");
        printGraph(result_graph, *arrayLength);
        // printing resulting graph of GPU calculations
        printf("\nIspisujem razultatnu marticu sa GPU (device):\n");
        printGraph(gpu_result, *arrayLength);
    }
    // compare the two resulting arrays
    printf("\nUsporedujem...\n");
    compare_results(result_graph, gpu_result, *arrayLength);
    printf("\n\nIspisujem vremena:\n");
    printf("Potrebno vrijeme na CPU-u: %.10fs\n", cpu_total_time);
    // fix: events report milliseconds, so convert with * 0.001 (was * 0.0001)
    printf("Potrebno vrijeme na GPU-u: %.10fs\n", gpu_total_time * 0.001);
    printf("Vrijeme potrebno za izracun je %.10fs manje na GPU-u nego na CPU-u.", compare_times((double)cpu_total_time, gpu_total_time * 0.001));
    // memory management
    // CPU
    free(numOfVertices);
    free(arrayLength);
    free(print_req);
    free(graph);
    free(result);
    free(result_graph);
    free(gpu_result);
    // GPU
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(dev_graph);
    hipFree(dev_result);
    hipFree(dev_visited);
    return 0;
}
| ece9388fc1f9c24dc5290579e72a6726bbabf490.cu | /**
* \file cuda_dijkstra.c
* \brief Dokumentirana datoteka.
*
* Datoteka u kojoj su dokumentirane funkcije.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <iostream>
#include <time.h>
#define PRINT_THRESHOLD 10 // Number of vertices threshold to stop printing data
#define MAX_WEIGHT 100 // Max edge weight ([0, MAX_WEIGHT])
/**
 * \brief Returns true when the vertex count is below PRINT_THRESHOLD,
 *        i.e. when the matrices are small enough to print to the console.
 *
 * \param V number of vertices
 * \return true if printing is allowed, false otherwise
 */
bool print_threshold_check(int V)
{
    // single comparison replaces the original if/else ladder
    return V < PRINT_THRESHOLD;
}
/**
 * \brief Fills the adjacency-matrix graph with random symmetric edge weights
 *        in [0, MAX_WEIGHT - 1]; the diagonal (self-loops) is set to 0.
 *
 * \param graph array representation of a graph (row-major, dim x dim)
 * \param N total number of array elements (dim * dim); must be a perfect square
 */
void createGraph(float *graph, int N)
{
    // hoist the loop-invariant sqrt(N): the original recomputed it in every
    // loop test and inside every index expression
    const int dim = (int)sqrt((double)N);
    srand((unsigned)time(0));
    for (int col = 0; col < dim; col++)
    {
        for (int row = 0; row < dim; row++)
        {
            if (col != row)
            {
                graph[row * dim + col] = rand() % MAX_WEIGHT; // assign random weight
                // mirror the value so the matrix stays symmetric (undirected graph)
                graph[col * dim + row] = graph[row * dim + col];
            }
            else
            {
                graph[col * dim + row] = 0; // no self-loops allowed
            }
        }
    }
}
/**
 * \brief Prints the graph (array representation) to the console as a matrix.
 *
 * \param graph array representation of a graph
 * \param size size of the array (numOfVertices^2)
 */
void printGraph(float *graph, int size)
{
    // hoist the loop-invariant row length of the square matrix
    const int dim = (int)sqrt((double)size);
    // fix: the original literal was "\Graf" - '\G' is an invalid escape
    // sequence; a newline was clearly intended
    printf("\nGraf:\n");
    for (int index = 0; index < size; index++)
    {
        // break the line after each complete matrix row
        if (((index + 1) % dim) == 0)
        {
            printf("%5.1f\n", graph[index]);
        }
        else
        {
            printf("%5.1f ", graph[index]);
        }
    }
    printf("\n");
}
/**
 * \brief Finds the not-yet-processed vertex with the smallest tentative distance.
 *
 * \param dist array of tentative distances from the source
 * \param sptSet per-vertex flags; true if the vertex is already in the SPT
 * \param V number of vertices
 * \return index of the closest unprocessed vertex, or -1 if every vertex is processed
 */
int min_distance_cpu(float *dist, bool *sptSet, int V)
{
    float min = INT_MAX;
    // fix: the index is an int - the original declared it float (relying on an
    // implicit float->int conversion at return) and left it uninitialized when
    // every vertex was already processed
    int min_index = -1;
    // Find minimum distance (ties resolve to the highest index because of <=)
    for (int v = 0; v < V; v++)
    {
        if (!sptSet[v] && dist[v] <= min)
        {
            min = dist[v];
            min_index = v;
        }
    }
    return min_index;
}
/**
 * \brief Runs Dijkstra's algorithm from one source vertex on the CPU.
 *
 * \param graph (array) V x V adjacency matrix with weighted edges and no loops
 * \param src (int) index of the source vertex
 * \param V number of vertices
 * \param result (array) receives the shortest distance from src to every vertex
 */
void dijkstra_cpu(float *graph, int src, int V, float *result)
{
    // sptSet[i] is true once vertex i's shortest distance is finalized
    bool* sptSet = (bool*)malloc(V * sizeof(bool));
    // Initialize all distances as INFINITE and sptSet[] as false
    for (int i = 0; i < V; i++)
    {
        result[i] = INT_MAX;
        sptSet[i] = false;
    }
    // Distance of source vertex from itself is always 0
    result[src] = 0;
    // Finalize one vertex per iteration
    for (int count = 0; count < V-1; count++)
    {
        // Pick the minimum distance vertex from the set of vertices not
        // yet processed. "u" equals "src" in the first iteration.
        int u = min_distance_cpu(result, sptSet, V);
        // Mark the picked vertex as processed
        sptSet[u] = true;
        // Relax all edges (u, v)
        for (int v = 0; v < V; v++)
        {
            // Update result[v] only if v is unprocessed, the edge (u, v)
            // exists, u is reachable, and the path through u is shorter
            if (
                !sptSet[v]
                && graph[(u * V) + v] && result[u] != INT_MAX
                && result[u] + graph[(u * V) + v] < result[v]
                )
            {
                result[v] = result[u] + graph[(u * V) + v];
            }
        }
    }
    // fix: the original leaked this scratch buffer on every call
    free(sptSet);
}
/**
 * \brief Prints the shortest distance from source vertex "src" to every vertex.
 *
 * \param src (int) index of the source vertex
 * \param dist (array) distances from src, one entry per vertex
 * \param V number of vertices
 */
void print_solution(int src, float *dist, int V)
{
    printf("\n Vrh Udaljenost of izvora %d\n", src);
    // one output line per vertex
    int v = 0;
    while (v < V)
    {
        printf("%d \t\t %.1f\n", v, dist[v]);
        ++v;
    }
}
/**
 * \brief Prints one source's distances out of the full V x V result array by
 *        striding through it with step V, starting at index "src".
 *
 * \param src (int) index of the source vertex
 * \param dist (array) full result array holding all sources' distances
 * \param V number of vertices (= stride between consecutive entries)
 * \param size size of the array (numOfVertices^2)
 */
void print_solution_interval(int src, float* dist, int V, int size)
{
    printf("\n Vrh Udaljenost of izvora %d\n", src);
    // walk every V-th element belonging to this source
    int idx = src;
    while (idx < size)
    {
        printf("%d \t\t %.1f\n", idx, dist[idx]);
        idx += V;
    }
}
/**
 * \brief Scatters one vertex's Dijkstra distances into the full result array:
 *        dist[v] is stored at result[v * V + src].
 *
 * \param src (int) index of the source vertex
 * \param dist (array) per-vertex distances produced for this source
 * \param result (array) full V x V result array being filled in
 * \param V number of vertices
 * \param size size of the array (numOfVertices^2); kept for interface
 *             compatibility, not used
 */
void populate_result_array(int src, float *dist, float *result, int V, int size)
{
    for (int v = 0; v < V; ++v)
    {
        result[v * V + src] = dist[v];
    }
}
/**
 * \brief GPU kernel that prepares the helper arrays for the Dijkstra runs.
 *
 * Marks every vertex as not visited and initializes every tentative distance
 * to "infinity" (INT_MAX, implicitly converted to float), except the
 * distance from each vertex to itself, which is set to 0.
 *
 * NOTE(review): assumes a <<<V, V>>> launch (blockDim.x == V), so that each
 * block initializes one row of the V x V matrices and
 * blockDim.x * blockIdx.x + blockIdx.x is row blockIdx.x's diagonal element —
 * confirm against the launch site.
 *
 * \param result  device array (V*V floats) of per-source tentative distances
 * \param visited device array (V*V bools) of per-source visited flags
 */
__global__ void gpu_setUpGraph(float* result, bool* visited) {
    // Flat global thread id; with a <<<V, V>>> launch this addresses one cell
    // of a V x V matrix.
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // Initially no vertex has been visited.
    visited[index] = false;
    // Distance to self is 0; every other distance starts at "infinity"
    // (INT_MAX is large enough and converts cleanly to float).
    if (index == ((blockDim.x * blockIdx.x) + blockIdx.x)) // "index" goes through every global threadId, and this matches it to the x*x place in the matrix representation
        result[index] = 0; // distance to itself is always 0
    else result[index] = INT_MAX;
}
/**
 * \brief GPU kernel: each thread runs Dijkstra's algorithm for its own source vertex.
 *
 * Thread t (in a single block) computes shortest paths from source vertex t,
 * reading and writing the t-th row (offset V * t) of the "result" and
 * "visited" arrays, which must be pre-initialized by gpu_setUpGraph.
 * Launch with one block of V threads (so V <= 1024).
 *
 * Fix: the running minimum is now tracked as a float. The original stored it
 * in an int, truncating fractional distances (e.g. 2.5 -> 2) so that the
 * wrong next vertex could be selected whenever edge weights are non-integral.
 * "u" is also initialized to avoid reading an indeterminate value.
 *
 * \param graph   adjacency matrix (V x V floats), 0 meaning "no edge"
 * \param result  per-source distance matrix (V x V floats)
 * \param visited per-source visited flags (V x V bools)
 * \param V       number of vertices
 */
__global__ void gpu_dijkstra_threads(float* graph, float* result, bool* visited, int V) {
    // Base offset of this thread's row in result[]/visited[].
    const int row = V * threadIdx.x;
    // Dijkstra adds one vertex to the shortest-path tree per iteration,
    // so V-1 iterations settle every vertex.
    for (int count = 0; count < V - 1; count++)
    {
        // Pick the unvisited vertex with the minimum tentative distance.
        float min = INT_MAX;
        int u = 0;
        for (int v = 0; v < V; v++) {
            if (visited[row + v] == false && result[row + v] <= min) {
                min = result[row + v];
                u = v;
            }
        }
        // Mark the picked vertex as processed.
        visited[row + u] = true;
        // Relax every edge leaving u.
        for (int v = 0; v < V; v++) {
            // Update only if v is unvisited, an edge u->v exists, u is
            // reachable, and the path through u beats the current estimate.
            if (!visited[row + v] && graph[(u * V) + v] && result[row + u] != INT_MAX
                && result[row + u] + graph[(u * V) + v] < result[row + v])
                result[row + v] = result[row + u] + graph[(u * V) + v];
        }
    }
}
/**
 * \brief Compares two arrays element-wise and reports any mismatches.
 *
 * Prints an error message for every index where the values differ.
 * Fix: the success message is now printed only when no mismatch was found;
 * the original printed it unconditionally, even right after reporting errors.
 *
 * \param graph1 first array (CPU result)
 * \param graph2 second array (GPU result)
 * \param size   number of elements in each array
 */
void compare_results(float *graph1, float *graph2, int size)
{
    bool identical = true;
    for (int i = 0; i < size; i++)
    {
        // Exact float comparison is intentional here: both sides are expected
        // to perform the identical sequence of additions.
        if (graph1[i] != graph2[i])
        {
            identical = false;
            printf("\n\n GRESKA:\n");
            printf("Vrijednost grafa na poziciji %d se ne podudaraju! : [%5.1f, %5.1f]", i, graph1[i], graph2[i]);
        }
    }
    if (identical)
    {
        printf("CPU i GPU matice se podudaraju.");
    }
}
/**
 * \brief Returns the signed difference t1 - t2 between two time measurements.
 *
 * NOTE(review): the original comment claimed t2 is assumed greater than t1,
 * but the value returned is simply t1 - t2 (positive when t1 is larger);
 * callers pass (cpu_time, gpu_time).
 *
 * \param t1 first time value
 * \param t2 second time value
 */
double compare_times(float t1, float t2)
{
    float delta = t1 - t2; // subtraction performed in float, as before
    return delta;          // widened to double on return
}
/**
 * \brief Entry point: runs all-pairs Dijkstra on the CPU and the GPU,
 *        compares the two results and reports the timings.
 *
 * Fixes relative to the original:
 *  - printf("%5.1f", 0) passed an int where a double is required by the
 *    format string (undefined behavior); now passes 0.0f.
 *  - cudaEventElapsedTime() reports milliseconds; the ms->s conversion used
 *    a factor of 0.0001 instead of 0.001 (in both places it was used).
 *  - the scanf() result is now checked; print_req is freed; the CUDA events
 *    are destroyed.
 */
int main()
{
    // NOTE:
    // All printing threshold checks will be performed in the main function as to maintain printing functions reusability
    /**************************** TAKE USER INPUT *****************************/
    int* numOfVertices = (int*)std::malloc(sizeof(int));
    int* arrayLength = (int*)std::malloc(sizeof(int));
    // PROMPT USER FOR # OF VERTICES
    printf("\nUnesite broj vrhova: ");
    // Robustness fix: abort on invalid input instead of reading an
    // indeterminate vertex count.
    if (scanf("%d", numOfVertices) != 1 || *numOfVertices <= 0)
    {
        printf("Neispravan unos.\n");
        free(numOfVertices);
        free(arrayLength);
        return 1;
    }
    // WILL BE AN ARRAY REPRESENTATION OF A MATRIX
    *arrayLength = *numOfVertices * *numOfVertices;
    printf("Broj vrhova je %d, pa je velicina listne repreznetacije grafa %d\n", *numOfVertices, *arrayLength);
    // Store the print requirement bool
    bool* print_req = (bool*)std::malloc(sizeof(bool));
    *print_req = print_threshold_check(*numOfVertices);
    // Writing to console if the data will be printed or not
    if (*print_req)
    {
        printf("\nBroj vrhova je manji od limita printanja pa printam sve podatke matrica u konzolu.\n\n");
    }
    else
    {
        printf("\nBroj vrhova je veci od limita printanja pa ne printam podatke matrica u konzolu.\n\n");
    }
    // ============================== CPU ==============================
    // Variables to store time for dijkstra on CPU
    clock_t cpu_start, cpu_end;
    // Variable to store total time (seconds)
    double cpu_total_time = 0;
    // Allocate CPU memory for initial and resulting graphs
    float* graph = (float*)malloc(*arrayLength * sizeof(float));
    float* result = (float*)malloc(*arrayLength * sizeof(float));
    float* result_graph = (float*)malloc(*arrayLength * sizeof(float));
    // Fill the graph with data
    createGraph(graph, *arrayLength);
    printf("\nGraf inicijaliziran i popunjen nasumičnim vrijednostima.");
    // Fix: %5.1f requires a floating-point vararg; the original passed the int 0.
    printf("\nTezine veza u grafu su u intervalu [%5.1f, %5.1f].\n", 0.0f, (float)MAX_WEIGHT);
    // print the graph
    if (*print_req)
    {
        printGraph(graph, *arrayLength);
    }
    if (*print_req)
    {
        printf("\nPrintam udaljenost od svakog vrha (CPU):\n");
    }
    // Run the dijkstra on every vertex and populate the full result array
    for (int vertex = 0; vertex < *numOfVertices; vertex++)
    {
        cpu_start = clock(); // start time of this iteration
        // dijkstra calculates shortest paths for source vertex "vertex"
        dijkstra_cpu(graph, vertex, *numOfVertices, result);
        cpu_end = clock(); // end time of this iteration
        // Add time required for this dijkstra iteration to the total CPU time count
        cpu_total_time += (((double)cpu_end - (double)cpu_start) / CLOCKS_PER_SEC);
        if (*print_req)
        {
            // print the solution for that vertex
            print_solution(vertex, result, *numOfVertices);
        }
        // Fill the data for this dijkstra iteration in a full resulting graph
        populate_result_array(vertex, result, result_graph, *numOfVertices, *arrayLength);
    }
    // ============================== GPU ==============================
    // create GPU CUDA events to measure times
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Host buffer that will receive the GPU-computed distances
    float* gpu_result = (float*)malloc(*arrayLength * sizeof(float));
    // Initialize GPU variables
    float* dev_graph;
    float* dev_result;
    bool* dev_visited;
    // Allocate memory for device variables
    cudaMalloc((void**)&dev_graph, (*arrayLength * sizeof(float)));
    cudaMalloc((void**)&dev_result, (*arrayLength * sizeof(float)));
    cudaMalloc((void**)&dev_visited, (*arrayLength * sizeof(bool)));
    // Copy CPU generated graph data to the device (GPU)
    cudaMemcpy(dev_graph, graph, (*arrayLength * sizeof(float)), cudaMemcpyHostToDevice);
    // Set up data on the GPU for dijkstra calculations.
    // NOTE(review): both launches below assume numOfVertices <= 1024
    // (the per-block thread limit) — confirm for large inputs.
    gpu_setUpGraph<<<*numOfVertices, *numOfVertices>>>(dev_result, dev_visited); // Every block initializes one matrix row
    cudaEventRecord(start); // Mark the event of start of GPU calculations
    // Perform dijkstra with ALL vertices as src vertex, one thread per source
    //gpu_dijkstra_blocks << < *numOfVertices, 1 >> > (dev_graph, dev_result, dev_visited, *numOfVertices);
    gpu_dijkstra_threads<<<1, *numOfVertices>>>(dev_graph, dev_result, dev_visited, *numOfVertices);
    cudaEventRecord(stop); // Mark the event of end of GPU calculations
    cudaEventSynchronize(stop);
    float gpu_total_time = 0; // total GPU time, in milliseconds
    cudaEventElapsedTime(&gpu_total_time, start, stop); // elapsed time between the two events (ms)
    // Copy result from GPU calculations to CPU (host)
    cudaMemcpy(gpu_result, dev_result, (*arrayLength * sizeof(float)), cudaMemcpyDeviceToHost);
    if (*print_req)
    {
        // Printing by-vertex solutions
        printf("\nPrintam udaljenost od svakog vrha (GPU):\n");
        for (int v = 0; v < *numOfVertices; v++)
        {
            print_solution_interval(v, gpu_result, *numOfVertices, *arrayLength);
        }
    }
    if (*print_req)
    {
        // Printing resulting graph of CPU calculations
        printf("\nIspisujem rezultantnu matricu sa CPU (host):\n");
        printGraph(result_graph, *arrayLength);
        // Printing resulting graph of GPU calculations
        printf("\nIspisujem razultatnu marticu sa GPU (device):\n");
        printGraph(gpu_result, *arrayLength);
    }
    // Compare the two resulting arrays
    printf("\nUsporedujem...\n");
    compare_results(result_graph, gpu_result, *arrayLength);
    printf("\n\nIspisujem vremena:\n");
    printf("Potrebno vrijeme na CPU-u: %.10fs\n", cpu_total_time);
    // Fix: cudaEventElapsedTime() returns milliseconds, so seconds = ms * 0.001
    // (the original multiplied by 0.0001).
    printf("Potrebno vrijeme na GPU-u: %.10fs\n", gpu_total_time * 0.001f);
    printf("Vrijeme potrebno za izracun je %.10fs manje na GPU-u nego na CPU-u.", compare_times((float)cpu_total_time, gpu_total_time * 0.001f)); // Compare the times
    // Some more memory management
    //CPU
    free(numOfVertices);
    free(arrayLength);
    free(print_req); // fix: was leaked
    free(graph);
    free(result);
    free(result_graph);
    free(gpu_result);
    // GPU
    cudaEventDestroy(start); // fix: events were leaked
    cudaEventDestroy(stop);
    cudaFree(dev_graph);
    cudaFree(dev_result);
    cudaFree(dev_visited);
    return 0;
}
|
39bdbded47d1a03c277347737de1c1fb0f5acef1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cunnx_BlockSparse_updateGradOutput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/**
 * \brief Auto-generated benchmark driver (HIP): times
 * cunnx_BlockSparse_updateGradOutput_kernel over a set of matrix sizes
 * (count taken from argv[1]) and 20 launch-block configurations, printing
 * [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
 */
int main(int argc, char **argv) {
// Use device 0 for all subsequent HIP calls.
hipSetDevice(0);
// Number of matrix sizes to benchmark, parsed from the first CLI argument.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
// Matrix dimensions and launch-block dimensions for this run.
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): every hipMalloc below passes XSIZE*YSIZE as the BYTE count
// although the buffers hold floats — likely undersized by sizeof(float);
// confirm against the kernel's indexing. Buffers are also never freed
// across iterations.
float *_gradOutput = NULL;
hipMalloc(&_gradOutput, XSIZE*YSIZE);
float *gradOutputScale = NULL;
hipMalloc(&gradOutputScale, XSIZE*YSIZE);
const float *gradOutput = NULL;
hipMalloc(&gradOutput, XSIZE*YSIZE);
const float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
const float *outputScale = NULL;
hipMalloc(&outputScale, XSIZE*YSIZE);
int outputWindowSize = XSIZE*YSIZE;
int outputSize = XSIZE*YSIZE;
// Round the launch domain up to the next multiple of the block size.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces lazy context creation before the first (sanity) launch.
hipFree(0);hipLaunchKernelGGL((
cunnx_BlockSparse_updateGradOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, _gradOutput,gradOutputScale,gradOutput,output,outputScale,outputWindowSize,outputSize);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cunnx_BlockSparse_updateGradOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, _gradOutput,gradOutputScale,gradOutput,output,outputScale,outputWindowSize,outputSize);
}
// Timed launches. NOTE(review): there is no hipDeviceSynchronize() before
// "end", so this measures launch overhead rather than kernel execution time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cunnx_BlockSparse_updateGradOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, _gradOutput,gradOutputScale,gradOutput,output,outputScale,outputWindowSize,outputSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
// Emit [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] for this configuration.
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 39bdbded47d1a03c277347737de1c1fb0f5acef1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cunnx_BlockSparse_updateGradOutput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *_gradOutput = NULL;
cudaMalloc(&_gradOutput, XSIZE*YSIZE);
float *gradOutputScale = NULL;
cudaMalloc(&gradOutputScale, XSIZE*YSIZE);
const float *gradOutput = NULL;
cudaMalloc(&gradOutput, XSIZE*YSIZE);
const float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
const float *outputScale = NULL;
cudaMalloc(&outputScale, XSIZE*YSIZE);
int outputWindowSize = XSIZE*YSIZE;
int outputSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cunnx_BlockSparse_updateGradOutput_kernel<<<gridBlock,threadBlock>>>(_gradOutput,gradOutputScale,gradOutput,output,outputScale,outputWindowSize,outputSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cunnx_BlockSparse_updateGradOutput_kernel<<<gridBlock,threadBlock>>>(_gradOutput,gradOutputScale,gradOutput,output,outputScale,outputWindowSize,outputSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cunnx_BlockSparse_updateGradOutput_kernel<<<gridBlock,threadBlock>>>(_gradOutput,gradOutputScale,gradOutput,output,outputScale,outputWindowSize,outputSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f3e19c3da56a12bad118b47f282b2ee8f7e801ce.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// CUDA sample demonstrating a Double precision GEMM computation using the Warp
// Matrix Multiply and Accumulate API introduced in CUDA 11.0.
// In this program, the compute_dgemm kernel computes the result of a matrix multiplication
// and addition: D = alpha * A * B + beta * C. The dimensions of both C and D matrices
// are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x K_GLOBAL (row-major), the B matrix
// is K_GLOBAL x N_GLOBAL (column-major).
// In that kernel, each CTA computes one 64 x 64 tile of the resulting matrix
// per iteration. When the tile is computed, the CTA stores it to the global memory
// and begins a new iteration, selecting a new 64 x 64 tile to compute.
// Each CTA consists of eight warps. For the 64 x 64 tile, each warp computes eight
// 8 x 8 subtiles, organized in a 2 x 4 two-dimensional array.
// Warps compute the 8 x 8 subtiles using nvcuda::wmma::mma_sync operations by
// moving through the K_GLOBAL dimension of the A and B matrices and accumulating
// the intermediate result in the local thread state.
// There are a number of simple optimizations used in the algorithm:
// - The CTA copies the 64 x 64 tile of the C matrix from the global memory to
// shared memory. After that is done, each warp loads the C matrix fragments from
// shared memory, thus avoiding a random global memory access.
// - On each internal iteration, the CTA copies a portion of the A and B matrices from
// global memory to shared memory. After that, all warps in the CTA reuse the A and B
// data from shared memory, thus reducing the number of data copies from global memory.
// - The portions of the A and B matrices are stored in shared memory with an additional
// padding (skew) to reduce the number of shared memory access bank conflicts.
// (See a detailed explanation near the SKEW_DOUBLE macro definition.)
// - When the CTA finishes computing the tiles of the resulting matrix, each warp stores
// its subtiles to shared memory. The CTA then copies the shared memory contents to
// global memory, again avoiding redundant random global memory accesses.
// - Note that the CTA tile size is chosen to maximize the GPU register utilization,
// but carefully enough to avoid local memory use.
#include <assert.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <mma.h>
#include <hip/hip_cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
#include <cuda/std/type_traits>
#include <cuda/barrier>
#include <cuda/pipeline>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
// Externally configurable parameters.
#ifndef CPU_DEBUG
// Set this to 1 to verify the correctness of the GPU-computed matrix.
#define CPU_DEBUG 0
#endif
#ifndef SHARED_MEMORY_LIMIT_64K
// Set this to 0 to use more than 64 Kb of shared memory to cache data, to
// improve the performance of the computations on GPU.
// Note that you need a GPU that can have more than 64 Kb of shared memory
// per multiprocessor.
#define SHARED_MEMORY_LIMIT_64K 0
#endif
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 4
// GEMM configuration.
#define M_TILES 1024
#define N_TILES 1024
#define K_TILES 1024
#define M_GLOBAL (M * M_TILES)
#define N_GLOBAL (N * N_TILES)
#define K_GLOBAL (K * K_TILES)
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#if SHARED_MEMORY_LIMIT_64K
// With only 64 Kb shared memory available, we can fit 8x16-tile chunks of each
// the A and B matrix data, that are (M = 8) * (K = 4) * 8 * (CHUNK_K = 16) * sizeof(double) = 32 Kb each
// But we cannot account the 4 Kb total skew overhead, without which the performance
// would be severely impacted. So we choose to reduce the chunk size in half,
// i.e. the amount of A and B matrix data we cache in shared memory.
// Accordingly, this doubles the number of outer iterations across the global K
// dimension, which only slightly impacts the performance.
#define CHUNK_K 8
#else
#define CHUNK_K 16
#endif
#define CHUNK_LINE_BYTES (CHUNK_K * K * sizeof(double))
#define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4))
#define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES)
#define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP)
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B matrix
// in shared memory to minimize possible bank conflicts.
// Before performing the nvcuda::wmma::mma_sync operation, the warp must load the matrix
// data using the nvcuda::wmma::load_matrix_sync operation. Although the memory access pattern
// is not specified for that function, each lane in the warp can read one or multiple matrix
// elements from different matrix rows or columns.
// For shared memory, such access can result in bank conflicts if different rows / columns
// of the matrix map to the same bank. By shifting each row and column by a few bytes, we
// make sure that they map to different banks, thus reducing the number of possible bank
// conflicts.
// The number of 4 eight-byte "double" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by nvcuda::wmma::load_matrix_sync.
#define SKEW_DOUBLE 4
#define checkKernelErrors(expr) do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, # expr, hipGetErrorString(__err)); \
abort(); \
} \
} while(0)
enum kernels
{
dmma_shmem_gemm_async_copy = 0, // DMMA shmem using kernel with async_copy
dmma_shmem_gemm_cg_async_copy = 1, // DMMA shmem using kernel with cooperative groups async_copy
dmma_shmem_gemm = 2, // DMMA shmem using kernel normal copy (without async_copy).
simple_dmma_gemm = 3 // DMMA non-shmem using simple kernel.
};
const char* kernelNames[] = {"compute_dgemm_async_copy", "compute_dgemm_cg_async_copy",
"compute_dgemm", "simple_wmma_gemm"};
using namespace nvcuda;
namespace cg = cooperative_groups;
/**
 * Fills the host-side matrices with small random integer values in [0, 2]:
 * A is M_GLOBAL x K_GLOBAL (row-major), B is N_GLOBAL x K_GLOBAL, and
 * C is M_GLOBAL x N_GLOBAL. Uses rand(), so results follow the current seed.
 */
__host__ void init_host_matrices(double *a, double *b, double *c)
{
  // A: one rand() call per element, row-major over K_GLOBAL columns.
  for (int row = 0; row < M_GLOBAL; row++)
    for (int col = 0; col < K_GLOBAL; col++)
      a[row * K_GLOBAL + col] = (double)(rand() % 3);

  // B: same layout, N_GLOBAL rows of K_GLOBAL entries.
  for (int row = 0; row < N_GLOBAL; row++)
    for (int col = 0; col < K_GLOBAL; col++)
      b[row * K_GLOBAL + col] = (double)(rand() % 3);

  // C: filled as one flat M_GLOBAL * N_GLOBAL array.
  for (int idx = 0; idx < M_GLOBAL * N_GLOBAL; idx++)
    c[idx] = (double)(rand() % 3);
}
/**
 * Computes D = alpha * A * B + beta * C in double precision using tensor-core
 * (DMMA) operations via nvcuda::wmma, staging all tiles through shared memory.
 *
 * Each CTA (WARPS_PER_BLOCK = 8 warps) acts as a persistent worker: it claims
 * 64 x 64 tiles of D in a grid-stride fashion until none remain. Per tile,
 * each warp computes a WARP_COL_TILES x WARP_ROW_TILES (2 x 4) grid of
 * 8 x 8 subtiles, accumulating over the K_GLOBAL dimension in CHUNK_K-wide
 * slices of A (row-major) and B (column-major) cached in shared memory with
 * SKEW_DOUBLE padding to reduce bank conflicts.
 *
 * Requires SM80+ (double-precision WMMA); the body is compiled out otherwise.
 * Expects dynamic shared memory sized for the A/B chunks plus the C/D tile.
 */
__global__ void compute_dgemm(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
  extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];

  // Warp and lane identification.
  const unsigned int warpId = threadIdx.x / WARP_SIZE;
  const unsigned int laneId = threadIdx.x % WARP_SIZE;

  // Offset in shared memory from which the B matrix is stored.
  const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;

  // This pointer is used to access the C and D matrix tiles this warp computes.
  double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId / BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET;

  // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
  double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;

  // Adjust the beta scaler, as it'll be multiplied by alpha at the end of
  // each tile computation. Technically this is not generally correct (may result
  // in a loss of precision). Zero still needs to be specially handled though.
  beta /= alpha;

  // Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
  // right and down, and selects the next tile to compute. Once there's no such tile,
  // all warps in this CTA exit.
  for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
    const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
    const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;

    // Stop when there are no more D matrix tiles to compute in this CTA.
    if (block_tile_i >= M_TILES) {
      break;
    }

    // This warp's pointer to the C matrix data to copy memory from to shared memory.
    const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
    const double *src_gmem_warp_stream_ptr = &C[gmem_idx];

    // Stream multiple C tiles to shared memory using 16-byte (int4) vectorized
    // loads/stores, one row per iteration, one int4 per lane.
    #pragma unroll
    for (int i = 0; i < N; i++) {
      *((int4 *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) =
        *((int4 *)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId);
    }

    __syncthreads();

    // These fragments will accumulate the result of A and B matrix fragment multiplications
    // along the K_GLOBAL dimension.
    wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];

    // Load the C matrix tiles into fragments from shared memory.
    #pragma unroll
    for (int i = 0; i < WARP_COL_TILES; i++) {
      #pragma unroll
      for (int j = 0; j < WARP_ROW_TILES; j++) {
        const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
        wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
      }
    }

    __syncthreads();

    // Scale the C matrix (by beta/alpha; alpha is re-applied at the end).
    #pragma unroll
    for (int i = 0; i < WARP_COL_TILES; i++) {
      #pragma unroll
      for (int j = 0; j < WARP_ROW_TILES; j++) {
        #pragma unroll
        for (int t = 0; t < c[i][j].num_elements; t++) {
          c[i][j].x[t] *= beta;
        }
      }
    }

    // Select what warp copies what matrix to shared memory.
    // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
    const double *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
                                                              (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);

    // Go through the global K dimension by a fixed step at a time.
    #pragma unroll
    for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
      // Copy slices of the A and B matrices to shared memory.
      // The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
      size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
                                                        (N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);

      // First half of the warp copies the first row / column of the matrix,
      // the second half of the warp copies the next.
      const double *lane_ptr = warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL;

      // Shift the second half of the warp to the next row / column in the shared memory.
      shmem_idx += laneId / CHUNK_COPY_LINE_LANES;

      #pragma unroll
      for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP); i++) {
        // Copy 16 bytes at once in each lane.
        *((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) = *((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES));

        // Advance the global memory pointer and the shared memory index.
        lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
        shmem_idx += CHUNK_COPY_LINES_PER_WARP;
      }

      __syncthreads();

      // Compute a grid of C matrix tiles in each warp.
      #pragma unroll
      for (int k_step = 0; k_step < CHUNK_K; k_step++) {
        wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
        wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];

        #pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
          size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
          const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];

          wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);

          #pragma unroll
          for (int j = 0; j < WARP_ROW_TILES; j++) {
            if (i == 0) {
              // Load the B matrix fragment once, because it is going to be reused
              // against the other A matrix fragments.
              size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
              const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];

              wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
            }

            wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
          }
        }
      }

      __syncthreads();
    }

    // Store the D fragments to shared memory.
    #pragma unroll
    for (int i = 0; i < WARP_COL_TILES; i++) {
      #pragma unroll
      for (int j = 0; j < WARP_ROW_TILES; j++) {
        // Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
        // warp are well-defined even though element indices within fragment storage are not defined.
        #pragma unroll
        for (int t = 0; t < c[i][j].num_elements; t++)
          c[i][j].x[t] *= alpha;

        double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;

        wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
      }
    }

    __syncthreads();

    // Now that shared memory contains all the D tiles, stream them to global memory.
    double *dst_gmem_warp_stream_ptr = &D[gmem_idx];

    #pragma unroll
    for (int i = 0; i < N; i++) {
      *((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
        *((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
    }

    __syncthreads();
  }
#endif
}
__global__ void compute_dgemm_async_copy(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
constexpr size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
double *shmem_warp_tile_ptr = &shmem[0][0] + (warpId/BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
double *shmem_warp_stream_ptr = &shmem[0][0] + warpId * SHMEM_STRIDE * N;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
cuda::pipeline<cuda::thread_scope_thread> pipe = cuda::make_pipeline();
const auto shape2 = cuda::aligned_size_t<alignof(double2)>(sizeof(double2));
constexpr int loadStride = 1; // load 2 double, left-shift by 1.
// Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const double *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < N; i++) {
pipe.producer_acquire();
cuda::memcpy_async(&shmem_warp_stream_ptr[(SHMEM_STRIDE * i) + (laneId << loadStride)],
&src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i) + (laneId << loadStride)],
shape2, pipe);
pipe.producer_commit();
}
// Now wait for all the above issued 8 batches to complete.
cuda::pipeline_consumer_wait_prior<0>(pipe);
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
// Scale the C matrix.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
pipe.consumer_release();
// sync here so that shared memory can then be used for loading A & B matrices.
__syncthreads();
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const double *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);
const int stridePerLaneCopy = (laneId / CHUNK_COPY_LINE_LANES);
constexpr int chunksPerLane = ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP);
const int laneLoadElem = (laneId % CHUNK_COPY_LINE_LANES) << loadStride;
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
// As for DMMA M == N we use M for warp 4-7 + shmem_idx_b_off.
size_t shmem_idx = (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) + (shmem_idx_b_off * (warpId/(WARPS_PER_BLOCK/2)));
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
const double *lane_ptr = warp_ptr + tile_k * K + stridePerLaneCopy * K_GLOBAL + laneLoadElem;
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += stridePerLaneCopy;
#pragma unroll
for(int i = 0; i < chunksPerLane; i++) {
// Copy 16 bytes at once in each lane.
pipe.producer_acquire();
cuda::memcpy_async(&shmem[shmem_idx][laneLoadElem], lane_ptr, shape2, pipe);
pipe.producer_commit();
// Advance the global memory pointer and the shared memory index.
lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
cuda::pipeline_consumer_wait_prior<0>(pipe);
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
pipe.consumer_release();
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global memory.
double *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
#endif
}
// Double-precision GEMM, D = alpha * A * B + beta * C, computed with DMMA
// tensor cores. Global->shared staging copies are expressed with
// cooperative-groups cg::memcpy_async instead of the per-thread cuda::pipeline
// used by compute_dgemm_async_copy; the math pipeline is otherwise identical.
// A is row-major (M_GLOBAL x K_GLOBAL), B is column-major (K_GLOBAL x N_GLOBAL).
// Launch with THREADS_PER_BLOCK threads per block and SHMEM_SZ bytes of
// dynamic shared memory (requires the MaxDynamicSharedMemorySize opt-in).
// Device code requires SM 8.0+; the body compiles away on older architectures.
__global__ void compute_dgemm_cg_async_copy(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
    extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];

    auto cta = cg::this_thread_block();
    auto tile32 = cg::tiled_partition<32>(cta);
    // Partition sized so that one tile cooperatively copies one shared-memory line.
    constexpr int tileChunkCopySize = WARP_SIZE / CHUNK_COPY_LINES_PER_WARP;
    auto tileChunkCopy = cg::tiled_partition<tileChunkCopySize>(cta);

    // Warp and lane identification.
    const unsigned int warpId = threadIdx.x / WARP_SIZE;
    const unsigned int laneId = threadIdx.x % WARP_SIZE;

    // Offset in shared memory from which the B matrix is stored.
    constexpr size_t shmem_idx_b_off = BLOCK_COL_TILES * M;

    // This pointer is used to access the C and D matrix tiles this warp computes.
    double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId/2) * SHMEM_STRIDE * N * 2 + (warpId%2) * SHMEM_OFFSET;

    // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
    double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;

    // Adjust the beta scaler, as it'll be multiplied by alpha at the end of
    // each tile computation. Technically this is not generally correct (may result
    // in a loss of precision). Zero still needs to be specially handled though.
    beta /= alpha;

    // Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
    // right and down, and selects the next tile to compute. Once there's no such tile,
    // all warps in this CTA exit.
    for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
        const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
        const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;

        // Stop when there are no more D matrix tiles to compute in this CTA.
        if (block_tile_i >= M_TILES) {
            break;
        }

        // This warp's pointer to the C matrix data to copy memory from to shared memory.
        const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
        const double *src_gmem_warp_stream_ptr = &C[gmem_idx];

        // Stream multiple C tiles to shared memory.
#pragma unroll
        for (int i = 0; i < N; i++) {
            auto dst_ptr = &shmem_warp_stream_ptr[(SHMEM_STRIDE * i)];
            auto src_ptr = &src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i)];
            // Each 32-thread tile copies one row: tile32.size() * sizeof(double2) bytes.
            cg::memcpy_async(tile32, dst_ptr, src_ptr, cuda::aligned_size_t<alignof(double2)>{tile32.size() * sizeof(double2)});
        }

        cg::wait(cta);

        // These fragments will accumulate the result of A and B matrix fragment multiplications
        // along the K_GLOBAL dimension.
        wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];

        // Load the C matrix tiles into fragments from shared memory.
#pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
            for (int j = 0; j < WARP_ROW_TILES; j++) {
                const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
                wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
            }
        }

        // Scale the C matrix (beta was pre-divided by alpha above).
#pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
            for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
                for (int t = 0; t < c[i][j].num_elements; t++) {
                    c[i][j].x[t] *= beta;
                }
            }
        }

        // sync here so that shared memory can then be used for loading A & B matrices.
        cg::wait(cta);

        // Select what warp copies what matrix to shared memory.
        // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
        // (WARPS_PER_BLOCK/2 rather than a hard-coded 4, matching the sibling kernels.)
        const double *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
                                                                  (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);

        const int stridePerLaneCopy = (laneId / CHUNK_COPY_LINE_LANES);
        constexpr int chunksPerLane = ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP);

        // Go through the global K dimension by a fixed step at a time.
#pragma unroll
        for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
            // Copy slices of the A and B matrices to shared memory.
            // The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
            // As for DMMA M == N we use M for warp 4-7 + shmem_idx_b_off.
            size_t shmem_idx = (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) + (shmem_idx_b_off * (warpId/(WARPS_PER_BLOCK/2)));

            // First half of the warp copies the first row / column of the matrix,
            // the second half of the warp copies the next.
            auto lane_ptr = warp_ptr + tile_k * K + stridePerLaneCopy * K_GLOBAL;

            // Shift the second half of the warp to the next row / column in the shared memory.
            shmem_idx += stridePerLaneCopy;

#pragma unroll
            for(int i = 0; i < chunksPerLane; i++) {
                // Copy 16 bytes at once in each lane.
                auto dst_ptr = &shmem[shmem_idx][0];
                auto src_ptr = lane_ptr;

                cg::memcpy_async(tileChunkCopy, dst_ptr, src_ptr,
                                 cuda::aligned_size_t<alignof(double2)>{tileChunkCopySize * sizeof(double2)});

                // Advance the global memory pointer and the shared memory index.
                lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
                shmem_idx += CHUNK_COPY_LINES_PER_WARP;
            }

            cg::wait(cta);

            // Compute a grid of C matrix tiles in each warp.
#pragma unroll
            for (int k_step = 0; k_step < CHUNK_K; k_step++) {
                wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
                wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];

#pragma unroll
                for (int i = 0; i < WARP_COL_TILES; i++) {
                    size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
                    const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];

                    wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);

#pragma unroll
                    for (int j = 0; j < WARP_ROW_TILES; j++) {
                        if (i == 0) {
                            // Load the B matrix fragment once, because it is going to be reused
                            // against the other A matrix fragments.
                            size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
                            const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];

                            wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
                        }

                        wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
                    }
                }
            }

            cg::sync(cta);
        }

        // Store the D fragments to shared memory.
#pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
            for (int j = 0; j < WARP_ROW_TILES; j++) {
                // Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
                // warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
                for (int t = 0; t < c[i][j].num_elements; t++)
                    c[i][j].x[t] *= alpha;

                double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;

                wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
            }
        }

        cg::sync(cta);

        // Now that shared memory contains all the D tiles, stream them to global memory.
        double *dst_gmem_warp_stream_ptr = &D[gmem_idx];

#pragma unroll
        for (int i = 0; i < N; i++) {
            *((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
                *((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
        }

        cg::sync(cta);
    }
#endif
}
// Performs an MxNxK DGEMM (C=alpha*A*B + beta*C) assuming:
//  1) Matrices are packed in memory.
//  2) M, N and K are multiples of 8, 8 and 4 respectively.
//  3) A is row major, B is column major matrix.
// Note: This is a less performant version of the compute_dgemm kernel. It is designed for
//       demonstration purposes only to show the CUDA WMMA API use without relying on
//       availability of the shared memory.
// Each warp of the 2D launch grid owns one M x N output tile. Requires SM 8.0+.
__global__ void simple_wmma_gemm(double *a, double *b, double *c, double *d, int m_ld, int n_ld, int k_ld, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
    // Leading dimensions. Packed with no transpositions.
    int lda = k_ld;
    int ldb = k_ld;
    int ldc = n_ld;

    // Tile using a 2D grid: one warp per output tile.
    int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
    int warpN = (blockIdx.y * blockDim.y + threadIdx.y);

    // Declare the fragments.
    wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a_frag;
    wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b_frag;
    wmma::fragment<wmma::accumulator, M, N, K, double> acc_frag;
    wmma::fragment<wmma::accumulator, M, N, K, double> c_frag;

    // Zero the accumulator. Use a double literal: the fragment holds doubles,
    // so 0.0f would round through float needlessly.
    wmma::fill_fragment(acc_frag, 0.0);

    // Loop over k.
    for (int i = 0; i < k_ld; i += K) {
        int aCol = i;
        int aRow = warpM * M;
        int bCol = warpN * N;
        int bRow = i;

        // Bounds checking
        if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) {
            // Load the inputs
            wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda);
            wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);

            // Perform the matrix multiplication
            wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
        }
    }

    // Load in the current value of c, scale it by beta, and add this our result scaled by alpha
    int cCol = warpN * N;
    int cRow = warpM * M;

    if (cRow < m_ld && cCol < n_ld) {
        wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc, wmma::mem_row_major);

        for(int i=0; i < c_frag.num_elements; i++) {
            c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
        }

        // Store the output
        wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc, wmma::mem_row_major);
    }
#endif
}
// Host reference GEMM: C = alpha * A * B + beta * C.
// A is row-major (numARows x numAColumns); B is column-major (columns of
// length numBRows); C is row-major (numCRows x numCColumns).  Expects
// numAColumns == numBRows.
// alpha and beta are taken as double (not float as before): the GPU kernels
// compute with double scalars, so the CPU reference must not be computed with
// float-truncated ones.  Callers passing double values are unaffected except
// that truncation no longer occurs.
__host__ void matMultiplyOnHost(double *A, double *B, double *C,
                                double alpha, double beta,
                                int numARows, int numAColumns,
                                int numBRows, int numBColumns,
                                int numCRows, int numCColumns)
{
    for (int i = 0; i < numCRows; i++) {
        for (int j = 0; j < numCColumns; j++) {
            double temp = 0.0;

            for (int k = 0; k < numAColumns; k++) {
                // B matrix is column major. A matrix is row major.
                temp += A[i * numAColumns + k] * B[j * numBRows + k];
            }

            C[i * numCColumns + j] = temp * alpha + beta * C[i * numCColumns + j];
        }
    }
}
// Host driver: allocates and initializes the matrices, selects a kernel from
// the -kernel=<0..3> command-line flag, launches it, optionally verifies the
// result against a CPU reference (CPU_DEBUG), and reports timing/TFLOPS.
int main(int argc, char **argv)
{
    printf("Initializing...\n");

    int dev = findCudaDevice(argc, (const char **)argv);

    hipDeviceProp_t deviceProp;
    checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));

    // Double precision Tensor cores require a GPU of Ampere (SM8X) architecture or higher.
    if (deviceProp.major < 8) {
        printf("dmmaTensorCoreGemm requires SM 8.0 or higher. Exiting...\n");
        exit(EXIT_WAIVED);
    }

    printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES);
    printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES);
    printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES);

    // Host-side matrices.
    double *A_h = NULL;
    double *B_h = NULL;
    double *C_h = NULL;
#if CPU_DEBUG
    double *result_hD = NULL;
    double *result_host = NULL;
#endif

    A_h = (double*) malloc(sizeof(double) * M_GLOBAL * K_GLOBAL);
    B_h = (double*) malloc(sizeof(double) * K_GLOBAL * N_GLOBAL);
    C_h = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
#if CPU_DEBUG
    result_hD = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
    result_host = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
#endif

    // Device-side matrices; D receives the result.
    double *A = NULL;
    double *B = NULL;
    double *C = NULL;
    double *D = NULL;

    checkCudaErrors(hipMalloc((void**)&A, sizeof(double) * M_GLOBAL * K_GLOBAL));
    checkCudaErrors(hipMalloc((void**)&B, sizeof(double) * N_GLOBAL * K_GLOBAL));
    checkCudaErrors(hipMalloc((void**)&C, sizeof(double) * M_GLOBAL * N_GLOBAL));
    checkCudaErrors(hipMalloc((void**)&D, sizeof(double) * M_GLOBAL * N_GLOBAL));

    // The kernels use 16-byte vectorized (int4/double2) accesses; device
    // allocations are expected to be at least 128-byte aligned.
    assert(((unsigned long long)A) % 128 == 0);
    assert(((unsigned long long)B) % 128 == 0);
    assert(((unsigned long long)C) % 128 == 0);
    assert(((unsigned long long)D) % 128 == 0);

    init_host_matrices(A_h, B_h, C_h);

    printf("Preparing data for GPU...\n");

    checkCudaErrors(hipMemcpy(A, A_h, sizeof(double) * M_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(B, B_h, sizeof(double) * N_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(C, C_h, sizeof(double) * M_GLOBAL * N_GLOBAL, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemset(D, 0, sizeof(double) * M_GLOBAL * N_GLOBAL));

    enum {
        // Compute the right amount of shared memory to request.
        // We need shared memory to hold per-CTA C and D matrix tiles, and to cache per-CTA chunks
        // of the A and B matrices. Therefore, the right amount to request is the maximum of those
        // two numbers.
        SHMEM_SZ = MAX(sizeof(double) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_DOUBLE) * 2,
                       M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N * (BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(double))
    };

    printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL);

    // Double literals: 1.1f/1.2f would silently round through float first.
    const double alpha = 1.1;
    const double beta = 1.2;

    hipEvent_t start, stop;

    checkCudaErrors(hipEventCreate(&start));
    checkCudaErrors(hipEventCreate(&stop));
    checkCudaErrors(hipEventRecord(start));

    // kernel to run - default (dmma_shmem_gemm_async_copy == 0)
    kernels selected_kernel = dmma_shmem_gemm_async_copy;

    if (checkCmdLineFlag(argc, (const char **)argv, "kernel")) {
        int kernel_number = getCmdLineArgumentInt(argc, (const char **)argv, "kernel");
        // Reject negative values as well as out-of-range positive ones.
        if (kernel_number >= 0 && kernel_number < 4)
        {
            selected_kernel = (kernels)kernel_number;
        }
        else
        {
            printf("Error: kernel number should be between 0 to 3, you have entered %d\n", kernel_number);
            exit(EXIT_FAILURE);
        }
    }

    // If enough shared memory available on the GPU use high performant kernel
    if ((deviceProp.sharedMemPerMultiprocessor >= SHMEM_SZ) && (selected_kernel != simple_dmma_gemm))
    {
        printf("Computing using high performance kernel = %d - %s\n", selected_kernel, kernelNames[selected_kernel]);

        switch (selected_kernel)
        {
            case dmma_shmem_gemm_async_copy :
            default:
                // Opt in to more than the default dynamic shared memory, then launch.
                // checkKernelErrors must wrap the launch macro, not the other way round.
                checkCudaErrors(hipFuncSetAttribute(reinterpret_cast<const void*>(compute_dgemm_async_copy), hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
                checkKernelErrors((hipLaunchKernelGGL(compute_dgemm_async_copy, dim3(deviceProp.multiProcessorCount*3), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
                break;
            case dmma_shmem_gemm_cg_async_copy :
                checkCudaErrors(hipFuncSetAttribute(reinterpret_cast<const void*>(compute_dgemm_cg_async_copy), hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
                checkKernelErrors((hipLaunchKernelGGL(compute_dgemm_cg_async_copy, dim3(deviceProp.multiProcessorCount*3), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
                break;
            case dmma_shmem_gemm :
                checkCudaErrors(hipFuncSetAttribute(reinterpret_cast<const void*>(compute_dgemm), hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
                checkKernelErrors((hipLaunchKernelGGL(compute_dgemm, dim3(deviceProp.multiProcessorCount*2), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
                break;
        }
#if CPU_DEBUG
        checkCudaErrors(hipMemcpy(result_hD, D, sizeof(double)*M_GLOBAL*N_GLOBAL, hipMemcpyDeviceToHost));
#endif
    }
    else
    {
        dim3 gridDim;
        dim3 blockDim;

        // blockDim.x must be a multiple of warpSize
        // 128x4 means we have 16 warps and a block computes a 64x64 output tile
        blockDim.x = 128;
        blockDim.y = 4;

        gridDim.x = (M_GLOBAL + (M * blockDim.x / 32 - 1)) / (M * blockDim.x / 32);
        gridDim.y = (N_GLOBAL + N * blockDim.y - 1) / (N * blockDim.y);

        printf("Computing... using simple_wmma_gemm kernel\n");
        hipLaunchKernelGGL(simple_wmma_gemm, gridDim, blockDim, 0, 0, A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL, alpha, beta);
        // Launches do not return errors directly; query explicitly.
        checkCudaErrors(hipGetLastError());
#if CPU_DEBUG
        checkCudaErrors(hipMemcpy(result_hD, D, sizeof(double) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
#endif
    }

    checkCudaErrors(hipEventRecord(stop));
    checkCudaErrors(hipEventSynchronize(stop));

#if CPU_DEBUG
    printf("Verifying correctness of the computations...\n");

    memcpy(result_host, C_h, sizeof(double) * M_GLOBAL * N_GLOBAL);

    matMultiplyOnHost(A_h, B_h, result_host,
                      alpha, beta,
                      M_GLOBAL, K_GLOBAL,
                      K_GLOBAL, N_GLOBAL,
                      M_GLOBAL, N_GLOBAL);

    size_t number_of_matches = 0;
    for (int i = 0; i < N_GLOBAL*M_GLOBAL; i++) {
        if (fabs(result_hD[i] - result_host[i]) > 0.1f)
        {
            printf("mismatch i=%d result_hD=%f result_host=%f\n", i, result_hD[i], result_host[i]);
            break;
        }
        else
        {
            number_of_matches++;
        }
    }
    printf("number_of_matches = %zu out of = %d \n", number_of_matches, N_GLOBAL*M_GLOBAL);
    free(result_hD);
    free(result_host);
#endif

    float milliseconds = 0;

    checkCudaErrors(hipEventElapsedTime(&milliseconds, start, stop));

    printf("Time: %f ms\n", milliseconds);
    printf("FP64 TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2)/(milliseconds/1000.)) / 1e12);

    // Release timing events and all host/device allocations.
    checkCudaErrors(hipEventDestroy(start));
    checkCudaErrors(hipEventDestroy(stop));

    free(A_h);
    free(B_h);
    free(C_h);
    checkCudaErrors(hipFree((void*)A));
    checkCudaErrors(hipFree((void*)B));
    checkCudaErrors(hipFree((void*)C));
    checkCudaErrors(hipFree((void*)D));

    return 0;
}
| f3e19c3da56a12bad118b47f282b2ee8f7e801ce.cu | /*
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// CUDA sample demonstrating a Double precision GEMM computation using the Warp
// Matrix Multiply and Accumulate API introduced in CUDA 11.0.
// In this program, the compute_dgemm kernel computes the result of a matrix multiplication
// and addition: D = alpha * A * B + beta * C. The dimensions of both C and D matrices
// are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x K_GLOBAL (row-major), the B matrix
// is K_GLOBAL x N_GLOBAL (column-major).
// In that kernel, each CTA computes one 64 x 64 tile of the resulting matrix
// per iteration. When the tile is computed, the CTA stores it to the global memory
// and begins a new iteration, selecting a new 64 x 64 tile to compute.
// Each CTA consists of eight warps. For the 64 x 64 tile, each warp computes eight
// 8 x 8 subtiles, organized in a 2 x 4 two-dimensional array.
// Warps compute the 8 x 8 subtiles using nvcuda::wmma::mma_sync operations by
// moving through the K_GLOBAL dimension of the A and B matrices and accumulating
// the intermediate result in the local thread state.
// There are a number of simple optimizations used in the algorithm:
// - The CTA copies the 64 x 64 tile of the C matrix from the global memory to
// shared memory. After that is done, each warp loads the C matrix fragments from
// shared memory, thus avoiding a random global memory access.
// - On each internal iteration, the CTA copies a portion of the A and B matrices from
// global memory to shared memory. After that, all warps in the CTA reuse the A and B
// data from shared memory, thus reducing the number of data copies from global memory.
// - The portions of the A and B matrices are stored in shared memory with an additional
// padding (skew) to reduce the number of shared memory access bank conflicts.
// (See a detailed explanation near the SKEW_DOUBLE macro definition.)
// - When the CTA finishes computing the tiles of the resulting matrix, each warp stores
// its subtiles to shared memory. The CTA then copies the shared memory contents to
// global memory, again avoiding redundant random global memory accesses.
// - Note that the CTA tile size is chosen to maximize the GPU register utilization,
// but carefully enough to avoid local memory use.
#include <assert.h>
#include <stdio.h>
#include <cuda.h>
#include <mma.h>
#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
#include <cuda/std/type_traits>
#include <cuda/barrier>
#include <cuda/pipeline>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
// Externally configurable parameters.
#ifndef CPU_DEBUG
// Set this to 1 to verify the correctness of the GPU-computed matrix.
#define CPU_DEBUG 0
#endif
#ifndef SHARED_MEMORY_LIMIT_64K
// Set this to 0 to use more than 64 Kb of shared memory to cache data, to
// improve the performance of the computations on GPU.
// Note that you need a GPU that can have more than 64 Kb of shared memory
// per multiprocessor.
#define SHARED_MEMORY_LIMIT_64K 0
#endif
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 4
// GEMM configuration.
#define M_TILES 1024
#define N_TILES 1024
#define K_TILES 1024
#define M_GLOBAL (M * M_TILES)
#define N_GLOBAL (N * N_TILES)
#define K_GLOBAL (K * K_TILES)
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#if SHARED_MEMORY_LIMIT_64K
// With only 64 Kb shared memory available, we can fit 8x16-tile chunks of each
// the A and B matrix data, that are (M = 8) * (K = 4) * 8 * (CHUNK_K = 16) * sizeof(double) = 32 Kb each
// But we cannot account the 4 Kb total skew overhead, without which the performance
// would be severely impacted. So we choose to reduce the chunk size in half,
// i.e. the amount of A and B matrix data we cache in shared memory.
// Accordingly, this doubles the number of outer iterations across the global K
// dimension, which only slightly impacts the performance.
#define CHUNK_K 8
#else
#define CHUNK_K 16
#endif
#define CHUNK_LINE_BYTES (CHUNK_K * K * sizeof(double))
#define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4))
#define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES)
#define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP)
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B matrix
// in shared memory to minimize possible bank conflicts.
// Before performing the nvcuda::wmma::mma_sync operation, the warp must load the matrix
// data using the nvcuda::wmma::load_matrix_sync operation. Although the memory access pattern
// is not specified for that function, each lane in the warp can read one or multiple matrix
// elements from different matrix rows or columns.
// For shared memory, such access can result in bank conflicts if different rows / columns
// of the matrix map to the same bank. By shifting each row and column by a few bytes, we
// make sure that they map to different banks, thus reducing the number of possible bank
// conflicts.
// A shift of 4 eight-byte "double" elements is the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by nvcuda::wmma::load_matrix_sync.
#define SKEW_DOUBLE 4
#define checkKernelErrors(expr) do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, # expr, cudaGetErrorString(__err)); \
abort(); \
} \
} while(0)
enum kernels
{
dmma_shmem_gemm_async_copy = 0, // DMMA shmem using kernel with async_copy
dmma_shmem_gemm_cg_async_copy = 1, // DMMA shmem using kernel with cooperative groups async_copy
dmma_shmem_gemm = 2, // DMMA shmem using kernel normal copy (without async_copy).
simple_dmma_gemm = 3 // DMMA non-shmem using simple kernel.
};
const char* kernelNames[] = {"compute_dgemm_async_copy", "compute_dgemm_cg_async_copy",
"compute_dgemm", "simple_wmma_gemm"};
using namespace nvcuda;
namespace cg = cooperative_groups;
// Fill the host-side matrices with small random integers (0, 1 or 2) from
// rand().  A is M_GLOBAL x K_GLOBAL, B is N_GLOBAL x K_GLOBAL and C is
// M_GLOBAL x N_GLOBAL; each is written as a flat array in the same element
// order as before, so the rand() consumption sequence is unchanged.
__host__ void init_host_matrices(double *a, double *b, double *c)
{
    const size_t elemsA = (size_t)M_GLOBAL * K_GLOBAL;
    const size_t elemsB = (size_t)N_GLOBAL * K_GLOBAL;
    const size_t elemsC = (size_t)M_GLOBAL * N_GLOBAL;

    for (size_t idx = 0; idx < elemsA; idx++)
        a[idx] = (double)(rand() % 3);

    for (size_t idx = 0; idx < elemsB; idx++)
        b[idx] = (double)(rand() % 3);

    for (size_t idx = 0; idx < elemsC; idx++)
        c[idx] = (double)(rand() % 3);
}
// Double-precision GEMM, D = alpha * A * B + beta * C, using DMMA tensor
// cores with shared-memory tiling and plain synchronous copies (no async
// copy machinery).  A is row-major (M_GLOBAL x K_GLOBAL), B is column-major
// (K_GLOBAL x N_GLOBAL).  Launch with THREADS_PER_BLOCK threads per block
// and SHMEM_SZ bytes of dynamic shared memory.  Requires SM 8.0+; the body
// compiles away on older architectures.
__global__ void compute_dgemm(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
    extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];

    // Warp and lane identification.
    const unsigned int warpId = threadIdx.x / WARP_SIZE;
    const unsigned int laneId = threadIdx.x % WARP_SIZE;

    // Offset in shared memory from which the B matrix is stored.
    const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;

    // This pointer is used to access the C and D matrix tiles this warp computes.
    double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId / BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET;

    // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
    double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;

    // Adjust the beta scaler, as it'll be multiplied by alpha at the end of
    // each tile computation. Technically this is not generally correct (may result
    // in a loss of precision). Zero still needs to be specially handled though.
    beta /= alpha;

    // Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
    // right and down, and selects the next tile to compute. Once there's no such tile,
    // all warps in this CTA exit.
    for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
        const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
        const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;

        // Stop when there are no more D matrix tiles to compute in this CTA.
        if (block_tile_i >= M_TILES) {
            break;
        }

        // This warp's pointer to the C matrix data to copy memory from to shared memory.
        const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
        const double *src_gmem_warp_stream_ptr = &C[gmem_idx];

        // Stream multiple C tiles to shared memory (one 16-byte int4 per lane per row).
#pragma unroll
        for (int i = 0; i < N; i++) {
            *((int4 *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) =
                *((int4 *)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId);
        }

        __syncthreads();

        // These fragments will accumulate the result of A and B matrix fragment multiplications
        // along the K_GLOBAL dimension.
        wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];

        // Load the C matrix tiles into fragments from shared memory.
#pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
            for (int j = 0; j < WARP_ROW_TILES; j++) {
                const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
                wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
            }
        }

        __syncthreads();

        // Scale the C matrix (register-only; beta was pre-divided by alpha above).
#pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
            for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
                for (int t = 0; t < c[i][j].num_elements; t++) {
                    c[i][j].x[t] *= beta;
                }
            }
        }

        // Select what warp copies what matrix to shared memory.
        // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
        const double *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
                                                                  (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);

        // Go through the global K dimension by a fixed step at a time.
#pragma unroll
        for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
            // Copy slices of the A and B matrices to shared memory.
            // The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
            size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
                                                              (N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);

            // First half of the warp copies the first row / column of the matrix,
            // the second half of the warp copies the next.
            const double *lane_ptr = warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL;

            // Shift the second half of the warp to the next row / column in the shared memory.
            shmem_idx += laneId / CHUNK_COPY_LINE_LANES;

#pragma unroll
            for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP); i++) {
                // Copy 16 bytes at once in each lane.
                *((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) = *((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES));

                // Advance the global memory pointer and the shared memory index.
                lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
                shmem_idx += CHUNK_COPY_LINES_PER_WARP;
            }

            __syncthreads();

            // Compute a grid of C matrix tiles in each warp.
#pragma unroll
            for (int k_step = 0; k_step < CHUNK_K; k_step++) {
                wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
                wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];

#pragma unroll
                for (int i = 0; i < WARP_COL_TILES; i++) {
                    size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
                    const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];

                    wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);

#pragma unroll
                    for (int j = 0; j < WARP_ROW_TILES; j++) {
                        if (i == 0) {
                            // Load the B matrix fragment once, because it is going to be reused
                            // against the other A matrix fragments.
                            size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
                            const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];

                            wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
                        }

                        wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
                    }
                }
            }

            __syncthreads();
        }

        // Store the D fragments to shared memory.
#pragma unroll
        for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
            for (int j = 0; j < WARP_ROW_TILES; j++) {
                // Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
                // warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
                for (int t = 0; t < c[i][j].num_elements; t++)
                    c[i][j].x[t] *= alpha;

                double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;

                wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
            }
        }

        __syncthreads();

        // Now that shared memory contains all the D tiles, stream them to global memory.
        double *dst_gmem_warp_stream_ptr = &D[gmem_idx];

#pragma unroll
        for (int i = 0; i < N; i++) {
            *((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
                *((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
        }

        __syncthreads();
    }
#endif
}
// Double-precision GEMM D = alpha*A*B + beta*C on DMMA tensor cores (SM80+),
// staging C, then A/B tiles into dynamic shared memory with cuda::memcpy_async
// issued on a per-thread cuda::pipeline so global->shared copies overlap compute.
// Assumes A is row major, B is column major, and dimensions are tile multiples.
// Launch: THREADS_PER_BLOCK threads per CTA with SHMEM_SZ dynamic shared memory.
__global__ void compute_dgemm_async_copy(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
constexpr size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
double *shmem_warp_tile_ptr = &shmem[0][0] + (warpId/BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
double *shmem_warp_stream_ptr = &shmem[0][0] + warpId * SHMEM_STRIDE * N;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
// Per-thread pipeline: each thread issues and waits on its own async copies.
cuda::pipeline<cuda::thread_scope_thread> pipe = cuda::make_pipeline();
// Each async copy moves one aligned double2 (16 bytes) per thread.
const auto shape2 = cuda::aligned_size_t<alignof(double2)>(sizeof(double2));
constexpr int loadStride = 1; // load 2 double, left-shift by 1.
// Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const double *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < N; i++) {
pipe.producer_acquire();
cuda::memcpy_async(&shmem_warp_stream_ptr[(SHMEM_STRIDE * i) + (laneId << loadStride)],
&src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i) + (laneId << loadStride)],
shape2, pipe);
pipe.producer_commit();
}
// Now wait for all the above issued 8 batches to complete.
cuda::pipeline_consumer_wait_prior<0>(pipe);
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
// Scale the C matrix.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
pipe.consumer_release();
// sync here so that shared memory can then be used for loading A & B matrices.
__syncthreads();
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const double *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);
const int stridePerLaneCopy = (laneId / CHUNK_COPY_LINE_LANES);
constexpr int chunksPerLane = ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP);
const int laneLoadElem = (laneId % CHUNK_COPY_LINE_LANES) << loadStride;
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
// As for DMMA M == N we use M for warp 4-7 + shmem_idx_b_off.
size_t shmem_idx = (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) + (shmem_idx_b_off * (warpId/(WARPS_PER_BLOCK/2)));
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
const double *lane_ptr = warp_ptr + tile_k * K + stridePerLaneCopy * K_GLOBAL + laneLoadElem;
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += stridePerLaneCopy;
#pragma unroll
for(int i = 0; i < chunksPerLane; i++) {
// Copy 16 bytes at once in each lane.
pipe.producer_acquire();
cuda::memcpy_async(&shmem[shmem_idx][laneLoadElem], lane_ptr, shape2, pipe);
pipe.producer_commit();
// Advance the global memory pointer and the shared memory index.
lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
// Drain all outstanding copies before the MMA stage reads shared memory.
cuda::pipeline_consumer_wait_prior<0>(pipe);
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
pipe.consumer_release();
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global memory.
double *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
#endif
}
// Double-precision GEMM D = alpha*A*B + beta*C on DMMA tensor cores (SM80+),
// cooperative-groups variant: global->shared staging is done with
// cg::memcpy_async on warp-sized / chunk-sized tiled partitions and completed
// with cg::wait(cta), instead of an explicit per-thread cuda::pipeline.
// Launch: THREADS_PER_BLOCK threads per CTA with SHMEM_SZ dynamic shared memory.
__global__ void compute_dgemm_cg_async_copy(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];
auto cta = cg::this_thread_block();
auto tile32 = cg::tiled_partition<32>(cta);
constexpr int tileChunkCopySize = WARP_SIZE / CHUNK_COPY_LINES_PER_WARP;
auto tileChunkCopy = cg::tiled_partition<tileChunkCopySize>(cta);
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
constexpr size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId/2) * SHMEM_STRIDE * N * 2 + (warpId%2) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
// Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const double *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < N; i++) {
auto dst_ptr = &shmem_warp_stream_ptr[(SHMEM_STRIDE * i)];
auto src_ptr = &src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i)];
cg::memcpy_async(tile32, dst_ptr, src_ptr, cuda::aligned_size_t<alignof(double2)>{tile32.size() * sizeof(double2)});
}
// Block-wide wait: all async C-tile copies must land before fragments load.
cg::wait(cta);
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// sync here so that shared memory can then be used for loading A & B matrices.
cg::wait(cta);
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const double *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);
const int stridePerLaneCopy = (laneId / CHUNK_COPY_LINE_LANES);
constexpr int chunksPerLane = ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
// As for DMMA M == N we use M for warp 4-7 + shmem_idx_b_off.
size_t shmem_idx = (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) + (shmem_idx_b_off * (warpId/(WARPS_PER_BLOCK/2)));
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
auto lane_ptr = warp_ptr + tile_k * K + stridePerLaneCopy * K_GLOBAL;
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += stridePerLaneCopy;
#pragma unroll
for(int i = 0; i < chunksPerLane; i++) {
// Copy 16 bytes at once in each lane.
auto dst_ptr = &shmem[shmem_idx][0];
auto src_ptr = lane_ptr;
cg::memcpy_async(tileChunkCopy, dst_ptr, src_ptr,
cuda::aligned_size_t<alignof(double2)>{tileChunkCopySize * sizeof(double2)});
// Advance the global memory pointer and the shared memory index.
lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
// Wait for the A/B slabs before the tensor-core stage reads them.
cg::wait(cta);
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
cg::sync(cta);
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
cg::sync(cta);
// Now that shared memory contains all the D tiles, stream them to global memory.
double *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
cg::sync(cta);
}
#endif
}
// Performs an MxNxK DGEMM (C=alpha*A*B + beta*C) assuming:
// 1) Matrices are packed in memory.
// 2) M, N and K are multiples of 8, 8 and 4 respectively.
// 3) A is row major, B is column major matrix.
// Note: This is a less performant version of the compute_dgemm kernel. It is designed for
// demonstration purposes only to show the CUDA WMMA API use without relying on
// availability of the shared memory.
// Requires SM80+ (double-precision tensor cores). One warp computes one MxN
// output tile; warpM/warpN select the tile from the 2D grid/block layout.
__global__ void simple_wmma_gemm(double *a, double *b, double *c, double *d, int m_ld, int n_ld, int k_ld, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
    // Leading dimensions. Packed with no transpositions.
    int lda = k_ld;
    int ldb = k_ld;
    int ldc = n_ld;
    // Tile using a 2D grid
    int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
    int warpN = (blockIdx.y * blockDim.y + threadIdx.y);
    // Declare the fragments
    wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a_frag;
    wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b_frag;
    wmma::fragment<wmma::accumulator, M, N, K, double> acc_frag;
    wmma::fragment<wmma::accumulator, M, N, K, double> c_frag;
    // Use a double literal for the double-precision accumulator (was 0.0f).
    wmma::fill_fragment(acc_frag, 0.0);
    // Loop over k, accumulating one K-slab per iteration.
    for (int i = 0; i < k_ld; i += K) {
        int aCol = i;
        int aRow = warpM * M;
        int bCol = warpN * N;
        int bRow = i;
        // Bounds checking
        if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) {
            // Load the inputs
            wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda);
            wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);
            // Perform the matrix multiplication
            wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
        }
    }
    // Load in the current value of c, scale it by beta, and add this our result scaled by alpha
    int cCol = warpN * N;
    int cRow = warpM * M;
    if (cRow < m_ld && cCol < n_ld) {
        wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc, wmma::mem_row_major);
        for(int i=0; i < c_frag.num_elements; i++) {
            c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
        }
        // Store the output
        wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc, wmma::mem_row_major);
    }
#endif
}
// Reference (host-side) DGEMM used to verify the GPU kernels:
//   C = alpha * A * B + beta * C
// with A row major (numARows x numAColumns), B column major
// (numBRows x numBColumns, numBRows == numAColumns), and C row major
// (numCRows x numCColumns), updated in place.
// Fix: alpha/beta are now double — they were declared float, silently
// narrowing the double scale factors passed by main() and skewing the
// host/device comparison. (__host__ qualifier dropped; it is implicit.)
void matMultiplyOnHost(double *A, double *B, double *C,
                       double alpha, double beta,
                       int numARows, int numAColumns,
                       int numBRows, int numBColumns,
                       int numCRows, int numCColumns)
{
    for (int i = 0; i < numCRows; i++) {
        for (int j = 0; j < numCColumns; j++) {
            double temp = 0.0;
            for (int k = 0; k < numAColumns; k++) {
                // B matrix is column major. A matrix is row major.
                temp += A[i * numAColumns + k] * B[j * numBRows + k];
            }
            C[i * numCColumns + j] = temp * alpha + beta * C[i * numCColumns + j];
        }
    }
}
// Host driver: allocates and initializes A/B/C on host and device, selects one
// of the DMMA kernels (or the simple WMMA fallback when shared memory is
// insufficient), times the run with CUDA events, optionally verifies against a
// CPU reference (CPU_DEBUG), and reports elapsed time and TFLOPS.
int main(int argc, char **argv)
{
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
// Double precision Tensor cores require a GPU of Ampere (SM8X) architecture or higher.
if (deviceProp.major < 8) {
printf("dmmaTensorCoreGemm requires SM 8.0 or higher. Exiting...\n");
exit(EXIT_WAIVED);
}
printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES);
printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES);
printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES);
double *A_h = NULL;
double *B_h = NULL;
double *C_h = NULL;
#if CPU_DEBUG
double *result_hD = NULL;
double *result_host = NULL;
#endif
A_h = (double*) malloc(sizeof(double) * M_GLOBAL * K_GLOBAL);
B_h = (double*) malloc(sizeof(double) * K_GLOBAL * N_GLOBAL);
C_h = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
#if CPU_DEBUG
result_hD = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
result_host = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
#endif
double *A = NULL;
double *B = NULL;
double *C = NULL;
double *D = NULL;
checkCudaErrors(cudaMalloc((void**)&A, sizeof(double) * M_GLOBAL * K_GLOBAL));
checkCudaErrors(cudaMalloc((void**)&B, sizeof(double) * N_GLOBAL * K_GLOBAL));
checkCudaErrors(cudaMalloc((void**)&C, sizeof(double) * M_GLOBAL * N_GLOBAL));
checkCudaErrors(cudaMalloc((void**)&D, sizeof(double) * M_GLOBAL * N_GLOBAL));
// The kernels use 16-byte vector (int4/double2) accesses; cudaMalloc returns
// at-least-256-byte-aligned pointers, these asserts just make that explicit.
assert(((unsigned long long)A) % 128 == 0);
assert(((unsigned long long)B) % 128 == 0);
assert(((unsigned long long)C) % 128 == 0);
assert(((unsigned long long)D) % 128 == 0);
init_host_matrices(A_h, B_h, C_h);
printf("Preparing data for GPU...\n");
checkCudaErrors(cudaMemcpy(A, A_h, sizeof(double) * M_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(B, B_h, sizeof(double) * N_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(C, C_h, sizeof(double) * M_GLOBAL * N_GLOBAL, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemset(D, 0, sizeof(double) * M_GLOBAL * N_GLOBAL));
enum {
// Compute the right amount of shared memory to request.
// We need shared memory to hold per-CTA C and D matrix tiles, and to cache per-CTA chunks
// of the A and B matrices. Therefore, the right amount to request is the maximum of those
// two numbers.
SHMEM_SZ = MAX(sizeof(double) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_DOUBLE) * 2,
M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N * (BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(double))
};
printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL);
// NOTE(review): float literals promoted to double — 1.1f as a double is not
// exactly 1.1, but the same constants feed both GPU and CPU paths.
const double alpha = 1.1f;
const double beta = 1.2f;
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
kernels selected_kernel = dmma_shmem_gemm_async_copy;
// kernel to run - default (dmma_shmem_gemm_async_copy == 0)
if (checkCmdLineFlag(argc, (const char **)argv, "kernel")) {
int kernel_number = getCmdLineArgumentInt(argc, (const char **)argv, "kernel");
if (kernel_number < 4)
{
selected_kernel = (kernels)kernel_number;
}
else
{
printf("Error: kernel number should be between 0 to 3, you have entered %d\n", kernel_number);
exit(EXIT_FAILURE);
}
}
// If enough shared memory available on the GPU use high performant kernel
if ((deviceProp.sharedMemPerMultiprocessor >= SHMEM_SZ) && (selected_kernel != simple_dmma_gemm))
{
printf("Computing using high performance kernel = %d - %s\n", selected_kernel, kernelNames[selected_kernel]);
switch (selected_kernel)
{
case dmma_shmem_gemm_async_copy :
default:
// The kernels need more than the default 48 KB dynamic shared memory, so
// opt in explicitly before launching.
checkCudaErrors(cudaFuncSetAttribute(compute_dgemm_async_copy, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors((compute_dgemm_async_copy<<<deviceProp.multiProcessorCount*3, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C, D, alpha, beta)));
break;
case dmma_shmem_gemm_cg_async_copy :
checkCudaErrors(cudaFuncSetAttribute(compute_dgemm_cg_async_copy, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors((compute_dgemm_cg_async_copy<<<deviceProp.multiProcessorCount*3, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C, D, alpha, beta)));
break;
case dmma_shmem_gemm :
checkCudaErrors(cudaFuncSetAttribute(compute_dgemm, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors((compute_dgemm<<<deviceProp.multiProcessorCount*2, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C, D, alpha, beta)));
break;
}
#if CPU_DEBUG
checkCudaErrors(cudaMemcpy(result_hD, D, sizeof(double)*M_GLOBAL*N_GLOBAL, cudaMemcpyDeviceToHost));
#endif
}
else
{
dim3 gridDim;
dim3 blockDim;
// blockDim.x must be a multple of warpSize
// 128x4 means we have 16 warps and a block computes a 64x64 output tile
blockDim.x = 128;
blockDim.y = 4;
gridDim.x = (M_GLOBAL + (M * blockDim.x / 32 - 1)) / (M * blockDim.x / 32);
gridDim.y = (N_GLOBAL + N * blockDim.y - 1) / (N * blockDim.y);
printf("Computing... using simple_wmma_gemm kernel\n");
simple_wmma_gemm<<<gridDim, blockDim>>>(A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL, alpha, beta);
#if CPU_DEBUG
checkCudaErrors(cudaMemcpy(result_hD, D, sizeof(double) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost));
#endif
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
#if CPU_DEBUG
printf("Verifying correctness of the computations...\n");
memcpy(result_host, C_h, sizeof(double) * M_GLOBAL * N_GLOBAL);
matMultiplyOnHost(A_h, B_h, result_host,
alpha, beta,
M_GLOBAL, K_GLOBAL,
K_GLOBAL, N_GLOBAL,
M_GLOBAL, N_GLOBAL);
size_t number_of_matches = 0;
for (int i = 0; i < N_GLOBAL*M_GLOBAL; i++) {
if (fabs(result_hD[i] - result_host[i]) > 0.1f)
{
printf("mismatch i=%d result_hD=%f result_host=%f\n", i, result_hD[i], result_host[i]);
break;
}
else
{
number_of_matches++;
}
}
printf("number_of_matches = %zu out of = %d \n", number_of_matches, N_GLOBAL*M_GLOBAL);
free(result_hD);
free(result_host);
#endif
float milliseconds = 0;
checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, stop));
printf("Time: %f ms\n", milliseconds);
printf("FP64 TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2)/(milliseconds/1000.)) / 1e12);
// NOTE(review): start/stop events are never cudaEventDestroy()ed — harmless
// at process exit, but cleanup would be tidier.
free(A_h);
free(B_h);
free(C_h);
checkCudaErrors(cudaFree((void*)A));
checkCudaErrors(cudaFree((void*)B));
checkCudaErrors(cudaFree((void*)C));
checkCudaErrors(cudaFree((void*)D));
return 0;
}
|
71fc40277ea23a6d182d6ed50651fe6a1e8a11b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "additionally.h"
#include "gpu.h"
extern int gpu_index;
#define BLOCK 512
#define FULL_MASK 0xffffffff
#define WARP_SIZE 32
// Warp shuffle that works across CUDA toolkit versions: CUDA 9+ requires the
// mask-carrying __shfl_sync (legacy __shfl was removed for Volta's independent
// thread scheduling), older toolkits only have __shfl. FULL_MASK assumes all
// 32 lanes of the warp participate — callers must guarantee warp convergence.
template<typename T1, typename T2>
__device__ inline T1 __shfl_custom(T1 val, T2 lane) {
#if CUDART_VERSION >= 9000
return __shfl_sync(FULL_MASK, val, lane);
#else
return __shfl(val, lane);
#endif
}
// Warp ballot that works across CUDA toolkit versions: returns a 32-bit mask
// with one bit per lane for which 'val' is non-zero. Uses __ballot_sync with
// FULL_MASK on CUDA 9+ (all lanes assumed converged), legacy __ballot before.
template<typename T>
__device__ inline uint32_t __ballot_custom(T val) {
#if CUDART_VERSION >= 9000
return __ballot_sync(FULL_MASK, val);
#else
return __ballot(val);
#endif
}
// Deliberate no-ops ("not required now" per the original comments): these
// host<->device parameter-sync hooks are unused for the listed layer types in
// this build. NOTE(review): presumably an inference-only configuration —
// confirm before relying on these to sync weights.
void pull_batchnorm_layer(layer l) {} // not required now
void push_batchnorm_layer(layer l) {} // not required now
void pull_local_layer(local_layer l) {} // not required now
void push_local_layer(local_layer l) {} // not required now
void pull_connected_layer(local_layer l) {} // not required now
void push_connected_layer(local_layer l) {} // not required now
// Number of CUDA blocks needed to cover 'array_size' elements when each block
// handles 'block_size' of them (ceiling division for non-negative sizes).
int get_number_of_blocks(int array_size, int block_size)
{
    const int full_blocks = array_size / block_size;
    const int has_tail = (array_size % block_size) > 0;
    return has_tail ? full_blocks + 1 : full_blocks;
}
// Prints a CUDA error (to stdout and into a buffer handed to error(), which
// aborts), pausing for a keypress on Windows so the console stays visible.
// Factored out of check_error to remove the duplicated report-and-abort code.
static void report_cuda_error(hipError_t status, const char *prefix)
{
    const char *s = hipGetErrorString(status);
    char buffer[256];
    printf("%s: %s\n", prefix, s);
    snprintf(buffer, 256, "%s: %s", prefix, s);
#ifdef WIN32
    getchar();
#endif
    error(buffer);
}
// Aborts (via error()) if 'status' indicates a failure, and also checks the
// sticky last-error state so asynchronous failures from earlier launches are
// not silently lost. The "Prev" report covers that previously recorded error.
void check_error(hipError_t status)
{
    // Read (and clear) the sticky error before reporting, matching the
    // original order: explicit status first, then the previous async error.
    hipError_t status2 = hipGetLastError();
    if (status != hipSuccess)
        report_cuda_error(status, "CUDA Error");
    if (status2 != hipSuccess)
        report_cuda_error(status2, "CUDA Error Prev");
}
// Like check_error, but first logs the call site (file/line/build time).
// In DEBUG builds it also synchronizes the device to surface asynchronous
// kernel failures.
// Fix: the original overwrote 'status' with the hipDeviceSynchronize() result
// in DEBUG builds, so the caller's original error was discarded and never
// reported by check_error. A separate variable now keeps both.
void check_error_extended(hipError_t status, const char *file, int line, const char *date_time)
{
    if (status != hipSuccess)
        printf("CUDA status Error: file: %s() : line: %d : build time: %s \n", file, line, date_time);
#ifdef DEBUG
    hipError_t sync_status = hipDeviceSynchronize();
    if (sync_status != hipSuccess) {
        printf("CUDA status = hipDeviceSynchronize() Error: file: %s() : line: %d : build time: %s \n", file, line, date_time);
        check_error(sync_status);
    }
#endif
    check_error(status);
}
// Selects CUDA device 'n' for the calling host thread and records the choice
// in the global gpu_index (aborts through check_error on failure).
void cuda_set_device(int n)
{
    gpu_index = n;
    check_error(hipSetDevice(n));
}
// Returns the CUDA device currently selected for the calling host thread
// (aborts through check_error on failure).
int cuda_get_device()
{
    int device_id = 0;
    check_error(hipGetDevice(&device_id));
    return device_id;
}
#ifdef CUDNN
// Returns a lazily-created cuDNN handle for the current device, cached per
// device index (up to 16 devices).
// NOTE(review): the init flags are not synchronized — concurrent first calls
// from multiple host threads could race on cudnnCreate; confirm the caller
// initializes single-threaded.
cudnnHandle_t cudnn_handle()
{
static int init[16] = { 0 };
static cudnnHandle_t handle[16];
int i = cuda_get_device();
if (!init[i]) {
cudnnCreate(&handle[i]);
init[i] = 1;
}
return handle[i];
}
// Reports a cuDNN failure for 'status' and, where the toolkit supports it
// (CUDNN_ERRQUERY_RAWCODE), also queries the handle for a previously recorded
// asynchronous runtime error ('status2'). Either failure is printed and then
// aborts through error(). In DEBUG builds the device is synchronized first so
// failures are attributed to the right call site.
void cudnn_check_error(cudnnStatus_t status)
{
#ifdef DEBUG
hipDeviceSynchronize();
#endif
cudnnStatus_t status2 = CUDNN_STATUS_SUCCESS;
#ifdef CUDNN_ERRQUERY_RAWCODE
cudnnStatus_t status_tmp = cudnnQueryRuntimeError(cudnn_handle(), &status2, CUDNN_ERRQUERY_RAWCODE, NULL);
#endif
if (status != CUDNN_STATUS_SUCCESS)
{
const char *s = cudnnGetErrorString(status);
char buffer[256];
printf("cuDNN Error: %s\n", s);
snprintf(buffer, 256, "cuDNN Error: %s", s);
#ifdef WIN32
getchar();
#endif
error(buffer);
}
if (status2 != CUDNN_STATUS_SUCCESS)
{
const char *s = cudnnGetErrorString(status2);
char buffer[256];
printf("cuDNN Error Prev: %s\n", s);
snprintf(buffer, 256, "cuDNN Error Prev: %s", s);
#ifdef WIN32
getchar();
#endif
error(buffer);
}
}
// Like cudnn_check_error, but first logs the call site (file/line/build time).
// In DEBUG builds it also synchronizes the device to surface async failures.
// Fix: the original assigned the hipError_t result of hipDeviceSynchronize()
// into the cudnnStatus_t 'status' (a type confusion — the two enums are
// unrelated) and thereby also discarded the caller's original status. A
// correctly-typed local now holds the sync result.
void cudnn_check_error_extended(cudnnStatus_t status, const char *file, int line, const char *date_time)
{
    if (status != CUDNN_STATUS_SUCCESS)
        printf("\n cuDNN status Error in: file: %s() : line: %d : build time: %s \n", file, line, date_time);
#ifdef DEBUG
    hipError_t sync_status = hipDeviceSynchronize();
    if (sync_status != hipSuccess)
        printf("\n cuDNN status = hipDeviceSynchronize() Error in: file: %s() : line: %d : build time: %s \n", file, line, date_time);
#endif
    cudnn_check_error(status);
}
#endif // CUDNN
// Allocates n floats on the current device and, when 'x' is non-NULL, copies
// its contents in. Aborts via check_error/error on any failure; returns the
// device pointer.
float *cuda_make_array(float *x, size_t n)
{
    const size_t bytes = sizeof(float) * n;
    float *device_ptr;
    check_error(hipMalloc((void **)&device_ptr, bytes));
    if (x) {
        check_error(hipMemcpy(device_ptr, x, bytes, hipMemcpyHostToDevice));
    }
    if (!device_ptr) error("Cuda malloc failed\n");
    return device_ptr;
}
// Allocates an uninitialized device buffer of n ints and returns its pointer
// (aborts through check_error on allocation failure).
int *cuda_make_int_array(size_t n)
{
    int *device_ptr;
    check_error(hipMalloc((void **)&device_ptr, sizeof(int) * n));
    return device_ptr;
}
// Releases a device buffer previously obtained from cuda_make_array
// (aborts through check_error on failure).
void cuda_free(float *x_gpu)
{
    check_error(hipFree(x_gpu));
}
// Synchronously copies n floats from host buffer 'x' to device buffer 'x_gpu'.
void cuda_push_array(float *x_gpu, float *x, size_t n)
{
    check_error(hipMemcpy(x_gpu, x, sizeof(float) * n, hipMemcpyHostToDevice));
}
// Synchronously copies n floats from device buffer 'x_gpu' back to host 'x'.
void cuda_pull_array(float *x_gpu, float *x, size_t n)
{
    check_error(hipMemcpy(x, x_gpu, sizeof(float) * n, hipMemcpyDeviceToHost));
}
// Copies layer i's output from the device to its host buffer and returns the
// host pointer. REGION layers are skipped — their l.output is returned as-is
// (presumably synced elsewhere; NOTE(review): confirm for REGION layers).
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if (l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
// Returns the host-side output of the last non-COST layer of the network.
// NOTE(review): the loop condition is i > 0, so layer 0 is used without a
// COST check if every later layer is a COST layer.
float *get_network_output_gpu(network net)
{
int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
// Chooses a launch grid covering at least n threads at BLOCK threads per
// block. When more than 65535 blocks are needed (the per-dimension limit),
// the grid is folded into two dimensions so that x*y*BLOCK >= n.
dim3 cuda_gridsize(size_t n) {
    const size_t blocks_needed = (n - 1) / BLOCK + 1;
    size_t grid_x = blocks_needed;
    size_t grid_y = 1;
    if (grid_x > 65535) {
        grid_x = ceil(sqrtf(blocks_needed));
        grid_y = (n - 1) / (grid_x * BLOCK) + 1;
    }
    dim3 d;
    d.x = grid_x;
    d.y = grid_y;
    d.z = 1;
    return d;
}
// Copies a convolutional layer's parameters from device buffers back to the
// host: weights (c*n*size*size floats), biases (n), and — when batch
// normalization is enabled — scales and rolling mean/variance (n each).
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
// Mirror of pull_convolutional_layer: uploads the layer's host-side weights,
// biases and (when batch normalization is enabled) batch-norm statistics to
// their device buffers.
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
// -------------------- CUDA functions -------------------
// add BIAS
// Adds the per-filter bias to every activation. Grid layout: x covers the
// spatial extent ('size' elements, bounds-checked), y is the filter, z the
// batch item.
__global__ void add_bias_kernel(float *output, float *biases, int n, int size)
{
    const int spatial_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (spatial_idx >= size) return;
    const int filter = blockIdx.y;
    const int batch = blockIdx.z;
    output[(batch * n + filter) * size + spatial_idx] += biases[filter];
}
// Launches add_bias_kernel over a (spatial, filter, batch) grid with BLOCK
// threads per block and checks for launch errors.
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
    const dim3 grid((size - 1) / BLOCK + 1, n, batch);
    const dim3 block(BLOCK, 1, 1);
    add_bias_kernel<<<grid, block>>>(output, biases, n, size);
    check_error(hipPeekAtLastError());
}
// normalization
// Normalizes each activation with its filter's statistics:
// x = (x - mean[f]) / (sqrt(var[f]) + 1e-6). One thread per element over a
// possibly 2D grid; the filter index is recovered from the flat offset.
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    const int idx = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < N) {
        const int f = (idx / spatial) % filters;
        x[idx] = (x[idx] - mean[f]) / (sqrtf(variance[f]) + .000001f);
    }
}
// Launches normalize_kernel with one thread per element of x
// (batch*filters*spatial total) and checks for launch errors.
void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    const size_t total = batch*filters*spatial;
    normalize_kernel<<<cuda_gridsize(total), BLOCK>>>(total, x, mean, variance, batch, filters, spatial);
    check_error(hipPeekAtLastError());
}
// fill array
// Writes ALPHA into every INCX-strided slot of X (N elements); one thread per
// element over a possibly 2D grid.
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
    const int tid = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (tid >= N) return;
    X[tid*INCX] = ALPHA;
}
// Fills N INCX-strided elements of device array X with ALPHA
// (float-correct alternative to a byte-wise memset) and checks the launch.
void fill_ongpu(int N, float ALPHA, float * X, int INCX)
{
    fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
    check_error(hipPeekAtLastError());
}
// scale BIAS
// Multiplies every activation by its filter's scale (batch-norm gamma).
// Grid layout matches add_bias_kernel: x spans the spatial extent
// (bounds-checked), y the filter, z the batch item.
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
    const int spatial_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (spatial_idx >= size) return;
    const int filter = blockIdx.y;
    const int batch = blockIdx.z;
    output[(batch * n + filter) * size + spatial_idx] *= biases[filter];
}
// Launches scale_bias_kernel over a (spatial, filter, batch) grid with BLOCK
// threads per block and checks for launch errors.
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
    const dim3 grid((size - 1) / BLOCK + 1, n, batch);
    const dim3 block(BLOCK, 1, 1);
    scale_bias_kernel<<<grid, block>>>(output, biases, n, size);
    check_error(hipPeekAtLastError());
}
// max-pool layer
// Max-pooling forward pass, one thread per output element. The flat thread id
// is decomposed into (j, i, k, b) = (out-col, out-row, channel, batch); each
// thread scans its size x size window (shifted by -pad/2), writes the maximum
// to 'output' and the flat input index of that maximum to 'indexes' (consumed
// by the backward pass). Window taps that fall outside the input contribute
// -INFINITY and are never selected.
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
// Output spatial dimensions implied by input size, padding and stride.
int h = (in_h + pad - size) / stride + 1;
int w = (in_w + pad - size) / stride + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
// Peel the flat id into output coordinates, fastest-varying first.
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
// Forward max-pooling. When stride == size the windows do not overlap and the
// cuDNN pooling path is used; otherwise the custom kernel (which also records
// argmax indexes for the backward pass) is launched.
void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
if (layer.stride == layer.size) {
//if(1) {
cudnnStatus_t maxpool_status;
float alpha = 1, beta = 0;
// NOTE(review): maxpool_status is assigned but never checked — cuDNN
// failures are silently ignored on this path.
maxpool_status = cudnnPoolingForward(
cudnn_handle(),
layer.poolingDesc,
&alpha,
layer.srcTensorDesc,
state.input,
&beta,
layer.dstTensorDesc,
layer.output_gpu);
//maxpool_status = cudnnDestroyPoolingDescriptor(poolingDesc);
//cudnnDestroyTensorDescriptor(layer.srcTensorDesc);
//cudnnDestroyTensorDescriptor(layer.dstTensorDesc);
}
else {
// Custom path: one thread per output element across batch*c*out_h*out_w.
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK >> > (n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
}
// flatten
// Permute a (batch, layers, spatial) tensor between channel-major and
// spatial-major layouts. forward != 0 copies channel-major x into
// spatial-major out; forward == 0 applies the inverse permutation.
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    int s = idx % spatial;
    int rest = idx / spatial;
    int l = rest % layers;
    int b = rest / layers;
    int chan_major = b * layers * spatial + l * spatial + s;
    int spat_major = b * layers * spatial + s * layers + l;
    if (forward) out[spat_major] = x[chan_major];
    else out[chan_major] = x[spat_major];
}
// Host wrapper: one thread per element.
void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
    int size = spatial * batch * layers;
    flatten_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, spatial, layers, batch, forward, out);
    check_error(hipPeekAtLastError());
}
// activations
// "Leaky hard tanh": identity on [0, 1], slope 0.001 outside that range.
// Fix: use float literals (.001f, 1.f) — the original double literals forced
// every out-of-range evaluation through double-precision math on the device.
__device__ float lhtan_activate_kernel(float x)
{
    if (x < 0.f) return .001f * x;
    if (x > 1.f) return .001f * (x - 1.f) + 1.f;
    return x;
}
// Derivative of lhtan: 1 inside (0, 1), 0.001 outside (float literals as above).
__device__ float lhtan_gradient_kernel(float x)
{
    if (x > 0.f && x < 1.f) return 1.f;
    return .001f;
}
// Hard tanh: clamp x to [-1, 1].
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
// Simple element-wise scalar activations used by activate_kernel below.
__device__ float linear_activate_kernel(float x) { return x; }
__device__ float logistic_activate_kernel(float x) { return 1.f / (1.f + expf(-x)); }
// loggy: logistic rescaled to the range (-1, 1).
__device__ float loggy_activate_kernel(float x) { return 2.f / (1.f + expf(-x)) - 1; }
__device__ float relu_activate_kernel(float x) { return x*(x>0); }
__device__ float elu_activate_kernel(float x) { return (x >= 0)*x + (x < 0)*(expf(x) - 1); }
// SELU with the standard lambda = 1.0507, alpha = 1.6732 constants.
__device__ float selu_activate_kernel(float x) { return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1); }
__device__ float relie_activate_kernel(float x) { return (x>0) ? x : .01f*x; }
__device__ float ramp_activate_kernel(float x) { return x*(x>0) + .1f*x; }
__device__ float leaky_activate_kernel(float x) { return (x>0) ? x : .1f*x; }
__device__ float tanh_activate_kernel(float x) { return (2 / (1 + expf(-2 * x)) - 1); }
// Piecewise-linear sigmoid approximation.
// NOTE(review): the .01/.125/.5 literals are double, so these arms evaluate in
// double precision before narrowing back to float — consider f-suffixes.
__device__ float plse_activate_kernel(float x)
{
if (x < -4) return .01 * (x + 4);
if (x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
// Staircase activation: floor(x/2) on even integer steps, plus the fractional
// part (x - n) on odd steps. floor() here is the double-precision overload.
__device__ float stair_activate_kernel(float x)
{
int n = floor(x);
if (n % 2 == 0) return floor(x / 2.);
else return (x - n) + floor(x / 2.);
}
// Dispatch an ACTIVATION enum value to the corresponding scalar activation.
// Returns 0 for any enum value not handled by a case below.
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
// Apply an arbitrary activation to n elements of x in place (2-D grid).
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < n) x[idx] = activate_kernel(x[idx], a);
}
// Specialized in-place leaky ReLU over n elements (1-D launch).
__global__ void activate_array_leaky_kernel(float *x, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        x[idx] = leaky_activate_kernel(x[idx]);
    }
}
// Specialized in-place SELU over n elements (1-D launch).
__global__ void activate_array_selu_kernel(float *x, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        x[idx] = selu_activate_kernel(x[idx]);
    }
}
// Specialized in-place logistic (sigmoid) over n elements (1-D launch).
__global__ void activate_array_logistic_kernel(float *x, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        x[idx] = logistic_activate_kernel(x[idx]);
    }
}
// Apply activation `a` in place to n floats on the GPU.
// LINEAR is the identity, so it returns immediately; LEAKY, LOGISTIC and SELU
// have dedicated 1-D kernels; everything else uses the generic dispatcher.
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY) activate_array_leaky_kernel << <num_blocks, BLOCK, 0 >> >(x, n);
else if (a == LOGISTIC) activate_array_logistic_kernel << <num_blocks, BLOCK >> >(x, n);
else if (a == SELU) activate_array_selu_kernel << <num_blocks, BLOCK >> >(x, n);
else
activate_array_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, a);
CHECK_CUDA(hipPeekAtLastError());
}
// softmax layer
// Numerically stable softmax over n elements:
// output[i] = exp(input[i]/temp - max/temp) / sum. The running maximum is
// subtracted inside the exponent to avoid overflow in expf.
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for (i = 0; i < n; ++i) {
        // Fix: keep the candidate maximum in float. The original declared
        // `int val`, truncating every input toward zero, so `largest` was
        // wrong and the stabilization shift (and thus the output) was broken.
        float val = input[i];
        largest = (val > largest) ? val : largest;
    }
    for (i = 0; i < n; ++i) {
        float e = expf(input[i] / temp - largest / temp);
        sum += e;
        output[i] = e;
    }
    for (i = 0; i < n; ++i) {
        output[i] /= sum;
    }
}
// One thread per batch row: softmax over the n values starting at b*offset.
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
    int b = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (b < batch) {
        float *row_in = input + b * offset;
        float *row_out = output + b * offset;
        softmax_device(n, row_in, temp, row_out);
    }
}
// Host wrapper: one thread per group of n inputs.
void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output)
{
    softmax_kernel << <cuda_gridsize(groups), BLOCK >> >(n, offset, groups, input, temp, output);
    check_error(hipPeekAtLastError());
}
// reorg layer
// Reorg (space-to-depth style shuffle): each element of the (batch, c, h, w)
// tensor is gathered from a permuted index of x into out.
// NOTE(review): the `forward` parameter is accepted but never used — this
// kernel always performs the same direction (out[in_index] = x[out_index]).
// Confirm against callers before relying on both directions.
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int in_index = i;
// Decompose the flat index into (b, in_c, in_h, in_w).
int in_w = i%w;
i = i / w;
int in_h = i%h;
i = i / h;
int in_c = i%c;
i = i / c;
int b = i%batch;
// Split the channel into (c2, offset) and scatter spatially by `stride`.
int out_c = c / (stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
out[in_index] = x[out_index];
}
// Host wrapper: one thread per element of the w*h*c*batch tensor.
void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
int size = w*h*c*batch;
reorg_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, w, h, c, batch, stride, forward, out);
check_error(hipPeekAtLastError());
}
// upsample layer
// Nearest-neighbor upsample by `stride` (w, h, c are the INPUT dimensions).
// forward != 0: out[o] += scale * x[i].
// forward == 0: backward pass — accumulates scale * out[o] into x with
// atomicAdd, since stride*stride output cells map onto one input cell.
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    size_t idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    int out_index = idx;
    // Decompose the flat output index into (b, out_c, out_h, out_w).
    int out_w = idx % (w * stride);
    idx = idx / (w * stride);
    int out_h = idx % (h * stride);
    idx = idx / (h * stride);
    int out_c = idx % c;
    idx = idx / c;
    int b = idx % batch;
    // Source cell: divide the output coordinates back down by the stride.
    int in_w = out_w / stride;
    int in_h = out_h / stride;
    int in_index = b * w * h * c + out_c * w * h + in_h * w + in_w;
    if (forward) out[out_index] += scale * x[in_index];
    else atomicAdd(x + in_index, scale * out[out_index]);
}
// Host wrapper: one thread per OUTPUT element (w*h*c*batch*stride*stride).
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    size_t size = w * h * c * batch * stride * stride;
    upsample_kernel << <cuda_gridsize(size), BLOCK >> >(size, in, w, h, c, batch, stride, forward, scale, out);
    check_error(hipPeekAtLastError());
}
// Strided, offset copy: Y[i*INCY + OFFY] = X[i*INCX + OFFX] for i in [0, N).
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    Y[idx * INCY + OFFY] = X[idx * INCX + OFFX];
}
// Host wrapper for the offset strided copy.
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
    copy_kernel << <cuda_gridsize(N), BLOCK>> >(N, X, OFFX, INCX, Y, OFFY, INCY);
    check_error(hipPeekAtLastError());
}
// Convenience entry point with zero offsets.
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
    copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
// shortcut layer
// Element-wise residual add for identically shaped tensors: out = in + add.
__global__ void simple_input_shortcut_kernel(float *in, int size, float *add, float *out)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < size) {
        out[idx] = in[idx] + add[idx];
    }
}
// Residual add for mismatched shapes: iterates the overlap region
// (minw x minh x minc), reading `add` with step `stride` and writing `out`
// with step `sample` (the caller guarantees one of the two is 1).
__global__ void input_shortcut_kernel(float *in, int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
// Decompose the flat id into (b, k, j, i) over the overlap region.
int i = id % minw;
id /= minw;
int j = id % minh;
id /= minh;
int k = id % minc;
id /= minc;
int b = id % batch;
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] = in[out_index] + add[add_index];
}
// Shortcut (residual) layer forward: out = in + add, handling unequal
// spatial/channel sizes by striding or sampling over the smaller extent.
extern "C" void input_shortcut_gpu(float *in, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
// Fast path: identical shapes -> plain element-wise add.
if (w1 == w2 && h1 == h2 && c1 == c2) {
int size = batch * w1 * h1 * c1;
simple_input_shortcut_kernel << <cuda_gridsize(size), BLOCK >> >(in, size, add, out);
CHECK_CUDA(hipPeekAtLastError());
return;
}
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
// Integer ratios between the two widths; the asserts require the height
// ratios to match. Both are clamped to at least 1 below.
int stride = w1 / w2;
int sample = w2 / w1;
assert(stride == h1 / h2);
assert(sample == h2 / h1);
if (stride < 1) stride = 1;
if (sample < 1) sample = 1;
int size = batch * minw * minh * minc;
input_shortcut_kernel << <cuda_gridsize(size), BLOCK >> >(in, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
CHECK_CUDA(hipPeekAtLastError());
}
// ----------- Quantinization --------------
// Symmetric saturation: values whose magnitude exceeds |max_val| are clamped
// to +/-max_val, preserving the sign of src.
__host__ __device__ int max_abs(int src, int max_val) {
    int limit = abs(max_val);
    if (abs(src) > limit) {
        src = (src > 0) ? max_val : -max_val;
    }
    return src;
}
// Quantize f32 -> int8 with symmetric saturation at +/-max_val.
// Fix: removed the leftover debug printf that dumped the first 10 outputs on
// every launch — it spammed stdout and serialized the kernel.
__global__ void cuda_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_int8[idx] = max_abs(input_f32[idx] * multipler, max_val); // 7-bit (1-bit sign)
}
// Host wrapper: one thread per element.
void cuda_convert_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val) {
    cuda_f32_to_int8 << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler, max_val);
}
// Debug helper: print the first num_print int8 values of x from the device.
__global__ void print_ints_kernel(int8_t* x, int size, int num_print){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size && idx < num_print) {
        printf("x %d is %d\n", idx, x[idx]);
    }
}
// Host wrapper for print_ints_kernel.
void print_ints(int8_t* x, int size, int num_print){
    print_ints_kernel<< < size / BLOCK + 1, BLOCK >> >(x, size, num_print);
}
// Debug helper: print the first num_print float values of x from the device.
__global__ void print_floats_kernel(float* x, int size, int num_print){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size && idx < num_print) {
        printf("x %d is %f\n", idx, x[idx]);
    }
}
// Host wrapper for print_floats_kernel.
void print_floats(float* x, int size, int num_print){
    print_floats_kernel<< < size / BLOCK + 1, BLOCK >> >(x, size, num_print);
}
// Quantize f32 -> int8 without saturation (relies on implicit narrowing).
__global__ void cuda_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    output_int8[i] = input_f32[i] * multipler; // 7-bit (1-bit sign)
}
// Host wrapper: one thread per element.
void cuda_convert_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler) {
    cuda_f32_to_int8_nomax << < size / BLOCK + 1, BLOCK >> >(input_f32, size, output_int8, multipler);
}
// Dequantize int8 -> f32 by scaling with multipler.
__global__ void cuda_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    output_f32[i] = input_int8[i] * multipler; // 7-bit (1-bit sign)
}
// Host wrapper: one thread per element.
void cuda_convert_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler) {
    cuda_int8_to_f32 << < size / BLOCK + 1, BLOCK >> >(input_int8, size, output_f32, multipler);
}
// In-place scale of a float buffer by multipler.
__global__ void cuda_multiply_f32(float *input_output, size_t size, float multipler)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    input_output[i] = input_output[i] * multipler; // 7-bit (1-bit sign)
}
// Host wrapper: one thread per element.
void cuda_do_multiply_f32(float *input_output, size_t size, float multipler) {
    cuda_multiply_f32 << < size / BLOCK + 1, BLOCK >> >(input_output, size, multipler);
}
// --------------------------------
// ------------- XNOR -------------
// --------------------------------
// XNOR-net weight binarization: each of the n filters (size weights each) is
// replaced by +/-mean(|w|), keeping the sign of the original weight.
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (f >= n) return;
    int i = 0;
    float mean = 0;
    // Fix: fabsf instead of fabs — the double-precision fabs forced a
    // float->double->float round trip on every weight on the device.
    for (i = 0; i < size; ++i) {
        mean += fabsf(weights[f*size + i]);
    }
    mean = mean / size;
    for (i = 0; i < size; ++i) {
        binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
        //binary[f*size + i] = weights[f*size + i];
    }
}
// Host wrapper: one thread per filter.
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    binarize_weights_kernel << <cuda_gridsize(n), BLOCK >> >(weights, n, size, binary);
    check_error(hipPeekAtLastError());
}
// --------------------------------
// Sign-binarize: binary[i] = +1 when x[i] >= 0, otherwise -1.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (idx < n) {
        binary[idx] = (x[idx] >= 0) ? 1 : -1;
    }
}
// Host wrapper: one thread per element.
void binarize_gpu(float *x, int n, float *binary)
{
    binarize_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, binary);
    check_error(hipPeekAtLastError());
}
// --------------------------------
// Exchange the layer's real weight buffers with its binary weight buffers
// (both the CPU pointers and, when built with GPU support, the GPU pointers).
void swap_binary(convolutional_layer *l)
{
    float *tmp = l->weights;
    l->weights = l->binary_weights;
    l->binary_weights = tmp;
#ifdef GPU
    tmp = l->weights_gpu;
    l->weights_gpu = l->binary_weights_gpu;
    l->binary_weights_gpu = tmp;
#endif
}
// --------------------------------
#define WARP_SIZE 32
// im2col where each output row (one kernel-tap position) is laid out with a
// pitch of `bit_align` floats instead of height_col*width_col, keeping rows
// aligned for a later bit-packing pass. Grid-stride loop over
// channels * height_col * width_col output columns.
__global__ void im2col_align_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col, const int bit_align)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
for (; index < n; index += blockDim.x*gridDim.x) {
// Decompose the flat index into (channel_in, h_out, w_out).
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
// Top-left corner of the receptive field in the input image.
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
//data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
data_col_ptr += channel_out * bit_align + h_out * width_col + w_out;
float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
// Zero-fill taps that fall outside the image (padding).
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//float src_val = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
//if (threadIdx.x % WARP_SIZE == 0) *((unsigned int*)data_col_ptr_32) = bit_mask;
//data_col_ptr_32 += bit_align / 32;
//data_col_ptr += height_col * width_col;
data_col_ptr += bit_align;
}
}
}
}
// Launch im2col with bit_align-pitched output rows: one thread per element of
// the [channels x height_col x width_col] output grid.
void im2col_align_ongpu(float *im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float *data_col, int bit_align)
{
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int num_kernels = channels * height_col * width_col;
    int num_blocks = (num_kernels + BLOCK - 1) / BLOCK;  // ceil-div
    im2col_align_gpu_kernel << <num_blocks, BLOCK, 0, 0>> >(
        num_kernels, im, height, width, ksize, pad,
        stride, height_col,
        width_col, data_col, bit_align);
}
// --------------------------------
// binary im2col - stride=1
// Binary im2col (stride = 1): one thread per (channel, kernel-row, kernel-col)
// triple; each warp sweeps the output plane 32 positions at a time and packs
// the sign bits of the patch values into data_col via ballot. Output rows are
// pitched to bit_align bits.
__global__ void im2col_align_bin_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize, const int channels,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col, const int bit_align)
{
__shared__ float tmp_s[1];
__shared__ ulonglong4 tmp256_s[1];
//#define SHRED_VALS ((BLOCK / 169) * )
//__shared__ float dst_s[1024];
//__shared__ float dst_s[1024];
//__shared__ uint32_t bit_s[32];
//__shared__ uint8_t bit_s[128];
int index = blockIdx.x*blockDim.x + threadIdx.x;
//for (; index < n; index += blockDim.x*gridDim.x)
{
// Decompose the thread index into (channel_in, kernel col j, kernel row i).
int c_index = index;
int channel_in = c_index % channels;
//int h_out = index % height_col;
//int c_index = index / height_col;
//int channel_in = c_index % channels;
int channel_out = channel_in * ksize * ksize;
int j_index = c_index / channels;
int j = j_index % ksize;
int i = j_index / ksize;
// Bit offset of this (channel, i, j) row in the packed output.
int pre_out_index = (channel_out + i*ksize + j) * bit_align;
int j_pad = (j - pad);
int i_pad = (i - pad);
for (int wh_index = 0; wh_index < (height_col*width_col); wh_index += 32)
//for (int h_out = 0; h_out < height_col; ++h_out)
{
// the end of padding
//if(0)
//for (int w_out = 0; w_out < (width_col); w_out += 32)
{
const int w_out = wh_index % width_col;
const int h_out = wh_index / width_col;
const int w = w_out + j_pad;
const int h = h_out + i_pad;
int pre_in_index = channel_in * height * width;
int pre_in_wh_index = h * width + w;
int send_wh_index = wh_index;
// Threads whose kernel row is out of range broadcast an index past
// the end so the guarded block below is skipped for their turn.
if (i >= ksize) send_wh_index = height_col*width_col;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t)
{
// Lane t broadcasts its work item; all 32 lanes then cooperate on
// 32 consecutive output positions of that item.
const int lane_id = threadIdx.x % WARP_SIZE;
const int cur_wh_index = __shfl(send_wh_index, t) + lane_id;
if (cur_wh_index < (width_col*height_col))// && (cur_i_pad+pad) < ksize)
{
const int cur_pre_out_index = __shfl(pre_out_index, t);
const int cur_pre_in_index = __shfl(pre_in_index, t);
const int cur_pre_in_wh_index = __shfl(pre_in_wh_index, t) + lane_id;
int w = cur_pre_in_wh_index % width;
int h = cur_pre_in_wh_index / width;
int in_index = cur_pre_in_index + cur_pre_in_wh_index;
int out_index = cur_pre_out_index + cur_wh_index;
// Zero for out-of-image taps; only the sign bit survives.
float val = (w >= 0 && w < width && h >= 0 && h < height) ?
data_im[in_index] : float();
//data_col[out_index] = val;
//tmp_s[0] = val;
// NOTE(review): legacy mask-less __ballot/__shfl — presumably kept
// for the HIP toolchain; CUDA Volta+ requires the *_sync forms.
uint32_t bit_mask = __ballot(val > 0);
if (lane_id == 0) {
uint8_t *bit8_ptr = &(((uint8_t *)data_col)[out_index / 8]);
uint32_t *bit32_ptr = (uint32_t *)bit8_ptr;
*bit32_ptr = bit_mask;
}
}
}
}// w_out
}
}
}
// Launch the binary im2col kernel (the section marker above notes it assumes
// stride = 1). One thread per (channel, kernel-row, kernel-col) triple.
void im2col_align_bin_ongpu(float *im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float *data_col, int bit_align) {
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int num_kernels = channels * ksize * ksize;
    int num_blocks = num_kernels / BLOCK + 1;
    im2col_align_bin_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
        num_kernels, im, height, width, ksize, channels, pad,
        stride, height_col,
        width_col, data_col, bit_align);
}
// --------------------------------
// Pack the sign bits of `size` floats into a bit array: bit i is 1 iff
// src[i] > 0. Each warp produces one 32-bit word via ballot; lane 0 writes it.
__global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
{
//const int size_aligned = size + (WARP_SIZE - size % WARP_SIZE);
int index = blockIdx.x*blockDim.x + threadIdx.x;
float src_val;
//for (; index < size_aligned; index += blockDim.x*gridDim.x)
{
//src_val = src[index];
// Tail lanes past `size` contribute a 0 bit.
if (index < size) src_val = src[index];
else src_val = 0;
//unsigned int bit_mask = __ballot_sync(0xffffffff, src_val > 0);
// NOTE(review): legacy mask-less __ballot — presumably kept for the HIP
// toolchain; CUDA Volta+ requires __ballot_sync.
unsigned int bit_mask = __ballot(src_val > 0);
if (threadIdx.x % WARP_SIZE == 0) ((unsigned int*)dst)[index / 32] = bit_mask;
}
}
// Host wrapper: one thread per input float.
void float_to_bit_gpu(float *src, unsigned char *dst, size_t size)
{
const int num_blocks = size / BLOCK + 1;
float_to_bit_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, dst, size);
}
// --------------------------------
// Clear bit `index` in the little-endian bit array dst.
__device__ __host__ static inline void remove_bit(unsigned char *const dst, size_t index) {
    dst[index / 8] &= ~(1 << (index % 8));
}
// Set bit `index` in the little-endian bit array dst.
__device__ __host__ static inline void set_bit(unsigned char *const dst, size_t index) {
    dst[index / 8] |= 1 << (index % 8);
}
// Read bit `index` from the little-endian bit array src (returns 0 or 1).
__device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) {
    unsigned char val = (src[index / 8] & (1 << (index % 8))) != 0;
    return val;
}
// Intel CPUs and nVidia CUDA GPU are little endian
// Reverse the bit order of a byte with explicit shifts and masks (host/device).
__device__ __host__ unsigned char reverse_byte(unsigned char a)
{
return ((a & 0x1) << 7) | ((a & 0x2) << 5) |
((a & 0x4) << 3) | ((a & 0x8) << 1) |
((a & 0x10) >> 1) | ((a & 0x20) >> 3) |
((a & 0x40) >> 5) | ((a & 0x80) >> 7);
}
// Reverse a byte with the hardware bit-reverse intrinsic; __brev reverses all
// 32 bits, so the result is shifted back down into the low byte.
__device__ unsigned char reverse_byte_CUDA(unsigned char a)
{
uint32_t tmp = __brev(a);
return tmp >> 24;
}
// Branch-free byte bit-reversal via the classic 64-bit multiply/mask trick.
__device__ __host__ unsigned char reverse_byte_2(unsigned char a)
{
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
// Transpose an 8x8 bit block (Hacker's Delight style): 8 input bytes are read
// with stride m, packed into two 32-bit words, transposed with three
// swap/mask stages, then written with stride n — with each output byte
// bit-reversed and the row order reversed (hence "reversed diagonale").
__device__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B)
{
unsigned x, y, t;
// Load the array and pack it into x and y.
x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];
// Bit-swap stages: exchange 1-bit, then 2-bit, then 4-bit groups.
t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
// Store rows in reverse order with bit-reversed bytes.
B[7 * n] = reverse_byte_CUDA(x >> 24); B[6 * n] = reverse_byte_CUDA(x >> 16); B[5 * n] = reverse_byte_CUDA(x >> 8); B[4 * n] = reverse_byte_CUDA(x);
B[3 * n] = reverse_byte_CUDA(y >> 24); B[2 * n] = reverse_byte_CUDA(y >> 16); B[1 * n] = reverse_byte_CUDA(y >> 8); B[0 * n] = reverse_byte_CUDA(y);
}
// Transpose an n x m bit matrix (lda/ldb are row pitches in bits) using 8x8
// bit tiles; the ragged tail (j within 8 of m) falls back to per-bit get/set.
__global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
int index = blockIdx.x*blockDim.x + threadIdx.x;
//for (i = 0; i < n; i += 8)
{
// Each thread owns one 8x8 tile at (i, j).
i = (index * 8) % n;
int j;
//for (j = 0; j < m - 8; j += 8)
{
j = ((index * 8) / n) * 8;
if (j < m - 8) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose8rS32_reversed_diagonale(&A[a_index / 8], lda / 8, ldb / 8, &B[b_index / 8]);
}
else if (j < m) {
// Tail columns: copy bit by bit.
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
else remove_bit(B, j*ldb + i);
}
}
}
}
}
// Branch-free byte bit-reversal (same multiply/mask trick as reverse_byte_2).
__device__ __host__ uint8_t reverse_8_bit(uint8_t a) {
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
// Reverse the bit order of a 32-bit word using the hardware intrinsic.
__device__ uint32_t reverse_32_bit(uint32_t a)
{
// __device__ unsigned int __brev(unsigned int x) // CUDA
// unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
return __brev(a);
//return (reverse_8_bit(a >> 24) << 0) |
// (reverse_8_bit(a >> 16) << 8) |
// (reverse_8_bit(a >> 8) << 16) |
// (reverse_8_bit(a >> 0) << 24);
}
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
// In-place transpose of a 32x32 bit matrix held in A[0..31]: five unrolled
// butterfly stages (group sizes 16, 8, 4, 2, 1 via the `swap` macro above),
// followed by a pass that reverses the row order with bit-reversed rows.
__device__ void transpose32_optimized(uint32_t A[32]) {
int j, k;
unsigned m, t;
// Original rolled form of the five stages below:
//m = 0x0000FFFF;
//for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
// for (k = 0; k < 32; k = (k + j + 1) & ~j) {
// t = (A[k] ^ (A[k + j] >> j)) & m;
// A[k] = A[k] ^ t;
// A[k + j] = A[k + j] ^ (t << j);
// }
//}
j = 16;
m = 0x0000FFFF;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 8;
m = 0x00ff00ff;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 4;
m = 0x0f0f0f0f;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 2;
m = 0x33333333;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 1;
m = 0x55555555;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
// reverse Y
for (j = 0; j < 16; ++j) {
uint32_t tmp = A[j];
A[j] = reverse_32_bit(A[31 - j]);
A[31 - j] = reverse_32_bit(tmp);
}
}
#define BLOCK_TRANSPOSE32 256
// Transpose one 32x32 bit tile: rows are read from A with stride m words,
// staged per-thread in shared memory (32 words per thread, sized for
// BLOCK_TRANSPOSE32 threads per block), transposed in place, then written to
// B with stride n words. No __syncthreads needed: each thread touches only
// its own 32-word slice of A_shared.
__device__ void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
//unsigned A_tmp[32];
//int i;
//#pragma unroll
//for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
//transpose32_optimized(A_tmp);
//#pragma unroll
//for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
__shared__ uint32_t A_shared[32 * BLOCK_TRANSPOSE32];
uint32_t *A_tmp = &A_shared[32 * threadIdx.x];
int i;
#pragma unroll 32
for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
transpose32_optimized(A_tmp);
#pragma unroll 32
for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}
// transpose 32x32 bit
// transpose 32x32 bit
// One thread per 32x32 bit tile of the n x m bit matrix (lda/ldb in bits).
// NOTE(review): unlike the 8x8 kernel, there is no per-bit fallback for a
// ragged tail — m is presumably always a multiple of 32 here; verify callers.
__global__ void transpose_bin_gpu_kernel_32(uint32_t *A, uint32_t *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
int index = (blockIdx.x*blockDim.x + threadIdx.x) * 32;
//for (i = 0; i < n; i += 8)
{
i = index % n;
int j;
//for (j = 0; j < m - 8; j += 8)
{
j = (index / n) * 32;
if (j < m) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
}
}
}
}
// Transpose an n x m bit matrix (lda/ldb are row pitches in bits) using the
// 32x32 bit-tile kernel; block_size is forwarded but unused by the kernel.
void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    // One thread per 32x32 bit tile.
    size_t size32 = n * m / (32 * 32) + 1;
    const int num_blocks32 = size32 / BLOCK_TRANSPOSE32 + 1;
    transpose_bin_gpu_kernel_32 << <num_blocks32, BLOCK_TRANSPOSE32, 0, 0 >> >((uint32_t *)A, (uint32_t *)B, n, m, lda, ldb, block_size);
    //transpose_bin_gpu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(A, B, n, m, lda, ldb, block_size);
}
// --------------------------------
// Fill `size` bytes of src with the value `val`.
__global__ void fill_int8_gpu_kernel(unsigned char *src, unsigned char val, size_t size) {
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // Fix: store the requested fill value. The original hard-coded 0 and
    // ignored `val`, so any non-zero fill silently zeroed the buffer instead.
    if (index < size) src[index] = val;
}
// Host wrapper: one thread per byte.
void fill_int8_gpu(unsigned char *src, unsigned char val, size_t size)
{
    const int num_blocks = size / BLOCK + 1;
    fill_int8_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, val, size);
}
// --------------------------------
//typedef unsigned long long int uint64_t;
//typedef unsigned int uint32_t;
//typedef unsigned char uint8_t;
//typedef char int8_t;
// Expand a single bit to a 64-bit mask: all ones when src is set, else zero.
__device__ __host__ static inline uint64_t broadcast_bit_1_to_64(uint8_t src) {
return (src > 0) ? 0xFFFFFFFFFFFFFFFF : 0;
}
// XNOR of the lowest bit of a and b.
__device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) {
return ~(a^b) & 0b1;
}
// Bitwise XNOR over 32 / 64 / 128 / 256-bit operands.
__device__ __host__ static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
return ~(a^b);
}
__device__ __host__ static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
return ~(a^b);
}
__device__ __host__ static inline uint4 xnor_int128(uint4 a, uint4 b) {
uint4 res;
res.w = ~(a.w^b.w);
res.x = ~(a.x^b.x);
res.y = ~(a.y^b.y);
res.z = ~(a.z^b.z);
return res;
}
__device__ __host__ static inline ulonglong4 xnor_int256(ulonglong4 a, ulonglong4 b) {
ulonglong4 res;
res.w = ~(a.w^b.w);
res.x = ~(a.x^b.x);
res.y = ~(a.y^b.y);
res.z = ~(a.z^b.z);
return res;
}
// XOR counterparts of the helpers above.
__device__ __host__ static inline uint8_t xor_bit1(uint8_t a, uint8_t b) {
return (a^b) & 0b1;
}
__device__ __host__ static inline uint32_t xor_int32(uint32_t a, uint32_t b) {
return (a^b);
}
__device__ __host__ static inline uint64_t xor_int64(uint64_t a, uint64_t b) {
return (a^b);
}
__device__ __host__ static inline uint4 xor_int128(uint4 a, uint4 b) {
uint4 res;
res.w = (a.w^b.w);
res.x = (a.x^b.x);
res.y = (a.y^b.y);
res.z = (a.z^b.z);
return res;
}
__device__ __host__ static inline ulonglong4 xor_int256(ulonglong4 a, ulonglong4 b) {
ulonglong4 res;
res.w = (a.w^b.w);
res.x = (a.x^b.x);
res.y = (a.y^b.y);
res.z = (a.z^b.z);
return res;
}
// Population count of a 256-bit value: sum of four 64-bit popcounts.
__device__ static inline int popcnt_256(ulonglong4 a) {
return __popcll(a.w) + __popcll(a.x) + __popcll(a.y) + __popcll(a.z);
}
// --------------------------------
// --------------------------------
// --------------------------------
// sequentially - B (input) in the shared_memory - BAD
// --------------------------------
// Binary GEMM: C[i][j] = (2*popcnt(xnor(A_row_i, B_row_j)) - K) * mean_arr[i],
// where A holds bit-packed weights and B the bit-packed (transposed) input.
// Each block first stages its slice of B in shared memory, then one warp
// (K_items lanes) cooperatively reduces one output element. The section
// comment above marks this "sequentially" variant as slow ("BAD").
__global__ void gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
//__shared__ float mean_shared[32];
//__shared__ uint32_t B_s[8192]; // 32 KB // [ldb x N`] // max = 262 144 bits
//__shared__ uint32_t B_s[4096]; // 16 KB // [ldb x N`] // max = 131 072 bits
__shared__ uint8_t B_s[4096 * 4]; // 16 KB // [ldb x N`] // max = 131 072 bits
const int K_items = WARP_SIZE;
int start_j = blockIdx.x*blockDim.x / (K_items * M);
{
// Cooperative copy of B rows [start_j, end_j) into shared memory,
// 32 bits per thread per step.
int end_j = (blockIdx.x*blockDim.x + blockDim.x) / (K_items * M) + 1;
if (end_j > N) end_j = N;
size_t shared_size = ldb * (end_j - start_j);
if (shared_size != 0) {
//if(threadIdx.x == 0) printf(" start_j = %d, end_j = %d, shared_size = %d \n", start_j, end_j, shared_size);
int k;
for (int k = threadIdx.x * 32; k < shared_size; k += blockDim.x * 32) {
int x = start_j*ldb + k;
if (x < (N*ldb)) *((uint32_t *)(B_s + k / 8)) = *((uint32_t *)(B + x / 8));
}
}
}
__syncthreads();
int index = blockIdx.x*blockDim.x + threadIdx.x;
{
int i; // l.n
int j; // out_h*out_w
int k; // l.size * l.size * l.c
// K_items threads share one (i, j) output element.
const int index2 = index / K_items;
i = index2 % M; // max M
j = index2 / M; // max N
int local_j = j - start_j;
//if (i <= 1 && j <= 1 ) printf(" k = %d, K = %d, K_items = %d, i = %d, j = %d, lda = %d, ldb = %d, ldc = %d \n",
// k, K, K_items, i, j, lda, ldb, ldc);
{ // l.n - filters [16 - 55 - 1024]
// further improvements: for (l.n == 1024) iterate several (j)
if (j < N)
{ // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
const int bit_step = 32;
// Each lane handles every WARP_SIZE-th 32-bit chunk of the K axis.
for (k = (threadIdx.x % WARP_SIZE) * bit_step; k < K; k += bit_step*WARP_SIZE)
{ // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
uint32_t a_bit32 = *((uint32_t *)(A + (i*lda + k) / 8)); // weights
//uint32_t b_bit32 = *((uint32_t *)(B + (j*ldb + k) / 8)); // input
uint32_t b_bit32 = *((uint32_t *)(B_s + (local_j*ldb + k) / 8)); // input
uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
count += __popc(c_bit32);
}
// Warp tree-reduce of the partial popcounts.
// NOTE(review): legacy mask-less __shfl_down — presumably kept for the
// HIP toolchain; CUDA Volta+ requires __shfl_down_sync.
for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2)
count += __shfl_down(count, offset);
if (threadIdx.x % WARP_SIZE == 0) {
// f1 subtracts the matches contributed by padding bits in the
// last partial 32-bit chunk of K.
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1;
float mean_val = mean_arr[i];
C[i*ldc + j] = (2 * count - K) * mean_val;
//B_s[threadIdx.x / WARP_SIZE] = (2 * count - K) * mean_val;
}
}
}
}
}
// sequentially - BAD
// Host launcher for the shared-memory binary GEMM above: 32 threads (one
// warp's worth of K_items) are launched per output element, hence size = M*N*32.
void gemm_nn_custom_bin_mean_transposed_sequentially_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
//size_t size = M*N;
size_t size = M*N * 32;
const int num_blocks = size / BLOCK + 1;
//printf(" K = %d \n", K);
/*
printf("\n gemm_bin size = %d, num_blocks = %d, M*K = %d KB, N*K = %d KB \n (w) M*K/num_blocks = %d KB, (i) N*K/num_blocks = %d KB \n",
size, num_blocks, M*K / 1024, N*K / 1024, M*lda / num_blocks / 1024, N*ldb / num_blocks / 1024);
printf(" M / 512 = %d, N / 512 = %d, M*lda / 512 = %d, N*ldb / 512 = %d \n", M / 512, N / 512, M*lda/512, N*ldb/512);
*/
//printf(" shared_memory: (w) lda*BLOCK/N = %d, (i) ldb*BLOCK/M = %d, \t lda = %d \n\n", lda*BLOCK / N, ldb*BLOCK / M, lda);
gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr);
}
// --------------------------------
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
// Bit-packs a float feature map for XNOR layers: 32 consecutive channels are
// collapsed into one uint32 per spatial position, where bit c_pack is set iff
// the value at channel (chan + c_pack) is > 0. Each warp handles WARP_SIZE
// spatial positions of one 32-channel group: __ballot_custom gathers the
// sign bits of all lanes for one channel, and each lane keeps its own bit.
__global__ void repack_input_kernel_bin(float *input, uint32_t *re_packed_input_bin, int w, int h, int c)
{
    //__shared__ uint32_t tmp[32];
    const int index = blockIdx.x*blockDim.x + threadIdx.x;
    const int global_warp_id = index / WARP_SIZE;
    const int lane_id = threadIdx.x % WARP_SIZE;
    const int items_per_channel = w * h;
    // Round the spatial size up to a multiple of WARP_SIZE so each warp maps
    // to a full run of lanes (adds one extra warp when already aligned).
    const int items_per_channel_aligned = items_per_channel + WARP_SIZE - (items_per_channel % WARP_SIZE);
    // i    - first spatial index handled by this warp (lane_id offsets it)
    // chan - first channel of the 32-channel group this warp packs
    int i = 32 * (global_warp_id % (items_per_channel_aligned / WARP_SIZE));
    int chan = 32 * (global_warp_id / (items_per_channel_aligned / WARP_SIZE));
    if (chan < c)
    {
        uint32_t result_bits = 0;
        for (int c_pack = 0; c_pack < 32; ++c_pack)
        {
            float src = 0;  // out-of-range lanes contribute a zero bit
            if ((i + lane_id) < items_per_channel) {
                src = input[(chan + c_pack)*items_per_channel + (i + lane_id)];
            }
            // One ballot per channel: bit t of bit_mask is lane t's (src > 0).
            uint32_t bit_mask = __ballot_custom(src > 0);
            uint32_t cur_bit = (bit_mask >> lane_id) & uint32_t(1);
            result_bits |= (cur_bit << c_pack);
        }
        if ((i + lane_id) < items_per_channel) {
            // One packed word per spatial position of this channel group.
            re_packed_input_bin[chan*items_per_channel / 32 + (i + lane_id)] = result_bits;
        }
    }
}
// Host launcher for repack_input_kernel_bin: packs the (w x h x c) float
// tensor into uint32 words of 32-channel sign bits. The thread count mirrors
// the original sizing: (w*h*c)/32 + 1 threads.
void repack_input_gpu_bin(float *input, uint32_t *re_packed_input_bin, int w, int h, int c)
{
    const int threads_per_block = BLOCK;
    int thread_count = (w * h * c) / 32 + 1;    // one thread per packed output word (plus one spare)
    const int grid_size = get_number_of_blocks(thread_count, threads_per_block);
    repack_input_kernel_bin << <grid_size, threads_per_block >> >(input, re_packed_input_bin, w, h, c);
    CHECK_CUDA(hipPeekAtLastError());
}
// --------------------------------
// Transposes a src_h x src_w matrix of 32-bit words: element (row, col) of src
// becomes element (col, row) of dst. Source rows are strided by src_align
// words; destination rows by dst_align / 32 words (dst_align appears to be
// expressed in bits -- confirm against call sites). One thread per element;
// the fastest-varying part of the thread index walks down a source column.
__global__ void transpose_uint32_kernel(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = tid % src_h;    // source row    (l.size*l.size*l.c in the caller)
    const int col = tid / src_h;    // source column (out_h*out_w in the caller)
    if (col >= src_w) return;       // tail threads beyond the matrix do nothing
    dst[(col * dst_align) / 32 + row] = src[row * src_align + col];
}
// Host launcher for transpose_uint32_kernel: transposes a src_h x src_w
// matrix of uint32 words on the GPU. The kernel indexes src by src_align
// (words) and dst by dst_align / 32 (dst_align presumably in bits --
// verify at call sites). One thread per matrix element.
void transpose_uint32_gpu(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
    int size = src_w * src_h;
    // Proper ceiling division (matches the im2col launch below); the old
    // "size / BLOCK + 1" always launched one extra idle block whenever
    // size was an exact multiple of BLOCK.
    const int num_blocks = (size + BLOCK - 1) / BLOCK;
    transpose_uint32_kernel << <num_blocks, BLOCK >> >(src, dst, src_h, src_w, src_align, dst_align);
    CHECK_CUDA(hipPeekAtLastError());
}
// --------------------------------
// Butterfly (XOR-shuffle) reduction: after log2(WARP_SIZE) exchange steps,
// every lane in the warp holds the sum of val over all lanes.
__inline__ __device__
int warpAllReduceSum(int val) {
    int lane_delta = WARP_SIZE / 2;
    while (lane_delta > 0) {
#if CUDART_VERSION >= 9000
        val += __shfl_xor_sync(FULL_MASK, val, lane_delta);
#else
        val += __shfl_xor(val, lane_delta);
#endif
        lane_delta /= 2;
    }
    return val;
}
// --------------------------------
// Coalescing
// A (weights) in the shared_memory - GOOD
// Binary (XNOR-net style) GEMM kernel:
//   C[i*ldc + j] = (2*count - K) * mean_arr[i] + bias_arr[i]
// where count is the popcount of XOR between bit-packed weight row i of A
// (row-major, lda BITS per row) and bit-packed, transposed input row j of B
// (ldb BITS per row). Optional leaky activation; if shortcut_out_gpu is set,
// also writes shortcut_in_gpu[idx] + dst_val there.
// One thread per C element. The A rows needed by this block are first staged
// into shared memory (A_s); the main loops are warp-cooperative: all 32 lanes
// process one lane's (i,j) pair at a time via __shfl_custom, and the reduced
// popcount is handed back to that lane.
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr, float *bias_arr, int leaky_activation,
    float *shortcut_in_gpu, float *shortcut_out_gpu)
{
    // total 57%
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ uint8_t A_s[6144 * 8 / 4];   // 12 KB staging buffer for this block's A rows
    //__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
    //__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
    // Range of A rows (values of i) touched by the threads of this block.
    int start_i = blockIdx.x*blockDim.x / N;
    int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
    size_t shared_size = lda * (end_i - start_i);   // bits of A to stage
    int i_cur = index / N;
    int local_i = i_cur - start_i;  // this thread's row inside A_s
    // Cooperative copy of A into shared memory, 64 bits per thread per step.
    // ~10%
    for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
        int x = start_i*lda + k;    // bit offset into A
        if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
    }
    __syncthreads();
    int i, j, k, h;
    // 47% = 29 + 10 + 8
    j = index % N;  // output column (out_h*out_w position)
    { // out_h*out_w - one channel output size [169 - 173056]
        i = index / N;  // output row (filter index); may exceed M for tail threads
        //if (i < M) // l.n - filters [16 - 55 - 1024]
        {
            int count = 0;
            k = 0;
#ifdef NOT_USED
            // 32 thread X 256 bit = 8192 bit
            for (; k < (K - 8192); k += 8192) { // l.size*l.size*l.c - one filter size [27 - 9216]
                ulonglong4 c_bit256;
                //int64_t A_cur_index = (i*lda + k) / 8;
                int64_t A_cur_index = (local_i*lda + k) / 8;
                int64_t B_cur_index = (j*ldb + k) / 8;
                if (i >= M) A_cur_index = 0;    // clamp so out-of-range threads read valid memory
#pragma unroll
                for (int t = 0; t < WARP_SIZE; ++t) {
                    const int lane_id = threadIdx.x % WARP_SIZE;
                    // Broadcast lane t's indices; all 32 lanes cooperate on them.
                    const int64_t A_i = __shfl_custom(A_cur_index, t) + 32 * lane_id;
                    const int64_t B_i = __shfl_custom(B_cur_index, t) + 32 * lane_id;
                    {
                        //ulonglong4 a_bit256 = *((ulonglong4 *)(A + A_i)); // weights
                        ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + A_i)); // weights
                        ulonglong4 b_bit256 = *((ulonglong4 *)(B + B_i)); // input
                        c_bit256 = xor_int256(a_bit256, b_bit256);
                        int tmp_count = __popcll(c_bit256.w) + __popcll(c_bit256.x) +
                            __popcll(c_bit256.y) + __popcll(c_bit256.z);
                        int sum_count = warpAllReduceSum(tmp_count);
                        if (lane_id == t) count += sum_count;   // only the owner lane accumulates
                    }
                }
            }
#endif
            //#ifdef NOT_USED
            // 32 thread X 64 bit = 2048 bit // 29%
            for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t c_bit64;
                //int64_t A_cur_index = (i*lda + k) / 8;
                int64_t A_cur_index = (local_i*lda + k) / 8;
                int64_t B_cur_index = (j*ldb + k) / 8;
                if (i >= M) A_cur_index = 0;    // clamp so out-of-range threads read valid memory
#pragma unroll
                for (int t = 0; t < WARP_SIZE; ++t) {
                    const int lane_id = threadIdx.x % WARP_SIZE;
                    // Broadcast lane t's indices; each lane loads its 64-bit slice.
                    const int64_t A_i = __shfl_custom(A_cur_index, t) + 8 * lane_id;
                    const int64_t B_i = __shfl_custom(B_cur_index, t) + 8 * lane_id;
                    {
                        //uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
                        uint64_t a_bit64 = *((uint64_t *)(A_s + A_i)); // weights
                        uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
                        c_bit64 = xor_int64(a_bit64, b_bit64);
                        int tmp_count = __popcll(c_bit64);
                        int sum_count = warpAllReduceSum(tmp_count);
                        if (lane_id == t) count += sum_count;   // only the owner lane accumulates
                    }
                }
            }
            //#endif
            //#ifdef NOT_USED
            // 32 thread X 32 bit = 1024 bit // 10%
            for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
                //int64_t A_cur_index = (i*lda + k) / 8;
                int64_t A_cur_index = (local_i*lda + k) / 8;
                int64_t B_cur_index = (j*ldb + k) / 8;
                if (i >= M) A_cur_index = 0;    // clamp so out-of-range threads read valid memory
#pragma unroll
                for (int t = 0; t < WARP_SIZE; ++t) {
                    const int lane_id = threadIdx.x % WARP_SIZE;
                    // Broadcast lane t's indices; each lane loads its 32-bit slice.
                    const int64_t A_i = __shfl_custom(A_cur_index, t) + 4 * lane_id;
                    const int64_t B_i = __shfl_custom(B_cur_index, t) + 4 * lane_id;
                    {
                        //uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
                        uint32_t a_bit32 = *((uint32_t *)(A_s + A_i)); // weights
                        uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
                        uint32_t c_bit32 = xor_int32(a_bit32, b_bit32);
                        int tmp_count = __popc(c_bit32);
                        int sum_count = warpAllReduceSum(tmp_count);
                        if (lane_id == t) count += sum_count;   // only the owner lane accumulates
                    }
                }
            }
            //#endif
            // Tail + epilogue are per-thread; only valid rows write output.
            if (i < M)
            {
                float mean_val = mean_arr[i];
                float bias_val = bias_arr[i];
                //#ifdef NOT_USED
                // 8%  - remaining K bits, 256 at a time (K is padded/aligned; see f1 below)
                for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
                    //ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
                    ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + (local_i*lda + k) / 8)); // weights
                    ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
                    ulonglong4 c_bit256 = xor_int256(a_bit256, b_bit256);
                    count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
                        __popcll(c_bit256.y) + __popcll(c_bit256.z);
                }
                //#endif
#ifdef NOT_USED
                for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
                    //uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
                    uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
                    uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
                    uint64_t c_bit64 = xor_int64(a_bit64, b_bit64);
                    count += __popcll(c_bit64);
                }
#endif
                const int bit_step = 256;
                int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
                count = count - f1; // remove extra bits (from empty space for align only)
                float dst_val = (2 * count - K) *mean_val + bias_val;
                if (leaky_activation)
                    dst_val = (dst_val >= 0) ? (dst_val) : (0.1f*dst_val); // Leaky activation
                size_t out_index = i*ldc + j;
                C[out_index] = dst_val;
                if (shortcut_out_gpu) {
                    shortcut_out_gpu[out_index] = shortcut_in_gpu[out_index] + dst_val;
                }
            }
        }
    }
}
// --------------------------------
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col: expands each ksize x ksize receptive field of data_im (one input
// channel) into a column of data_col, zero-filling padded positions, so a
// convolution becomes a GEMM. One thread handles one (channel, h_out, w_out)
// output position and writes its ksize*ksize patch values, each spaced
// height_col*width_col apart in data_col.
// n = channels * height_col * width_col; a grid-stride loop covers all n.
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
    const int height, const int width, const int ksize,
    const int pad,
    const int stride,
    const int height_col, const int width_col,
    float *data_col) {
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    for (; index < n; index += blockDim.x*gridDim.x) {
        // Decompose the flat index into (channel_in, h_out, w_out).
        int w_out = index % width_col;
        int h_index = index / width_col;
        int h_out = h_index % height_col;
        int channel_in = h_index / height_col;
        // Each input channel expands to ksize*ksize rows of data_col.
        int channel_out = channel_in * ksize * ksize;
        // Top-left corner of the receptive field in the input (may be negative
        // because of padding).
        int h_in = h_out * stride - pad;
        int w_in = w_out * stride - pad;
        float* data_col_ptr = data_col;
        data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
        const float* data_im_ptr = data_im;
        data_im_ptr += (channel_in * height + h_in) * width + w_in;
        for (int i = 0; i < ksize; ++i) {
            for (int j = 0; j < ksize; ++j) {
                int h = h_in + i;
                int w = w_in + j;
                // Zero-fill positions that fall inside the padding border.
                *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
                    data_im_ptr[i * width + j] : 0;
                //data_im[(channel_in * height + h_in) * width + w_in + i * width + j];
                //*data_col_ptr = data_im_ptr[ii * width + jj];
                // Advance one row of data_col (one row per patch element).
                data_col_ptr += height_col * width_col;
            }
        }
    }
}
// Host launcher for im2col: expands the (channels x height x width) image `im`
// into the column buffer `data_col` for a ksize x ksize convolution with the
// given stride and zero padding. One CUDA thread per (channel, out_y, out_x)
// position; the kernel's grid-stride loop covers any remainder.
void im2col_ongpu(float *im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float *data_col)
{
    const int out_h = (height + 2 * pad - ksize) / stride + 1;
    const int out_w = (width + 2 * pad - ksize) / stride + 1;
    const int total_threads = channels * out_h * out_w;
    const int grid = (total_threads + BLOCK - 1) / BLOCK;   // ceil-div
    im2col_gpu_kernel << <grid, BLOCK >> >(
        total_threads, im, height, width, ksize, pad,
        stride, out_h, out_w, data_col);
    CHECK_CUDA(hipPeekAtLastError());
}
// --------------------------------
// Tensor Cores binary (CC >= 7.3 && CUDA >= 10.0) - __CUDA_SUBBYTE_IMMA__
#if CUDART_VERSION >= 10000
#include <mma.h>
#define WMMA_M 8
#define WMMA_N 8
#define WMMA_K 128
#define WMMA_K32 (WMMA_K/32)
#define WMMA_Nx2 (WMMA_N*2)
// Tensor Cores are used for XOR-GEMM
// Tensor-Core binary GEMM (XOR + popcount via wmma::bmma_sync on b1
// fragments). Each warp computes one WMMA_M x WMMA_Nx2 (8 x 16) tile of C,
// as two 8x8 sub-tiles (c1_frag / c2_frag). For __CUDA_ARCH__ < 730 there is
// an equivalent hand-rolled warp-shuffle fallback. Raw tile results are
// staged in shared C_s, then rescaled per row:
//   C[(i+i_d)*ldc + col] = (2*count - K) * mean_arr[i+i_d] + bias_arr[i+i_d]
// with optional leaky activation and optional shortcut add.
// lda/ldb are in BITS. i/j are clamped so full 8x16 tiles fit inside M x N
// (so edge tiles are recomputed/overwritten rather than out-of-bounds).
__global__ void gemm_nn_custom_bin_mean_transposed_tensor_kernel(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr, float *bias_arr, int leaky_activation,
    float *shortcut_in_gpu, float *shortcut_out_gpu)
{
    // total 57%
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ int C_s[WMMA_N * WMMA_M * 32 * 2]; // 2 * 8 KB - Temprorary result of GEMM WMMA for 32 warps
    const int lane_id = threadIdx.x % 32;
    const int warp_id = threadIdx.x / 32;
    const int global_warp_id = index / 32;
    // N rounded up to a multiple of the per-warp tile width (16).
    const int N_aligned = N + WMMA_Nx2 - (N % WMMA_Nx2);
    /*
    __syncthreads();
    __shared__ uint32_t A_s[8 * 512]; // 8x512 = 8 x 16384 bits, instead of 8x4
    const int start_global_warp_id = blockIdx.x*blockDim.x / 32;
    int start_i = start_global_warp_id / (N_aligned / WMMA_N);
    start_i = start_i * WMMA_M;
    if (start_i + WMMA_M > M) start_i = M - WMMA_M; // must be: i+7 < M
    for (int tmp_index = threadIdx.x; tmp_index < (8 * 512); tmp_index += blockDim.x)
    {
        int k_tmp = tmp_index % 512;
        int local_i = tmp_index / 512;
        uint32_t a_val = ((uint32_t *)(A))[(start_i + local_i)*lda/32 + k_tmp];
        A_s[local_i * 512 + k_tmp] = a_val;
    }
    __syncthreads();
    */
    int i, j, k, h;
    // 47% = 29 + 10 + 8
    // Map this warp to its output tile: j = first column, i = first row.
    j = global_warp_id % (N_aligned / WMMA_Nx2);
    j = j * WMMA_Nx2;
    { // out_h*out_w - one channel output size [169 - 173056]
        i = global_warp_id / (N_aligned / WMMA_Nx2);
        i = i * WMMA_M;
        int count = 0;
        k = 0;
        if (i < M) //if (i < M) // l.n - filters [16 - 55 - 1024]
        {
            // Clamp edge tiles fully inside the matrix.
            if (j + WMMA_Nx2 > N) j = N - WMMA_Nx2; // must be: j+7 < N
            if (i + WMMA_M > M) i = M - WMMA_M; // must be: i+7 < M
#if __CUDA_ARCH__ >= 730
            // Tensor Cores
            using namespace nvcuda;
            wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, wmma::experimental::precision::b1, wmma::row_major> a_frag;
            wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, wmma::experimental::precision::b1, wmma::col_major> b_frag;
            wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int> c1_frag, c2_frag;
            wmma::fill_fragment(c1_frag, 0); // !!!! XOR isn't XNOR !!!!!!!!!!
            wmma::fill_fragment(c2_frag, 0); // !!!! XOR isn't XNOR !!!!!!!!!!
            // 8 x 8 x 4 (uint32_t, 4 * 32 = 128 bit)
            for (; k < K; k += 128) // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
            {
                int64_t A_cur_index = (i*lda + k) / 8; // index in bits
                int64_t B1_cur_index = (j*ldb + k) / 8; // index in bits
                int64_t B2_cur_index = ((j + 8)*ldb + k) / 8; // index in bits
                // try to use A that is cached in shared memory - poor performance
                //if (i == start_i) wmma::load_matrix_sync(a_frag, &A_s[k / 32], (512 * 32)); // lda = (128*32) bits
                //else wmma::load_matrix_sync(a_frag, (uint32_t *)(A + A_cur_index), lda); // lda = M
                // lda, ldb - are in bits
                wmma::load_matrix_sync(a_frag, (uint32_t *)(A + A_cur_index), lda); // lda = M
                // Two bmma calls per k-step: left and right 8-column halves of the tile.
                wmma::load_matrix_sync(b_frag, (uint32_t *)(B + B1_cur_index), ldb); // ldb = K
                wmma::bmma_sync(c1_frag, a_frag, b_frag, c1_frag); // XOR-GEMM
                wmma::load_matrix_sync(b_frag, (uint32_t *)(B + B2_cur_index), ldb); // ldb = K
                wmma::bmma_sync(c2_frag, a_frag, b_frag, c2_frag); // XOR-GEMM
            }
            // C[i*ldc + j]
            wmma::store_matrix_sync(&C_s[warp_id*WMMA_M*WMMA_N], c1_frag, WMMA_N, wmma::mem_row_major);
            wmma::store_matrix_sync(&C_s[warp_id*WMMA_M*WMMA_N + WMMA_M*WMMA_N * 32], c2_frag, WMMA_N, wmma::mem_row_major);
#else // __CUDA_ARCH__ >= 730
            // Custom XOR-GEMM (warp-shuffle fallback for pre-Volta/Turing parts)
            int k_d = lane_id % 4;
            int i_d = lane_id / 4;
            int j_d = lane_id / 4;
            int32_t accum_c_val[8 * 2]; // wmma::fill_fragment(c_frag, 0);
            for (int local_j = 0; local_j < 8 * 2; ++local_j) {
                accum_c_val[local_j] = 0;
            }
            // 8 x 8 x 4 (uint32_t, 4 * 32 = 128 bit)
            for (; k < K; k += 128) // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
            {
                int64_t A_cur_index = (i*lda + k) / 8;
                //int64_t A_cur_index = (local_i*lda + k) / 8;
                int64_t B_cur_index = (j*ldb + k) / 8;
                // lda, ldb - are in bits
                // 8*4 = 32
                // 8*8 = 64
                // Lane layout: k_d selects one of 4 32-bit k-slices, i_d/j_d one of 8 rows.
                int k_d = lane_id % 4;
                int i_d = lane_id / 4;
                int j_d = lane_id / 4;
                uint32_t a_val = *(uint32_t *)(A + ((i + i_d)*lda + (k + k_d * 32)) / 8); // wmma::load_matrix_sync(a_frag, (uint32_t *)(A + A_cur_index), lda);
                for (int c_x = 0; c_x < 2; c_x++)   // two 8-column halves of the tile
                {
                    uint32_t b_val = *(uint32_t *)(B + ((c_x * 8 + j + j_d)*ldb + (k + k_d * 32)) / 8); // wmma::load_matrix_sync(b_frag, (uint32_t *)(B + B_cur_index), ldb);
                    // wmma::bmma_sync(c_frag, a_frag, b_frag, c_frag);
                    int32_t c_val[8]; // 8 x 32 threads = 256
#pragma UNROLL
                    for (int local_j = 0; local_j < 8; ++local_j)
                    {
                        // Fetch column local_j's k_d-th slice from the owning lane.
                        uint32_t b_val_cur = __shfl_custom(b_val, local_j * 4 + k_d);
                        c_val[local_j] = __popc(xor_int32(a_val, b_val_cur));
                    }
#pragma UNROLL
                    for (int local_j = 0; local_j < 8; ++local_j)
                    {
#pragma UNROLL
                        for (int local_k = 0; local_k < 4; ++local_k) {
                            // Sum the 4 k-slices of row i_d for this column.
                            accum_c_val[local_j + c_x * 8] += __shfl_custom(c_val[local_j], i_d * 4 + local_k);
                        }
                    }
                }
            }
            // only the first 8 threads (i) contain 8 good values each, in c_val[8] (j) = 8 x 8 =64
            // wmma::store_matrix_sync(&C_s[warp_id*WMMA_M*WMMA_N], c_frag, WMMA_N, wmma::mem_row_major);
            if (k_d == 0) {
                for (int c_x = 0; c_x < 2; c_x++)
                {
                    for (int local_j = 0; local_j < 8; ++local_j)
                    {
                        C_s[warp_id*WMMA_M*WMMA_N + i_d*WMMA_N + local_j + WMMA_M*WMMA_N * 32 * c_x] = accum_c_val[local_j + c_x * 8];
                    }
                }
            }
#endif // __CUDA_ARCH__ >= 730
            // Epilogue: read raw XOR-popcounts from C_s, convert to XNOR dot
            // products, scale by mean/bias, activate, and write C.
            for (int c_x = 0; c_x < 2; c_x++)
            {
                int j_d = lane_id % WMMA_N;
                {
#pragma UNROLL
                    for (int i_d = lane_id / WMMA_N; i_d < WMMA_M; i_d += WMMA_M / 2)
                    {
                        int count = C_s[warp_id*WMMA_M*WMMA_N + i_d*WMMA_N + j_d + WMMA_M*WMMA_N * 32 * c_x];
                        const int bit_step = 128;
                        int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
                        count = count - f1; // remove extra bits (from empty space for align only)
                        count = (2 * count - K);    // XOR popcount -> signed XNOR dot product
                        float mean_val = mean_arr[i + i_d];
                        float bias_val = bias_arr[i + i_d];
                        float dst_val = count *mean_val + bias_val;
                        if (leaky_activation)
                            dst_val = (dst_val >= 0) ? (dst_val) : (0.1f*dst_val); // Leaky activation
                        size_t out_index = (i + i_d)*ldc + (c_x * 8 + j + j_d);
                        C[out_index] = dst_val;
                        if (shortcut_out_gpu) {
                            shortcut_out_gpu[out_index] = shortcut_in_gpu[out_index] + dst_val;
                        }
                    }
                }
            }
        }
    }
}
#endif // CUDART_VERSION >= 10000
// --------------------------------
// GOOD
// Host dispatcher for the binary (XNOR) GEMM:
//   C[i*ldc + j] = (2*popcount_xnor(A_row_i, B_row_j) - K) * mean_arr[i] + bias[i]
// lda/ldb are in BITS; mean/bias are per output row (filter). When built with
// CUDA >= 10.0 it always takes the Tensor-Core kernel (which itself contains
// a non-tensor fallback for __CUDA_ARCH__ < 730); otherwise it launches the
// plain warp-shuffle popcount kernel.
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr, float *bias, int leaky_activation,
    float *shortcut_in_gpu, float *shortcut_out_gpu)
{
    // Sizing for the non-tensor kernel: one thread per C element.
    int size = M*N;
    const int num_blocks = get_number_of_blocks(size, BLOCK);
    //printf("\n M = %d, N = %d, M %% 8 = %d, N %% 8 = %d \n", M, N, M % 8, N % 8);
    //if (M >= 32) // l.n >= 32
#if CUDART_VERSION >= 10000
    if (1)
    {
        // NOTE: these shadow the outer size/num_blocks. The tensor kernel uses
        // one warp (WARP_SIZE threads) per 8 x 16 output tile, so M is rounded
        // up to a multiple of 8 and N to a multiple of 16.
        const int M_aligned = M + (8 - (M % 8));
        const int N_aligned = N + (16 - (N % 16));
        int size = (M_aligned / 8)*(N_aligned / 16)*WARP_SIZE;
        const int num_blocks = get_number_of_blocks(size, BLOCK);
        //printf(" lda = %d, ldb = %d, ldc = %d, lda/32 = %d, ldb/32 = %d, ldc/32 = %d \n", lda, ldb, ldc, lda / 32, ldb / 32, ldc / 32);
        //printf(" l.c (K/9) = %d, M (l.n) = %d \n", (K%9 == 0)? K / 9: K, M);
        gemm_nn_custom_bin_mean_transposed_tensor_kernel << <num_blocks, BLOCK >> > (
            M, N, K,
            A, lda,
            B, ldb,
            C, ldc,
            mean_arr, bias, leaky_activation,
            shortcut_in_gpu, shortcut_out_gpu);
    }
    else
#endif  //# CUDART_VERSION >= 10000
    {
        // Fallback path (also the only path when CUDA < 10.0).
        gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK >> > (
            M, N, K,
            A, lda,
            B, ldb,
            C, ldc,
            mean_arr, bias, leaky_activation,
            shortcut_in_gpu, shortcut_out_gpu);
    }
    CHECK_CUDA(hipPeekAtLastError());
}
// -------------------------------- | 71fc40277ea23a6d182d6ed50651fe6a1e8a11b3.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "additionally.h"
#include "gpu.h"
extern int gpu_index;
#define BLOCK 512
#define FULL_MASK 0xffffffff
#define WARP_SIZE 32
template<typename T1, typename T2>
__device__ inline T1 __shfl_custom(T1 val, T2 lane) {
#if CUDART_VERSION >= 9000
return __shfl_sync(FULL_MASK, val, lane);
#else
return __shfl(val, lane);
#endif
}
template<typename T>
__device__ inline uint32_t __ballot_custom(T val) {
#if CUDART_VERSION >= 9000
return __ballot_sync(FULL_MASK, val);
#else
return __ballot(val);
#endif
}
void pull_batchnorm_layer(layer l) {} // not required now
void push_batchnorm_layer(layer l) {} // not required now
void pull_local_layer(local_layer l) {} // not required now
void push_local_layer(local_layer l) {} // not required now
void pull_connected_layer(local_layer l) {} // not required now
void push_connected_layer(local_layer l) {} // not required now
int get_number_of_blocks(int array_size, int block_size)
{
return array_size / block_size + ((array_size % block_size > 0) ? 1 : 0);
}
void check_error(cudaError_t status)
{
cudaError_t status2 = cudaGetLastError();
if (status != cudaSuccess)
{
const char *s = cudaGetErrorString(status);
char buffer[256];
printf("CUDA Error: %s\n", s);
snprintf(buffer, 256, "CUDA Error: %s", s);
#ifdef WIN32
getchar();
#endif
error(buffer);
}
if (status2 != cudaSuccess)
{
const char *s = cudaGetErrorString(status2);
char buffer[256];
printf("CUDA Error Prev: %s\n", s);
snprintf(buffer, 256, "CUDA Error Prev: %s", s);
#ifdef WIN32
getchar();
#endif
error(buffer);
}
}
void check_error_extended(cudaError_t status, const char *file, int line, const char *date_time)
{
if (status != cudaSuccess)
printf("CUDA status Error: file: %s() : line: %d : build time: %s \n", file, line, date_time);
#ifdef DEBUG
status = cudaDeviceSynchronize();
if (status != cudaSuccess)
printf("CUDA status = cudaDeviceSynchronize() Error: file: %s() : line: %d : build time: %s \n", file, line, date_time);
#endif
check_error(status);
}
void cuda_set_device(int n)
{
gpu_index = n;
cudaError_t status = cudaSetDevice(n);
check_error(status);
}
int cuda_get_device()
{
int n = 0;
cudaError_t status = cudaGetDevice(&n);
check_error(status);
return n;
}
#ifdef CUDNN
cudnnHandle_t cudnn_handle()
{
static int init[16] = { 0 };
static cudnnHandle_t handle[16];
int i = cuda_get_device();
if (!init[i]) {
cudnnCreate(&handle[i]);
init[i] = 1;
}
return handle[i];
}
void cudnn_check_error(cudnnStatus_t status)
{
#ifdef DEBUG
cudaDeviceSynchronize();
#endif
cudnnStatus_t status2 = CUDNN_STATUS_SUCCESS;
#ifdef CUDNN_ERRQUERY_RAWCODE
cudnnStatus_t status_tmp = cudnnQueryRuntimeError(cudnn_handle(), &status2, CUDNN_ERRQUERY_RAWCODE, NULL);
#endif
if (status != CUDNN_STATUS_SUCCESS)
{
const char *s = cudnnGetErrorString(status);
char buffer[256];
printf("cuDNN Error: %s\n", s);
snprintf(buffer, 256, "cuDNN Error: %s", s);
#ifdef WIN32
getchar();
#endif
error(buffer);
}
if (status2 != CUDNN_STATUS_SUCCESS)
{
const char *s = cudnnGetErrorString(status2);
char buffer[256];
printf("cuDNN Error Prev: %s\n", s);
snprintf(buffer, 256, "cuDNN Error Prev: %s", s);
#ifdef WIN32
getchar();
#endif
error(buffer);
}
}
void cudnn_check_error_extended(cudnnStatus_t status, const char *file, int line, const char *date_time)
{
if (status != CUDNN_STATUS_SUCCESS)
printf("\n cuDNN status Error in: file: %s() : line: %d : build time: %s \n", file, line, date_time);
#ifdef DEBUG
status = cudaDeviceSynchronize();
if (status != CUDNN_STATUS_SUCCESS)
printf("\n cuDNN status = cudaDeviceSynchronize() Error in: file: %s() : line: %d : build time: %s \n", file, line, date_time);
#endif
cudnn_check_error(status);
}
#endif // CUDNN
float *cuda_make_array(float *x, size_t n)
{
float *x_gpu;
size_t size = sizeof(float)*n;
cudaError_t status = cudaMalloc((void **)&x_gpu, size);
check_error(status);
if (x) {
status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
check_error(status);
}
if (!x_gpu) error("Cuda malloc failed\n");
return x_gpu;
}
int *cuda_make_int_array(size_t n)
{
int *x_gpu;
size_t size = sizeof(int)*n;
cudaError_t status = cudaMalloc((void **)&x_gpu, size);
check_error(status);
return x_gpu;
}
void cuda_free(float *x_gpu)
{
cudaError_t status = cudaFree(x_gpu);
check_error(status);
}
void cuda_push_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
check_error(status);
}
void cuda_pull_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
check_error(status);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if (l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
dim3 cuda_gridsize(size_t n) {
size_t k = (n - 1) / BLOCK + 1;
size_t x = k;
size_t y = 1;
if (x > 65535) {
x = ceil(sqrtf(k));
y = (n - 1) / (x*BLOCK) + 1;
}
dim3 d;
d.x = x;
d.y = y;
d.z = 1;
//printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
return d;
}
void pull_convolutional_layer(convolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_pull_array(layer.scales_gpu, layer.scales, layer.n);
cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
void push_convolutional_layer(convolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
if (layer.batch_normalize) {
cuda_push_array(layer.scales_gpu, layer.scales, layer.n);
cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
}
}
// -------------------- CUDA functions -------------------
// add BIAS
__global__ void add_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch*n + filter)*size + offset] += biases[filter];
}
void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_kernel << <dimGrid, dimBlock >> >(output, biases, n, size);
check_error(cudaPeekAtLastError());
}
// normalization
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index / spatial) % filters;
x[index] = (x[index] - mean[f]) / (sqrtf(variance[f]) + .000001f);
}
void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
size_t N = batch*filters*spatial;
normalize_kernel << <cuda_gridsize(N), BLOCK >> >(N, x, mean, variance, batch, filters, spatial);
check_error(cudaPeekAtLastError());
}
// fill array
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < N) X[i*INCX] = ALPHA;
}
void fill_ongpu(int N, float ALPHA, float * X, int INCX)
{
fill_kernel << <cuda_gridsize(N), BLOCK >> >(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
// scale BIAS
__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset < size) output[(batch*n + filter)*size + offset] *= biases[filter];
}
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
scale_bias_kernel << <dimGrid, dimBlock >> >(output, biases, n, size);
check_error(cudaPeekAtLastError());
}
// max-pool layer
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + pad - size) / stride + 1;
int w = (in_w + pad - size) / stride + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
if (layer.stride == layer.size) {
//if(1) {
cudnnStatus_t maxpool_status;
float alpha = 1, beta = 0;
maxpool_status = cudnnPoolingForward(
cudnn_handle(),
layer.poolingDesc,
&alpha,
layer.srcTensorDesc,
state.input,
&beta,
layer.dstTensorDesc,
layer.output_gpu);
//maxpool_status = cudnnDestroyPoolingDescriptor(poolingDesc);
//cudnnDestroyTensorDescriptor(layer.srcTensorDesc);
//cudnnDestroyTensorDescriptor(layer.dstTensorDesc);
}
else {
int h = layer.out_h;
int w = layer.out_w;
int c = layer.c;
size_t n = h*w*c*layer.batch;
forward_maxpool_layer_kernel << <cuda_gridsize(n), BLOCK >> > (n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
}
// flatten
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int in_s = i%spatial;
i = i / spatial;
int in_c = i%layers;
i = i / layers;
int b = i;
int i1 = b*layers*spatial + in_c*spatial + in_s;
int i2 = b*layers*spatial + in_s*layers + in_c;
if (forward) out[i2] = x[i1];
else out[i1] = x[i2];
}
void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
int size = spatial*batch*layers;
flatten_kernel << <cuda_gridsize(size), BLOCK >> >(size, x, spatial, layers, batch, forward, out);
check_error(cudaPeekAtLastError());
}
// activations
__device__ float lhtan_activate_kernel(float x)
{
if (x < 0) return .001*x;
if (x > 1) return .001*(x - 1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if (x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x) { return x; }
__device__ float logistic_activate_kernel(float x) { return 1.f / (1.f + expf(-x)); }
__device__ float loggy_activate_kernel(float x) { return 2.f / (1.f + expf(-x)) - 1; }
__device__ float relu_activate_kernel(float x) { return x*(x>0); }
__device__ float elu_activate_kernel(float x) { return (x >= 0)*x + (x < 0)*(expf(x) - 1); }
__device__ float selu_activate_kernel(float x) { return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1); }
__device__ float relie_activate_kernel(float x) { return (x>0) ? x : .01f*x; }
__device__ float ramp_activate_kernel(float x) { return x*(x>0) + .1f*x; }
__device__ float leaky_activate_kernel(float x) { return (x>0) ? x : .1f*x; }
__device__ float tanh_activate_kernel(float x) { return (2 / (1 + expf(-2 * x)) - 1); }
__device__ float plse_activate_kernel(float x)
{
if (x < -4) return .01 * (x + 4);
if (x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
__device__ float stair_activate_kernel(float x)
{
int n = floor(x);
if (n % 2 == 0) return floor(x / 2.);
else return (x - n) + floor(x / 2.);
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) x[i] = activate_kernel(x[i], a);
}
__global__ void activate_array_leaky_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = leaky_activate_kernel(x[index]);
}
// if(index < 10){
// printf("output %d is %f\n", index,x[index]);
// }
}
__global__ void activate_array_selu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = selu_activate_kernel(x[index]);
}
}
__global__ void activate_array_logistic_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = logistic_activate_kernel(x[index]);
}
}
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY) activate_array_leaky_kernel << <num_blocks, BLOCK, 0 >> >(x, n);
else if (a == LOGISTIC) activate_array_logistic_kernel << <num_blocks, BLOCK >> >(x, n);
else if (a == SELU) activate_array_selu_kernel << <num_blocks, BLOCK >> >(x, n);
else
activate_array_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, a);
CHECK_CUDA(cudaPeekAtLastError());
}
// softmax layer
// Softmax over n values with temperature `temp`, writing into `output`.
// Subtracting the maximum before expf keeps the exponentials from overflowing.
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for (i = 0; i < n; ++i) {
        // BUGFIX: was `int val = input[i];`, which truncated every value to an
        // integer when searching for the maximum (e.g. 0.9 and 0.2 both become
        // 0), so the stabilizing max could be wrong.
        float val = input[i];
        largest = (val > largest) ? val : largest;
    }
    for (i = 0; i < n; ++i) {
        float e = expf(input[i] / temp - largest / temp);
        sum += e;
        output[i] = e;
    }
    for (i = 0; i < n; ++i) {
        output[i] /= sum;  // normalize so the outputs sum to 1
    }
}
// One thread per batch item: softmax over its `n`-element slice at b*offset.
__global__ void softmax_kernel(int n, int offset, int batch, float *input, float temp, float *output)
{
    int b = (blockIdx.y*gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (b < batch) {
        softmax_device(n, input + b*offset, temp, output + b*offset);
    }
}
// Launch one softmax per group; consecutive groups are `offset` floats apart.
void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output)
{
    softmax_kernel << <cuda_gridsize(groups), BLOCK >> >(n, offset, groups, input, temp, output);
    check_error(cudaPeekAtLastError());
}
// reorg layer
// Reorg (space<->depth rearrangement) copy for one element of an NCHW tensor.
// Decomposes the flat index into (b, c, h, w), maps it to the stride-rearranged
// location, and gathers: out[in_index] = x[out_index].
// NOTE(review): the `forward` parameter is accepted but never used — both
// directions run the same gather; confirm callers rely only on this behavior.
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= N) return;
    int in_index = i;
    // Peel the flat index: i = ((b*c + in_c)*h + in_h)*w + in_w.
    int in_w = i%w;
    i = i / w;
    int in_h = i%h;
    i = i / h;
    int in_c = i%c;
    i = i / c;
    int b = i%batch;
    int out_c = c / (stride*stride);   // channel count shrinks by stride^2 ...
    int c2 = in_c % out_c;
    int offset = in_c / out_c;         // ... and the extra channel groups pick a spatial sub-offset
    int w2 = in_w*stride + offset % stride;
    int h2 = in_h*stride + offset / stride;
    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
    out[in_index] = x[out_index];
}
// Host launcher: reorg over the full w*h*c*batch tensor, one thread per element.
void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    const int total = w*h*c*batch;
    reorg_kernel << <cuda_gridsize(total), BLOCK >> >(total, x, w, h, c, batch, stride, forward, out);
    check_error(cudaPeekAtLastError());
}
// upsample layer
// Nearest-neighbor upsample by `stride`, scaled by `scale`. N is the number of
// OUTPUT elements; (w, h, c) are the INPUT dimensions.
// forward: out += scale * x (accumulate into the upsampled tensor).
// backward (forward == 0): scatter-add gradients back into x; atomicAdd is
// needed because stride^2 output cells map onto each input cell.
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= N) return;
    int out_index = i;
    // Decompose the flat output index into (b, c, h, w) of the upsampled tensor.
    int out_w = i % (w*stride);
    i = i / (w*stride);
    int out_h = i % (h*stride);
    i = i / (h*stride);
    int out_c = i%c;
    i = i / c;
    int b = i%batch;
    // Map back to the source pixel (integer division = nearest-neighbor).
    int in_w = out_w / stride;
    int in_h = out_h / stride;
    int in_c = out_c;
    int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
    if (forward) out[out_index] += scale * x[in_index];
    else atomicAdd(x + in_index, scale * out[out_index]);
}
extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
size_t size = w*h*c*batch*stride*stride;
upsample_kernel << <cuda_gridsize(size), BLOCK >> >(size, in, w, h, c, batch, stride, forward, scale, out);
check_error(cudaPeekAtLastError());
}
// Strided copy: Y[i*INCY + OFFY] = X[i*INCX + OFFX] for i in [0, N).
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
    int idx = (blockIdx.y*gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    Y[idx*INCY + OFFY] = X[idx*INCX + OFFX];
}
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
copy_kernel << <cuda_gridsize(N), BLOCK>> >(N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
// shortcut layer
// Elementwise residual add for equal-shaped tensors: out = in + add.
__global__ void simple_input_shortcut_kernel(float *in, int size, float *add, float *out)
{
    int idx = (blockIdx.y*gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (idx < size) out[idx] = in[idx] + add[idx];
}
// Shortcut (residual) addition between tensors of different spatial size and/or
// channel count. Iterates over the smaller (min) dimensions; `stride`
// downsamples the add-input, `sample` upsamples into the output.
__global__ void input_shortcut_kernel(float *in, int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= size) return;
    // Peel the flat id into (b, k, j, i) over the min-sized iteration space.
    int i = id % minw;
    id /= minw;
    int j = id % minh;
    id /= minh;
    int k = id % minc;
    id /= minc;
    int b = id % batch;
    // `out` is indexed with (w2, h2, c2); `add` with (w1, h1, c1).
    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
    out[out_index] = in[out_index] + add[add_index];
}
extern "C" void input_shortcut_gpu(float *in, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
if (w1 == w2 && h1 == h2 && c1 == c2) {
int size = batch * w1 * h1 * c1;
simple_input_shortcut_kernel << <cuda_gridsize(size), BLOCK >> >(in, size, add, out);
CHECK_CUDA(cudaPeekAtLastError());
return;
}
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int stride = w1 / w2;
int sample = w2 / w1;
assert(stride == h1 / h2);
assert(sample == h2 / h1);
if (stride < 1) stride = 1;
if (sample < 1) sample = 1;
int size = batch * minw * minh * minc;
input_shortcut_kernel << <cuda_gridsize(size), BLOCK >> >(in, size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
CHECK_CUDA(cudaPeekAtLastError());
}
// ----------- Quantinization --------------
// Clamp `src` into the symmetric range [-|max_val|, |max_val|], preserving sign.
__host__ __device__ int max_abs(int src, int max_val) {
    if (abs(src) > abs(max_val)) {
        return (src > 0) ? max_val : -max_val;
    }
    return src;
}
// Quantize float -> int8: out = clamp(trunc(in * multipler), +-max_val).
// max_abs saturates to the symmetric 7-bit range (1 bit of sign).
__global__ void cuda_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_int8[idx] = max_abs(input_f32[idx] * multipler, max_val);
    // BUGFIX: removed leftover debug printf that dumped the first 10 outputs on
    // every launch, serializing the kernel and spamming stdout in production.
}
// Host launcher: quantize `size` floats to saturated int8 on the GPU.
void cuda_convert_f32_to_int8(float* input_f32, size_t size, int8_t *output_int8, float multipler, int max_val) {
    const int num_blocks = size / BLOCK + 1;
    cuda_f32_to_int8 << <num_blocks, BLOCK >> >(input_f32, size, output_int8, multipler, max_val);
}
// Debug helper: print the first num_print int8 values of x from the device.
__global__ void print_ints_kernel(int8_t* x, int size,int num_print){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size && idx < num_print)
        printf("x %d is %d\n", idx, x[idx]);
}
// Host launcher for the int8 debug printer.
void print_ints(int8_t* x, int size, int num_print){
    const int num_blocks = size / BLOCK + 1;
    print_ints_kernel<< < num_blocks, BLOCK >> >(x, size, num_print);
}
// Debug helper: print the first num_print float values of x from the device.
__global__ void print_floats_kernel(float* x, int size,int num_print){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size && idx < num_print)
        printf("x %d is %f\n", idx, x[idx]);
}
// Host launcher for the float debug printer.
void print_floats(float* x, int size, int num_print){
    const int num_blocks = size / BLOCK + 1;
    print_floats_kernel<< < num_blocks, BLOCK >> >(x, size, num_print);
}
// Quantize float -> int8 without saturation (plain truncating conversion).
__global__ void cuda_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    output_int8[idx] = input_f32[idx] * multipler;
}
// Host launcher: quantize without clamping.
void cuda_convert_f32_to_int8_nomax(float* input_f32, size_t size, int8_t *output_int8, float multipler) {
    const int num_blocks = size / BLOCK + 1;
    cuda_f32_to_int8_nomax << <num_blocks, BLOCK >> >(input_f32, size, output_int8, multipler);
}
// Dequantize int8 -> float: out = in * multipler.
__global__ void cuda_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    output_f32[idx] = input_int8[idx] * multipler;
}
// Host launcher: dequantize `size` int8 values to float.
void cuda_convert_int8_to_f32(int8_t* input_int8, size_t size, float *output_f32, float multipler) {
    const int num_blocks = size / BLOCK + 1;
    cuda_int8_to_f32 << <num_blocks, BLOCK >> >(input_int8, size, output_f32, multipler);
}
// In-place elementwise scaling: buf[i] *= multipler.
__global__ void cuda_multiply_f32(float *input_output, size_t size, float multipler)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    input_output[idx] *= multipler;
}
// Host launcher for the in-place scale kernel.
void cuda_do_multiply_f32(float *input_output, size_t size, float multipler) {
    const int num_blocks = size / BLOCK + 1;
    cuda_multiply_f32 << <num_blocks, BLOCK >> >(input_output, size, multipler);
}
// --------------------------------
// ------------- XNOR -------------
// --------------------------------
// XNOR-net weight binarization, one thread per filter: every weight of filter f
// becomes +-mean(|w_f|) depending on its sign.
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = (blockIdx.y*gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (f >= n) return;
    float mean = 0;
    int i;
    for (i = 0; i < size; ++i) {
        mean += fabs(weights[f*size + i]);
    }
    mean /= size;
    for (i = 0; i < size; ++i) {
        binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
    }
}
// Host launcher: binarize n filters of `size` weights each.
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    binarize_weights_kernel << <cuda_gridsize(n), BLOCK >> >(weights, n, size, binary);
    check_error(cudaPeekAtLastError());
}
// --------------------------------
// Sign binarization: binary[i] = +1 if x[i] >= 0, else -1.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int idx = (blockIdx.y*gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (idx < n) binary[idx] = (x[idx] >= 0) ? 1 : -1;
}
// Host launcher for the sign-binarization kernel.
void binarize_gpu(float *x, int n, float *binary)
{
    binarize_kernel << <cuda_gridsize(n), BLOCK >> >(x, n, binary);
    check_error(cudaPeekAtLastError());
}
// --------------------------------
// Exchange the real and binarized weight pointers on the layer
// (both the CPU copies and, when built with GPU support, the device copies).
void swap_binary(convolutional_layer *l)
{
    float *tmp = l->weights;
    l->weights = l->binary_weights;
    l->binary_weights = tmp;
#ifdef GPU
    tmp = l->weights_gpu;
    l->weights_gpu = l->binary_weights_gpu;
    l->binary_weights_gpu = tmp;
#endif
}
// --------------------------------
#define WARP_SIZE 32
// im2col where each output row is padded out to `bit_align` columns instead of
// the usual height_col*width_col, so rows later pack cleanly into bit words.
// One grid-stride thread per (channel, output pixel); each thread writes the
// ksize*ksize patch values for its pixel down the column dimension.
__global__ void im2col_align_gpu_kernel(const int n, const float* data_im,
    const int height, const int width, const int ksize,
    const int pad,
    const int stride,
    const int height_col, const int width_col,
    float *data_col, const int bit_align)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    for (; index < n; index += blockDim.x*gridDim.x) {
        // Decompose the flat index into (channel_in, h_out, w_out).
        int w_out = index % width_col;
        int h_index = index / width_col;
        int h_out = h_index % height_col;
        int channel_in = h_index / height_col;
        int channel_out = channel_in * ksize * ksize;
        // Top-left corner of the receptive field (may be negative due to padding).
        int h_in = h_out * stride - pad;
        int w_in = w_out * stride - pad;
        float* data_col_ptr = data_col;
        // Row stride is bit_align, not width_col*height_col (the padded layout).
        data_col_ptr += channel_out * bit_align + h_out * width_col + w_out;
        float* data_col_ptr_32 = data_col + (channel_out * bit_align + h_out * width_col + w_out) / 32;
        const float* data_im_ptr = data_im;
        data_im_ptr += (channel_in * height + h_in) * width + w_in;
        for (int i = 0; i < ksize; ++i) {
            for (int j = 0; j < ksize; ++j) {
                int h = h_in + i;
                int w = w_in + j;
                // Zero-fill for locations that fall into the padding border.
                *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
                    data_im_ptr[i * width + j] : 0;
                data_col_ptr += bit_align;
            }
        }
    }
}
// im2col with each output row padded to bit_align columns.
// Launches one thread per (channel, output pixel) pair.
void im2col_align_ongpu(float *im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float *data_col, int bit_align)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int num_kernels = channels * height_col * width_col;
    const int num_blocks = (num_kernels + BLOCK - 1) / BLOCK;   // ceil-div
    im2col_align_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
        num_kernels, im, height, width, ksize, pad,
        stride, height_col,
        width_col, data_col, bit_align);
}
// --------------------------------
// binary im2col - stride=1
// Binary im2col (written for stride == 1): one thread per (channel, ky, kx)
// triple. Each warp cooperatively packs the SIGNS of 32 output pixels into one
// 32-bit word of data_col via a warp ballot, so data_col holds a bit matrix.
// NOTE(review): uses the legacy mask-less __shfl/__ballot intrinsics — these
// assume pre-Volta implicit warp synchrony; confirm the build targets support it.
__global__ void im2col_align_bin_gpu_kernel(const int n, const float* data_im,
    const int height, const int width, const int ksize, const int channels,
    const int pad,
    const int stride,
    const int height_col, const int width_col,
    float *data_col, const int bit_align)
{
    __shared__ float tmp_s[1];          // NOTE(review): never read — looks like leftover scratch
    __shared__ ulonglong4 tmp256_s[1];  // NOTE(review): never read — looks like leftover scratch
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    {
        // Decompose the thread index into (channel_in, kernel row i, kernel col j).
        int c_index = index;
        int channel_in = c_index % channels;
        int channel_out = channel_in * ksize * ksize;
        int j_index = c_index / channels;
        int j = j_index % ksize;
        int i = j_index / ksize;
        // Output row for this (channel, i, j); rows are bit_align bits apart.
        int pre_out_index = (channel_out + i*ksize + j) * bit_align;
        int j_pad = (j - pad);
        int i_pad = (i - pad);
        // Walk the output plane 32 pixels at a time (one warp ballot per group).
        for (int wh_index = 0; wh_index < (height_col*width_col); wh_index += 32)
        {
            {
                const int w_out = wh_index % width_col;
                const int h_out = wh_index / width_col;
                const int w = w_out + j_pad;
                const int h = h_out + i_pad;
                int pre_in_index = channel_in * height * width;
                int pre_in_wh_index = h * width + w;
                int send_wh_index = wh_index;
                // Threads whose kernel-row index overflowed mark themselves out of range.
                if (i >= ksize) send_wh_index = height_col*width_col;
                #pragma unroll
                for (int t = 0; t < WARP_SIZE; ++t)
                {
                    // Round t: the whole warp processes lane t's 32-pixel group;
                    // each lane handles one pixel (base index + lane_id).
                    const int lane_id = threadIdx.x % WARP_SIZE;
                    const int cur_wh_index = __shfl(send_wh_index, t) + lane_id;
                    if (cur_wh_index < (width_col*height_col))// && (cur_i_pad+pad) < ksize)
                    {
                        const int cur_pre_out_index = __shfl(pre_out_index, t);
                        const int cur_pre_in_index = __shfl(pre_in_index, t);
                        const int cur_pre_in_wh_index = __shfl(pre_in_wh_index, t) + lane_id;
                        int w = cur_pre_in_wh_index % width;
                        int h = cur_pre_in_wh_index / width;
                        int in_index = cur_pre_in_index + cur_pre_in_wh_index;
                        int out_index = cur_pre_out_index + cur_wh_index;
                        // Padding region reads as 0 (sign bit 0).
                        float val = (h >= 0 && w >= 0 && h < height && w < width) ?
                            data_im[in_index] : float();
                        // Collect the warp's 32 sign bits; lane 0 stores the word.
                        uint32_t bit_mask = __ballot(val > 0);
                        if (lane_id == 0) {
                            uint8_t *bit8_ptr = &(((uint8_t *)data_col)[out_index / 8]);
                            uint32_t *bit32_ptr = (uint32_t *)bit8_ptr;
                            *bit32_ptr = bit_mask;
                        }
                    }
                }
            }// w_out
        }
    }
}
// Binary im2col launcher: one thread per (channel, kernel-row, kernel-col)
// triple; each thread's warp packs output-pixel sign bits into data_col.
void im2col_align_bin_ongpu(float *im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float *data_col, int bit_align) {
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int num_kernels = channels * ksize * ksize;
    const int num_blocks = num_kernels / BLOCK + 1;
    im2col_align_bin_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
        num_kernels, im, height, width, ksize, channels, pad,
        stride, height_col,
        width_col, data_col, bit_align);
}
// --------------------------------
// Pack the sign bits of `src` (bit = 1 where value > 0) into `dst`, 32 floats
// per 32-bit word, using a warp ballot; lanes past `size` contribute 0 bits.
// NOTE(review): lane 0 of a partial tail warp still writes a full 32-bit word
// at dst[index/32] — dst must be allocated padded to a 32-float multiple.
// NOTE(review): legacy mask-less __ballot — pre-Volta semantics assumed.
__global__ void float_to_bit_gpu_kernel(float *src, unsigned char *dst, size_t size)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    float src_val;
    {
        if (index < size) src_val = src[index];
        else src_val = 0;
        unsigned int bit_mask = __ballot(src_val > 0);
        // One store per warp: lane 0 writes the warp's 32 collected sign bits.
        if (threadIdx.x % WARP_SIZE == 0) ((unsigned int*)dst)[index / 32] = bit_mask;
    }
}
// Host launcher: pack `size` float signs into a bit array (32 per word).
void float_to_bit_gpu(float *src, unsigned char *dst, size_t size)
{
    const int num_blocks = size / BLOCK + 1;
    float_to_bit_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, dst, size);
}
// --------------------------------
// Clear bit `index` in the little-endian-within-byte bit array `dst`.
__device__ __host__ static inline void remove_bit(unsigned char *const dst, size_t index) {
    dst[index / 8] &= ~(1 << (index % 8));
}
// Set bit `index` in the little-endian-within-byte bit array `dst`.
__device__ __host__ static inline void set_bit(unsigned char *const dst, size_t index) {
    dst[index / 8] |= 1 << (index % 8);
}
// Read bit `index` from the bit array `src`; returns 0 or 1.
__device__ __host__ static inline unsigned char get_bit(unsigned char const*const src, size_t index) {
    return (src[index / 8] >> (index % 8)) & 1;
}
// Intel CPUs and nVidia CUDA GPU are little endian
// Reverse the bit order of a byte (bit 0 <-> bit 7, bit 1 <-> bit 6, ...).
__device__ __host__ unsigned char reverse_byte(unsigned char a)
{
    unsigned char result = 0;
    for (int bit = 0; bit < 8; ++bit) {
        result = (result << 1) | ((a >> bit) & 1);
    }
    return result;
}
// Bit-reverse a byte using the hardware 32-bit bit-reverse, keeping the top byte.
__device__ unsigned char reverse_byte_CUDA(unsigned char a)
{
    return (unsigned char)(__brev((uint32_t)a) >> 24);
}
// Byte bit-reversal via the classic 64-bit multiply/mask trick (branchless, no table).
__device__ __host__ unsigned char reverse_byte_2(unsigned char a)
{
    unsigned long spread = (a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU);
    return spread * 0x10101LU >> 16;
}
// Transpose an 8x8 bit block (Hacker's Delight transpose8 algorithm) and write
// the result with each byte bit-reversed, rows emitted in reverse order.
// A: source rows, m bytes apart; B: destination rows, n bytes apart.
__device__ void transpose8rS32_reversed_diagonale(unsigned char* A, int m, int n, unsigned char* B)
{
    unsigned x, y, t;
    // Load the array and pack it into x and y (4 rows per 32-bit register).
    x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
    y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];
    // Three rounds of delta swaps exchange 1-, 2- and 4-bit fields to transpose.
    t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
    t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
    t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
    t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
    t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
    y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
    x = t;
    // Store rows 7..0 with bit-reversed bytes (the "reversed diagonale" layout).
    B[7 * n] = reverse_byte_CUDA(x >> 24); B[6 * n] = reverse_byte_CUDA(x >> 16); B[5 * n] = reverse_byte_CUDA(x >> 8); B[4 * n] = reverse_byte_CUDA(x);
    B[3 * n] = reverse_byte_CUDA(y >> 24); B[2 * n] = reverse_byte_CUDA(y >> 16); B[1 * n] = reverse_byte_CUDA(y >> 8); B[0 * n] = reverse_byte_CUDA(y);
}
// Transpose a packed bit matrix (n x m bits) in 8x8-bit tiles; one thread per
// tile. lda/ldb are the row strides of A/B in BITS. The tail columns that do
// not fill a whole 8-wide tile are copied bit by bit.
__global__ void transpose_bin_gpu_kernel(unsigned char *A, unsigned char *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    {
        // Tile coordinates: i walks rows in steps of 8, j walks columns in steps of 8.
        i = (index * 8) % n;
        int j;
        {
            j = ((index * 8) / n) * 8;
            if (j < m - 8) {
                // Full 8x8 tile: bit-level transpose via the packed routine.
                int a_index = i*lda + j;
                int b_index = j*ldb + i;
                transpose8rS32_reversed_diagonale(&A[a_index / 8], lda / 8, ldb / 8, &B[b_index / 8]);
            }
            else if (j < m) {
                // Ragged tail: move the remaining bits one at a time.
                for (; j < m; ++j) {
                    if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
                    else remove_bit(B, j*ldb + i);
                }
            }
        }
    }
}
// 8-bit bit reversal via the 64-bit multiply/mask trick (same as reverse_byte_2).
__device__ __host__ uint8_t reverse_8_bit(uint8_t a) {
    unsigned long spread = (a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU);
    return spread * 0x10101LU >> 16;
}
// 32-bit bit reversal; __brev maps to a single hardware instruction on NVIDIA GPUs
// (ARM's equivalent would be the rbit instruction).
__device__ uint32_t reverse_32_bit(uint32_t a)
{
    return __brev(a);
}
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
// In-place transpose of a 32x32 bit matrix stored as 32 uint32 rows
// (Hacker's Delight transpose32, loop manually unrolled per swap width).
// Each `swap` round exchanges j-bit fields between row pairs; the final loop
// mirrors the rows while bit-reversing them.
__device__ void transpose32_optimized(uint32_t A[32]) {
    int j, k;
    unsigned m, t;
    // Swap 16-bit halves between rows 16 apart.
    j = 16;
    m = 0x0000FFFF;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // Swap 8-bit fields between rows 8 apart.
    j = 8;
    m = 0x00ff00ff;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // Swap 4-bit fields between rows 4 apart.
    j = 4;
    m = 0x0f0f0f0f;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // Swap 2-bit fields between rows 2 apart.
    j = 2;
    m = 0x33333333;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // Swap single bits between adjacent rows.
    j = 1;
    m = 0x55555555;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
    // reverse Y: mirror row order and bit-reverse each row.
    for (j = 0; j < 16; ++j) {
        uint32_t tmp = A[j];
        A[j] = reverse_32_bit(A[31 - j]);
        A[31 - j] = reverse_32_bit(tmp);
    }
}
#define BLOCK_TRANSPOSE32 256
// Transpose one 32x32-bit tile: gather 32 rows (m uint32s apart) into this
// thread's slice of shared memory, transpose in place, scatter to B (n apart).
// Shared buffer is sized for BLOCK_TRANSPOSE32 threads per block; the launch
// in transpose_bin_gpu uses exactly that block size.
__device__ void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
    // Per-thread 32-word scratch slice; no cross-thread sharing, so no barrier needed.
    __shared__ uint32_t A_shared[32 * BLOCK_TRANSPOSE32];
    uint32_t *A_tmp = &A_shared[32 * threadIdx.x];
    int i;
    #pragma unroll 32
    for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
    transpose32_optimized(A_tmp);
    #pragma unroll 32
    for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}
// transpose 32x32 bit
// transpose 32x32 bit
// One thread per 32x32-bit tile of the n x m bit matrix; lda/ldb are row
// strides in BITS and are assumed to be multiples of 32 (no ragged tail here,
// unlike the 8x8 kernel above).
__global__ void transpose_bin_gpu_kernel_32(uint32_t *A, uint32_t *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    int index = (blockIdx.x*blockDim.x + threadIdx.x) * 32;
    {
        // Tile origin: i is the row (step 32 implied by index scaling), j the column.
        i = index % n;
        int j;
        {
            j = (index / n) * 32;
            if (j < m) {
                int a_index = i*lda + j;
                int b_index = j*ldb + i;
                transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
            }
        }
    }
}
// Transpose a packed bit matrix (n x m bits) on the GPU in 32x32-bit tiles.
// lda/ldb are the row strides of A/B in bits and must be multiples of 32.
void transpose_bin_gpu(unsigned char *A, unsigned char *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    // One thread per 32x32-bit tile. (Removed the unused 8x8-tile sizing locals
    // that only fed a commented-out alternative launch.)
    size_t size32 = n*m / (32 * 32) + 1;
    const int num_blocks32 = size32 / BLOCK_TRANSPOSE32 + 1;
    transpose_bin_gpu_kernel_32 << <num_blocks32, BLOCK_TRANSPOSE32, 0, 0 >> >((uint32_t *)A, (uint32_t *)B, n, m, lda, ldb, block_size);
}
// --------------------------------
// Fill src[0..size-1] with the byte `val`.
__global__ void fill_int8_gpu_kernel(unsigned char *src, unsigned char val, size_t size) {
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // BUGFIX: the buffer was hard-filled with the constant 0 and the `val`
    // parameter was silently ignored; honor the requested fill value.
    if (index < size) src[index] = val;
}
// Host launcher: byte-fill a device buffer of `size` bytes with `val`.
void fill_int8_gpu(unsigned char *src, unsigned char val, size_t size)
{
    const int num_blocks = size / BLOCK + 1;
    fill_int8_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(src, val, size);
}
// --------------------------------
//typedef unsigned long long int uint64_t;
//typedef unsigned int uint32_t;
//typedef unsigned char uint8_t;
//typedef char int8_t;
// Expand a boolean flag to a 64-bit all-ones / all-zeros mask.
__device__ __host__ static inline uint64_t broadcast_bit_1_to_64(uint8_t src) {
    if (src > 0) return 0xFFFFFFFFFFFFFFFF;
    return 0;
}
// 1-bit XNOR: returns 1 when the low bits of a and b agree.
__device__ __host__ static inline uint8_t xnor_bit1(uint8_t a, uint8_t b) {
    return ((a ^ b) & 0b1) ^ 0b1;
}
// Bitwise XNOR of two 32-bit words.
__device__ __host__ static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
    uint32_t diff = a ^ b;
    return ~diff;
}
// Bitwise XNOR of two 64-bit words.
__device__ __host__ static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
    uint64_t diff = a ^ b;
    return ~diff;
}
// Lane-wise XNOR of two 128-bit values packed in a uint4.
__device__ __host__ static inline uint4 xnor_int128(uint4 a, uint4 b) {
    uint4 out;
    out.x = ~(a.x ^ b.x);
    out.y = ~(a.y ^ b.y);
    out.z = ~(a.z ^ b.z);
    out.w = ~(a.w ^ b.w);
    return out;
}
// Lane-wise XNOR of two 256-bit values packed in an ulonglong4.
__device__ __host__ static inline ulonglong4 xnor_int256(ulonglong4 a, ulonglong4 b) {
    ulonglong4 out;
    out.x = ~(a.x ^ b.x);
    out.y = ~(a.y ^ b.y);
    out.z = ~(a.z ^ b.z);
    out.w = ~(a.w ^ b.w);
    return out;
}
// 1-bit XOR: returns 1 when the low bits of a and b differ.
__device__ __host__ static inline uint8_t xor_bit1(uint8_t a, uint8_t b) {
    uint8_t diff = a ^ b;
    return diff & 0b1;
}
// Bitwise XOR of two 32-bit words.
__device__ __host__ static inline uint32_t xor_int32(uint32_t a, uint32_t b) {
    return a ^ b;
}
// Bitwise XOR of two 64-bit words.
__device__ __host__ static inline uint64_t xor_int64(uint64_t a, uint64_t b) {
    return a ^ b;
}
// Lane-wise XOR of two 128-bit values packed in a uint4.
__device__ __host__ static inline uint4 xor_int128(uint4 a, uint4 b) {
    uint4 out;
    out.x = a.x ^ b.x;
    out.y = a.y ^ b.y;
    out.z = a.z ^ b.z;
    out.w = a.w ^ b.w;
    return out;
}
// Lane-wise XOR of two 256-bit values packed in an ulonglong4.
__device__ __host__ static inline ulonglong4 xor_int256(ulonglong4 a, ulonglong4 b) {
    ulonglong4 out;
    out.x = a.x ^ b.x;
    out.y = a.y ^ b.y;
    out.z = a.z ^ b.z;
    out.w = a.w ^ b.w;
    return out;
}
// Population count of a 256-bit value stored as four 64-bit lanes.
__device__ static inline int popcnt_256(ulonglong4 a) {
    int bits = __popcll(a.x);
    bits += __popcll(a.y);
    bits += __popcll(a.z);
    bits += __popcll(a.w);
    return bits;
}
// --------------------------------
// --------------------------------
// --------------------------------
// sequentially - B (input) in the shared_memory - BAD
// --------------------------------
// XNOR GEMM (C = sign(A) xnor sign(B^T), rescaled by mean_arr): 32 threads
// (K_items) cooperate on each C[i][j]. A block first stages its slice of the
// bit matrix B into shared memory, then each warp popcounts XNOR words along K
// and reduces with warp shuffles. Kept for reference — marked "BAD"/slow above.
// NOTE(review): uses legacy mask-less __shfl_down (pre-Volta semantics).
__global__ void gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    // 16 KB staging buffer for the block's rows of B (bit-packed).
    __shared__ uint8_t B_s[4096 * 4]; // 16 KB // [ldb x N`] // max = 131 072 bits
    const int K_items = WARP_SIZE;
    // Range of output columns j this block touches (32 threads per (i, j) pair).
    int start_j = blockIdx.x*blockDim.x / (K_items * M);
    {
        int end_j = (blockIdx.x*blockDim.x + blockDim.x) / (K_items * M) + 1;
        if (end_j > N) end_j = N;
        size_t shared_size = ldb * (end_j - start_j);
        if (shared_size != 0) {
            int k;
            // Cooperative copy of B rows [start_j, end_j) into shared memory,
            // one 32-bit word (32 bits of the row) per step.
            for (int k = threadIdx.x * 32; k < shared_size; k += blockDim.x * 32) {
                int x = start_j*ldb + k;
                if (x < (N*ldb)) *((uint32_t *)(B_s + k / 8)) = *((uint32_t *)(B + x / 8));
            }
        }
    }
    __syncthreads();  // B_s must be fully staged before any thread reads it
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    {
        int i; // l.n
        int j; // out_h*out_w
        int k; // l.size * l.size * l.c
        const int index2 = index / K_items;
        i = index2 % M; // max M
        j = index2 / M; // max N
        int local_j = j - start_j;  // row index inside the shared staging buffer
        { // l.n - filters [16 - 55 - 1024]
            if (j < N)
            { // out_h*out_w - one channel output size [169 - 173056]
                int count = 0;
                const int bit_step = 32;
                // Each of the 32 lanes handles every 32nd 32-bit chunk of K.
                for (k = (threadIdx.x % WARP_SIZE) * bit_step; k < K; k += bit_step*WARP_SIZE)
                { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
                    uint32_t a_bit32 = *((uint32_t *)(A + (i*lda + k) / 8)); // weights
                    uint32_t b_bit32 = *((uint32_t *)(B_s + (local_j*ldb + k) / 8)); // input
                    uint32_t c_bit32 = xnor_int32(a_bit32, b_bit32);
                    count += __popc(c_bit32);  // matching bit pairs in this chunk
                }
                // Warp tree-reduction of the partial popcounts into lane 0.
                for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2)
                    count += __shfl_down(count, offset);
                if (threadIdx.x % WARP_SIZE == 0) {
                    // Subtract the padding bits of the last partial 32-bit chunk,
                    // then map the match count to a +-1 dot product times the filter mean.
                    int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
                    count = count - f1;
                    float mean_val = mean_arr[i];
                    C[i*ldc + j] = (2 * count - K) * mean_val;
                }
            }
        }
    }
}
// sequentially - BAD
// Host launcher for the (slow, kept-for-reference) sequential XNOR GEMM.
// 32 threads cooperate on each of the M*N outputs, hence the x32 thread count.
void gemm_nn_custom_bin_mean_transposed_sequentially_gpu(int M, int N, int K,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    const size_t total_threads = (size_t)M * N * 32;
    const int num_blocks = total_threads / BLOCK + 1;
    gemm_nn_custom_bin_mean_transposed_sequentially_gpu_kernel << <num_blocks, BLOCK, 0, 0 >> >(
        M, N, K,
        A, lda,
        B, ldb,
        C, ldc,
        mean_arr);
}
// --------------------------------
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
// Repack NCHW float input into 32-channel bit planes: for each pixel, the sign
// bits of 32 consecutive channels are packed into one uint32 (bit c_pack = sign
// of channel chan+c_pack). One warp covers 32 consecutive pixels of one group.
__global__ void repack_input_kernel_bin(float *input, uint32_t *re_packed_input_bin, int w, int h, int c)
{
    const int index = blockIdx.x*blockDim.x + threadIdx.x;
    const int global_warp_id = index / WARP_SIZE;
    const int lane_id = threadIdx.x % WARP_SIZE;
    const int items_per_channel = w * h;
    // Round the per-channel pixel count up to a warp multiple so warps map cleanly.
    const int items_per_channel_aligned = items_per_channel + WARP_SIZE - (items_per_channel % WARP_SIZE);
    // i: base pixel index for this warp; chan: first channel of this 32-channel group.
    int i = 32 * (global_warp_id % (items_per_channel_aligned / WARP_SIZE));
    int chan = 32 * (global_warp_id / (items_per_channel_aligned / WARP_SIZE));
    if (chan < c)
    {
        uint32_t result_bits = 0;
        for (int c_pack = 0; c_pack < 32; ++c_pack)
        {
            float src = 0;  // out-of-range pixels contribute a 0 bit
            if ((i + lane_id) < items_per_channel) {
                src = input[(chan + c_pack)*items_per_channel + (i + lane_id)];
            }
            // Ballot gathers the warp's 32 sign bits; each lane keeps its own bit.
            uint32_t bit_mask = __ballot_custom(src > 0);
            uint32_t cur_bit = (bit_mask >> lane_id) & uint32_t(1);
            result_bits |= (cur_bit << c_pack);
        }
        if ((i + lane_id) < items_per_channel) {
            re_packed_input_bin[chan*items_per_channel / 32 + (i + lane_id)] = result_bits;
        }
    }
}
// Host launcher: repack a w*h*c float tensor into 32-channel bit planes.
void repack_input_gpu_bin(float *input, uint32_t *re_packed_input_bin, int w, int h, int c)
{
    const int size = (w * h * c) / 32 + 1;
    const int num_blocks = get_number_of_blocks(size, BLOCK);
    repack_input_kernel_bin << <num_blocks, BLOCK>> >(input, re_packed_input_bin, w, h, c);
    CHECK_CUDA(cudaPeekAtLastError());
}
// --------------------------------
// Transpose a src_h x src_w uint32 matrix: dst[j][i] = src[i][j].
// NOTE(review): dst_align appears to be in bits (it is divided by 32) while
// src_align is used directly as a uint32 stride — confirm against the callers.
__global__ void transpose_uint32_kernel(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
    const int index = blockIdx.x*blockDim.x + threadIdx.x;
    const int i = index % src_h;
    const int j = index / src_h;
    if (j >= src_w) return;
    dst[j*dst_align / 32 + i] = src[i*src_align + j];
}
// Host launcher: transpose a src_h x src_w uint32 matrix on the GPU.
void transpose_uint32_gpu(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
    const int total = src_w * src_h;
    const int num_blocks = total / BLOCK + 1;
    transpose_uint32_kernel << <num_blocks, BLOCK >> >(src, dst, src_h, src_w, src_align, dst_align);
    CHECK_CUDA(cudaPeekAtLastError());
}
// --------------------------------
// Warp-wide all-reduce sum via an XOR butterfly: after log2(WARP_SIZE) rounds
// every lane holds the sum of all 32 lanes' inputs.
__inline__ __device__
int warpAllReduceSum(int val) {
    for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
#if CUDART_VERSION >= 9000
        // CUDA 9+: explicit-mask sync shuffle, required on Volta+ where lanes
        // may diverge. FULL_MASK is presumably 0xffffffff — defined elsewhere.
        val += __shfl_xor_sync(FULL_MASK, val, mask);
#else
        // Pre-CUDA-9 toolkits: legacy mask-less shuffle.
        val += __shfl_xor(val, mask);
#endif
    return val;
}
// --------------------------------
// Coalescing
// A (weights) in the shared_memory - GOOD
// Binary (XNOR-net style) GEMM: C[i][j] = (2*popcount_match - K) * mean_arr[i] + bias_arr[i],
// computed as popcount of XOR between bit-packed weight row A[i] and bit-packed
// transposed input row B[j].
//   A: [M x lda] bits (weights), staged into shared memory per block
//   B: [N x ldb] bits (input, transposed)
//   C: [M x ldc] floats (output)
// One thread produces one output element (i = index / N, j = index % N).
// The K loop is peeled into progressively narrower warp-cooperative passes
// (2048-bit, 1024-bit) plus a 256-bit tail; percentages in comments are
// profiler time shares left by the original author.
__global__ void gemm_nn_custom_bin_mean_transposed_gpu_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias_arr, int leaky_activation,
float *shortcut_in_gpu, float *shortcut_out_gpu)
{
// total 57%
int index = blockIdx.x*blockDim.x + threadIdx.x;
// Staging buffer for the block's slice of A (bit-packed weights), 12 KB.
__shared__ uint8_t A_s[6144 * 8 / 4];
//__shared__ uint64_t A_s[6144]; // 48 KB // [lda x M`]
//__shared__ uint8_t A_s[6144*8]; // 48 KB // [lda x M`]
// Range of weight rows [start_i, end_i) touched by any thread of this block.
int start_i = blockIdx.x*blockDim.x / N;
int end_i = (blockIdx.x*blockDim.x + blockDim.x) / N + 1;
size_t shared_size = lda * (end_i - start_i);
int i_cur = index / N;
int local_i = i_cur - start_i; // row index within the shared-memory slice
// ~10%  -- cooperative copy of the A slice into shared memory, 64 bits per step.
for (int k = threadIdx.x * 64; k < shared_size; k += blockDim.x * 64) {
int x = start_i*lda + k;
if (x < (M*lda)) *((uint64_t *)(A_s + k / 8)) = *((uint64_t *)(A + x / 8));
}
__syncthreads();
int i, j, k, h;
// 47% = 29 + 10 + 8
j = index % N;
{ // out_h*out_w - one channel output size [169 - 173056]
i = index / N;
//if (i < M) // l.n - filters [16 - 55 - 1024]
{
int count = 0;
k = 0;
#ifdef NOT_USED
// 32 thread X 256 bit = 8192 bit
for (; k < (K - 8192); k += 8192) { // l.size*l.size*l.c - one filter size [27 - 9216]
ulonglong4 c_bit256;
//int64_t A_cur_index = (i*lda + k) / 8;
int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl_custom(A_cur_index, t) + 32 * lane_id;
const int64_t B_i = __shfl_custom(B_cur_index, t) + 32 * lane_id;
{
//ulonglong4 a_bit256 = *((ulonglong4 *)(A + A_i)); // weights
ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + A_i)); // weights
ulonglong4 b_bit256 = *((ulonglong4 *)(B + B_i)); // input
c_bit256 = xor_int256(a_bit256, b_bit256);
int tmp_count = __popcll(c_bit256.w) + __popcll(c_bit256.x) +
__popcll(c_bit256.y) + __popcll(c_bit256.z);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
#endif
//#ifdef NOT_USED
// 32 thread X 64 bit = 2048 bit // 29%
// Warp-cooperative pass: the warp processes lane t's (i,j) pair together —
// each lane loads 64 consecutive bits, popcounts the XOR, and the warp-wide
// sum is credited back to lane t only.
for (; k < (K - 2048); k += 2048) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t c_bit64;
//int64_t A_cur_index = (i*lda + k) / 8;
int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl_custom(A_cur_index, t) + 8 * lane_id;
const int64_t B_i = __shfl_custom(B_cur_index, t) + 8 * lane_id;
{
//uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + A_i)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + B_i)); // input
c_bit64 = xor_int64(a_bit64, b_bit64);
int tmp_count = __popcll(c_bit64);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
//#ifdef NOT_USED
// 32 thread X 32 bit = 1024 bit // 10%
// Same warp-cooperative scheme as above, at 32 bits per lane.
for (; k < (K - 1024); k += 1024) { // l.size*l.size*l.c - one filter size [27 - 9216]
//int64_t A_cur_index = (i*lda + k) / 8;
int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
if (i >= M) A_cur_index = 0;
#pragma unroll
for (int t = 0; t < WARP_SIZE; ++t) {
const int lane_id = threadIdx.x % WARP_SIZE;
const int64_t A_i = __shfl_custom(A_cur_index, t) + 4 * lane_id;
const int64_t B_i = __shfl_custom(B_cur_index, t) + 4 * lane_id;
{
//uint64_t a_bit64 = *((uint64_t *)(A + A_i)); // weights
uint32_t a_bit32 = *((uint32_t *)(A_s + A_i)); // weights
uint32_t b_bit32 = *((uint32_t *)(B + B_i)); // input
uint32_t c_bit32 = xor_int32(a_bit32, b_bit32);
int tmp_count = __popc(c_bit32);
int sum_count = warpAllReduceSum(tmp_count);
if (lane_id == t) count += sum_count;
}
}
}
//#endif
if (i < M)
{
float mean_val = mean_arr[i];
float bias_val = bias_arr[i];
//#ifdef NOT_USED
// 8%  -- per-thread tail: remaining K bits in 256-bit chunks.
// NOTE(review): assumes K is padded so reads in 256-bit steps stay in
// bounds; the pad bits are subtracted below via f1.
for (; k < K; k += 256) { // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
//ulonglong4 a_bit256 = *((ulonglong4 *)(A + (i*lda + k) / 8)); // weights
ulonglong4 a_bit256 = *((ulonglong4 *)(A_s + (local_i*lda + k) / 8)); // weights
ulonglong4 b_bit256 = *((ulonglong4 *)(B + (j*ldb + k) / 8)); // input
ulonglong4 c_bit256 = xor_int256(a_bit256, b_bit256);
count += __popcll(c_bit256.w) + __popcll(c_bit256.x) +
__popcll(c_bit256.y) + __popcll(c_bit256.z);
}
//#endif
#ifdef NOT_USED
for (; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
//uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8)); // weights
uint64_t a_bit64 = *((uint64_t *)(A_s + (local_i*lda + k) / 8)); // weights
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8)); // input
uint64_t c_bit64 = xor_int64(a_bit64, b_bit64);
count += __popcll(c_bit64);
}
#endif
const int bit_step = 256;
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
// XOR counts mismatches; 2*count - K converts to (matches - mismatches).
float dst_val = (2 * count - K) *mean_val + bias_val;
if (leaky_activation)
dst_val = (dst_val >= 0) ? (dst_val) : (0.1f*dst_val); // Leaky activation
size_t out_index = i*ldc + j;
C[out_index] = dst_val;
// Optional fused shortcut (residual) add.
if (shortcut_out_gpu) {
shortcut_out_gpu[out_index] = shortcut_in_gpu[out_index] + dst_val;
}
}
}
}
}
// --------------------------------
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col: unrolls one [channels x height x width] image into a column matrix
// so convolution can be expressed as GEMM. Each of the n threads (grid-stride
// loop) fills one (channel, h_out, w_out) output position with its ksize*ksize
// patch; out-of-image samples (padding region) are written as 0.
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
for (; index < n; index += blockDim.x*gridDim.x) {
// Decompose the flat index into (channel_in, h_out, w_out).
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
// Each input channel expands into ksize*ksize output rows.
int channel_out = channel_in * ksize * ksize;
// Top-left corner of the receptive field (may be negative due to padding).
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
// Zero-fill samples that fall in the padding border.
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//data_im[(channel_in * height + h_in) * width + w_in + i * width + j];
//*data_col_ptr = data_im_ptr[ii * width + jj];
// Consecutive kernel taps are height_col*width_col apart in the output.
data_col_ptr += height_col * width_col;
}
}
}
}
// Host launcher for im2col_gpu_kernel: unrolls a [channels x height x width]
// device image into the column buffer `data_col` for GEMM-based convolution.
// One thread is launched per element of the (channels x out_h x out_w) grid.
void im2col_ongpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col)
{
// Output spatial extent after padding and striding.
const int out_h = (height + 2 * pad - ksize) / stride + 1;
const int out_w = (width + 2 * pad - ksize) / stride + 1;
const int total_threads = channels * out_h * out_w;
const int grid = (total_threads + BLOCK - 1) / BLOCK; // ceil-div launch config
im2col_gpu_kernel << <grid,
BLOCK >> >(
total_threads, im, height, width, ksize, pad,
stride, out_h,
out_w, data_col);
CHECK_CUDA(cudaPeekAtLastError());
}
// --------------------------------
// Tensor Cores binary (CC >= 7.3 && CUDA >= 10.0) - __CUDA_SUBBYTE_IMMA__
#if CUDART_VERSION >= 10000
#include <mma.h>
#define WMMA_M 8
#define WMMA_N 8
#define WMMA_K 128
#define WMMA_K32 (WMMA_K/32)
#define WMMA_Nx2 (WMMA_N*2)
// Tensor Cores are used for XOR-GEMM
// Binary GEMM using Tensor Cores (1-bit WMMA, requires SM73+/CUDA 10), with a
// shuffle-based fallback for older architectures. Each warp computes two
// adjacent 8x8 output tiles (8 rows of A x 16 columns of B) per iteration:
//   - tile origin: i = 8 * (warp / cols), j = 16 * (warp % cols),
//     where cols = N_aligned / 16
//   - origins are clamped (i = M-8, j = N-16) so partial tiles re-compute the
//     last full tile; callers must guarantee M >= 8 and N >= 16.
// Raw XOR popcounts are staged in shared C_s, then converted to
// (2*count - K) * mean + bias per element, with optional leaky activation and
// fused shortcut add.
__global__ void gemm_nn_custom_bin_mean_transposed_tensor_kernel(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias_arr, int leaky_activation,
float *shortcut_in_gpu, float *shortcut_out_gpu)
{
// total 57%
int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ int C_s[WMMA_N * WMMA_M * 32 * 2]; // 2 * 8 KB - Temporary result of GEMM WMMA for 32 warps
const int lane_id = threadIdx.x % 32;
const int warp_id = threadIdx.x / 32;
const int global_warp_id = index / 32;
// N rounded up to a multiple of 16 (adds a full extra tile when already aligned;
// the host launcher uses the identical formula so the warp->tile mapping agrees).
const int N_aligned = N + WMMA_Nx2 - (N % WMMA_Nx2);
/*
__syncthreads();
__shared__ uint32_t A_s[8 * 512]; // 8x512 = 8 x 16384 bits, instead of 8x4
const int start_global_warp_id = blockIdx.x*blockDim.x / 32;
int start_i = start_global_warp_id / (N_aligned / WMMA_N);
start_i = start_i * WMMA_M;
if (start_i + WMMA_M > M) start_i = M - WMMA_M; // must be: i+7 < M
for (int tmp_index = threadIdx.x; tmp_index < (8 * 512); tmp_index += blockDim.x)
{
int k_tmp = tmp_index % 512;
int local_i = tmp_index / 512;
uint32_t a_val = ((uint32_t *)(A))[(start_i + local_i)*lda/32 + k_tmp];
A_s[local_i * 512 + k_tmp] = a_val;
}
__syncthreads();
*/
int i, j, k, h;
// 47% = 29 + 10 + 8
j = global_warp_id % (N_aligned / WMMA_Nx2);
j = j * WMMA_Nx2;
{ // out_h*out_w - one channel output size [169 - 173056]
i = global_warp_id / (N_aligned / WMMA_Nx2);
i = i * WMMA_M;
int count = 0;
k = 0;
if (i < M) //if (i < M) // l.n - filters [16 - 55 - 1024]
{
// Clamp partial edge tiles back onto the matrix; underflows if N < 16 or M < 8.
if (j + WMMA_Nx2 > N) j = N - WMMA_Nx2; // must be: j+7 < N
if (i + WMMA_M > M) i = M - WMMA_M; // must be: i+7 < M
#if __CUDA_ARCH__ >= 730
// Tensor Cores
using namespace nvcuda;
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, wmma::experimental::precision::b1, wmma::row_major> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, wmma::experimental::precision::b1, wmma::col_major> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, int> c1_frag, c2_frag;
wmma::fill_fragment(c1_frag, 0); // !!!! XOR isn't XNOR !!!!!!!!!!
wmma::fill_fragment(c2_frag, 0); // !!!! XOR isn't XNOR !!!!!!!!!!
// 8 x 8 x 4 (uint32_t, 4 * 32 = 128 bit)
for (; k < K; k += 128) // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
{
int64_t A_cur_index = (i*lda + k) / 8; // index in bits
int64_t B1_cur_index = (j*ldb + k) / 8; // index in bits
int64_t B2_cur_index = ((j + 8)*ldb + k) / 8; // index in bits
// try to use A that is cached in shared memory - poor performance
//if (i == start_i) wmma::load_matrix_sync(a_frag, &A_s[k / 32], (512 * 32)); // lda = (128*32) bits
//else wmma::load_matrix_sync(a_frag, (uint32_t *)(A + A_cur_index), lda); // lda = M
// lda, ldb - are in bits
wmma::load_matrix_sync(a_frag, (uint32_t *)(A + A_cur_index), lda); // lda = M
wmma::load_matrix_sync(b_frag, (uint32_t *)(B + B1_cur_index), ldb); // ldb = K
wmma::bmma_sync(c1_frag, a_frag, b_frag, c1_frag); // XOR-GEMM
wmma::load_matrix_sync(b_frag, (uint32_t *)(B + B2_cur_index), ldb); // ldb = K
wmma::bmma_sync(c2_frag, a_frag, b_frag, c2_frag); // XOR-GEMM
}
// C[i*ldc + j]
wmma::store_matrix_sync(&C_s[warp_id*WMMA_M*WMMA_N], c1_frag, WMMA_N, wmma::mem_row_major);
wmma::store_matrix_sync(&C_s[warp_id*WMMA_M*WMMA_N + WMMA_M*WMMA_N * 32], c2_frag, WMMA_N, wmma::mem_row_major);
#else // __CUDA_ARCH__ >= 730
// Custom XOR-GEMM (shuffle-based emulation of the 8x8x128 bmma step)
int k_d = lane_id % 4;
int i_d = lane_id / 4;
int j_d = lane_id / 4;
int32_t accum_c_val[8 * 2]; // wmma::fill_fragment(c_frag, 0);
for (int local_j = 0; local_j < 8 * 2; ++local_j) {
accum_c_val[local_j] = 0;
}
// 8 x 8 x 4 (uint32_t, 4 * 32 = 128 bit)
for (; k < K; k += 128) // l.size*l.size*l.c - one filter size [27 - 144 - 9216]
{
int64_t A_cur_index = (i*lda + k) / 8;
//int64_t A_cur_index = (local_i*lda + k) / 8;
int64_t B_cur_index = (j*ldb + k) / 8;
// lda, ldb - are in bits
// 8*4 = 32
// 8*8 = 64
// Lane layout: lane = i_d * 4 + k_d (8 rows x 4 32-bit chunks of K).
int k_d = lane_id % 4;
int i_d = lane_id / 4;
int j_d = lane_id / 4;
uint32_t a_val = *(uint32_t *)(A + ((i + i_d)*lda + (k + k_d * 32)) / 8); // wmma::load_matrix_sync(a_frag, (uint32_t *)(A + A_cur_index), lda);
for (int c_x = 0; c_x < 2; c_x++)
{
uint32_t b_val = *(uint32_t *)(B + ((c_x * 8 + j + j_d)*ldb + (k + k_d * 32)) / 8); // wmma::load_matrix_sync(b_frag, (uint32_t *)(B + B_cur_index), ldb);
// wmma::bmma_sync(c_frag, a_frag, b_frag, c_frag);
int32_t c_val[8]; // 8 x 32 threads = 256
#pragma UNROLL
for (int local_j = 0; local_j < 8; ++local_j)
{
// Fetch column local_j's K-chunk k_d from the lane that holds it.
uint32_t b_val_cur = __shfl_custom(b_val, local_j * 4 + k_d);
c_val[local_j] = __popc(xor_int32(a_val, b_val_cur));
}
#pragma UNROLL
for (int local_j = 0; local_j < 8; ++local_j)
{
#pragma UNROLL
for (int local_k = 0; local_k < 4; ++local_k) {
// Sum the four K-chunk partials for row i_d from lanes i_d*4 .. i_d*4+3.
accum_c_val[local_j + c_x * 8] += __shfl_custom(c_val[local_j], i_d * 4 + local_k);
}
}
}
}
// only the first 8 threads (i) contain 8 good values each, in c_val[8] (j) = 8 x 8 =64
// wmma::store_matrix_sync(&C_s[warp_id*WMMA_M*WMMA_N], c_frag, WMMA_N, wmma::mem_row_major);
if (k_d == 0) {
for (int c_x = 0; c_x < 2; c_x++)
{
for (int local_j = 0; local_j < 8; ++local_j)
{
C_s[warp_id*WMMA_M*WMMA_N + i_d*WMMA_N + local_j + WMMA_M*WMMA_N * 32 * c_x] = accum_c_val[local_j + c_x * 8];
}
}
}
#endif // __CUDA_ARCH__ >= 730
// Epilogue: convert raw XOR popcounts in C_s to the final float outputs.
// Each lane handles one column (j_d) over strided rows (i_d) of both tiles.
for (int c_x = 0; c_x < 2; c_x++)
{
int j_d = lane_id % WMMA_N;
{
#pragma UNROLL
for (int i_d = lane_id / WMMA_N; i_d < WMMA_M; i_d += WMMA_M / 2)
{
int count = C_s[warp_id*WMMA_M*WMMA_N + i_d*WMMA_N + j_d + WMMA_M*WMMA_N * 32 * c_x];
const int bit_step = 128;
int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
// XOR counts mismatches; convert to (matches - mismatches).
count = (2 * count - K);
float mean_val = mean_arr[i + i_d];
float bias_val = bias_arr[i + i_d];
float dst_val = count *mean_val + bias_val;
if (leaky_activation)
dst_val = (dst_val >= 0) ? (dst_val) : (0.1f*dst_val); // Leaky activation
size_t out_index = (i + i_d)*ldc + (c_x * 8 + j + j_d);
C[out_index] = dst_val;
// Optional fused shortcut (residual) add.
if (shortcut_out_gpu) {
shortcut_out_gpu[out_index] = shortcut_in_gpu[out_index] + dst_val;
}
}
}
}
}
}
}
#endif // CUDART_VERSION >= 10000
// --------------------------------
// GOOD
// Dispatches the binary (XNOR-net style) GEMM:
//   C[i][j] = (2*popcount_match(A_row_i, B_row_j) - K) * mean_arr[i] + bias[i]
// A: [M x lda] bit-packed weights, B: [N x ldb] bit-packed transposed input,
// C: [M x ldc] floats. Uses the Tensor-Core kernel when built with CUDA >= 10
// and the problem is large enough for full 8x16 tiles; otherwise falls back
// to the shared-memory warp kernel.
void gemm_nn_custom_bin_mean_transposed_gpu(int M, int N, int K,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr, float *bias, int leaky_activation,
float *shortcut_in_gpu, float *shortcut_out_gpu)
{
int size = M*N;
const int num_blocks = get_number_of_blocks(size, BLOCK);
#if CUDART_VERSION >= 10000
// FIX(review): was `if (1)`. The tensor kernel clamps partial tiles with
// j = N - 16 and i = M - 8, which underflows (out-of-bounds accesses) when
// N < 16 or M < 8 — route such small problems to the generic kernel instead.
if (M >= 8 && N >= 16)
{
// NOTE: these "aligned" extents intentionally replicate the formula used
// inside the tensor kernel (N + 16 - N % 16), including the extra tile when
// already aligned — the kernel's warp->tile mapping depends on it exactly.
const int M_aligned = M + (8 - (M % 8));
const int N_aligned = N + (16 - (N % 16));
// One warp per 8x16 output tile.
int tensor_size = (M_aligned / 8)*(N_aligned / 16)*WARP_SIZE;
const int tensor_blocks = get_number_of_blocks(tensor_size, BLOCK);
gemm_nn_custom_bin_mean_transposed_tensor_kernel << <tensor_blocks, BLOCK >> > (
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr, bias, leaky_activation,
shortcut_in_gpu, shortcut_out_gpu);
}
else
#endif //# CUDART_VERSION >= 10000
{
gemm_nn_custom_bin_mean_transposed_gpu_kernel << <num_blocks, BLOCK >> > (
M, N, K,
A, lda,
B, ldb,
C, ldc,
mean_arr, bias, leaky_activation,
shortcut_in_gpu, shortcut_out_gpu);
}
CHECK_CUDA(cudaPeekAtLastError());
}
// -------------------------------- |
898f23a8e2bbc68eb911e7a4312078b18baf83bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "file_system.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
// Device-side driver exercising the GPU file system (Test Case 3):
// basic open/write/read/overwrite, directory listings (LS_D by date,
// LS_S by size), file removal, and a large batch of files to stress
// the FCB table and compaction. `input` supplies write payloads,
// `output` receives read-back data for host-side verification.
__device__ void user_program(FileSystem *fs, uchar *input, uchar *output) {
/*
/////////////// Test Case 1 ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs,LS_D);
fs_gsys(fs, LS_S);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 64, 12, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, RM, "t.txt\0");
fs_gsys(fs, LS_S);
*/
/////////////// Test Case 2 ///////////////
/*u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs,input, 64, fp);
fp = fs_open(fs,"b.txt\0", G_WRITE);
fs_write(fs,input + 32, 32, fp);
fp = fs_open(fs,"t.txt\0", G_WRITE);
fs_write(fs,input + 32, 32, fp);
fp = fs_open(fs,"t.txt\0", G_READ);
fs_read(fs,output, 32, fp);
fs_gsys(fs,LS_D);
fs_gsys(fs,LS_S);
fp = fs_open(fs,"b.txt\0", G_WRITE);
fs_write(fs,input + 64, 12, fp);
fs_gsys(fs,LS_S);
fs_gsys(fs,LS_D);
fs_gsys(fs,RM, "t.txt\0");
fs_gsys(fs,LS_S);
char fname[10][20];
for (int i = 0; i < 10; i++)
{
fname[i][0] = i + 33;
for (int j = 1; j < 19; j++)
fname[i][j] = 64 + j;
fname[i][19] = '\0';
}
for (int i = 0; i < 10; i++)
{
fp = fs_open(fs,fname[i], G_WRITE);
fs_write(fs,input + i, 24 + i, fp);
}
fs_gsys(fs,LS_S);
for (int i = 0; i < 5; i++)
fs_gsys(fs,RM, fname[i]);
fs_gsys(fs,LS_D);*/
/////////////// Test Case 3 ///////////////
// Basic open/write; the second write to t.txt overwrites the first.
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 64, 12, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, RM, "t.txt\0");
fs_gsys(fs, LS_S);
// Ten 19-character file names ('!'..'*' + "ABC..."), varying sizes.
char fname[10][20];
for (int i = 0; i < 10; i++)
{
fname[i][0] = i + 33;
for (int j = 1; j < 19; j++)
fname[i][j] = 64 + j;
fname[i][19] = '\0';
}
for (int i = 0; i < 10; i++)
{
fp = fs_open(fs, fname[i], G_WRITE);
fs_write(fs, input + i, 24 + i, fp);
}
fs_gsys(fs, LS_S);
// Remove half of them, forcing gaps that later writes must reclaim.
for (int i = 0; i < 5; i++)
fs_gsys(fs, RM, fname[i]);
fs_gsys(fs, LS_D);
// Stress test: ~1000 distinct names of lengths 2..14.
char fname2[1018][20];
int p = 0;
for (int k = 2; k < 15; k++)
for (int i = 50; i <= 126; i++, p++)
{
fname2[p][0] = i;
for (int j = 1; j < k; j++)
fname2[p][j] = 64 + j;
fname2[p][k] = '\0';
}
for (int i = 0; i < 1001; i++)
{
fp = fs_open(fs, fname2[i], G_WRITE);
fs_write(fs, input + i, 24 + i, fp);
}
fs_gsys(fs, LS_S);
fp = fs_open(fs, fname2[1000], G_READ);
fs_read(fs, output + 1000, 1024, fp);
// Seventeen 1 KB files ("aa".."qq"), then one more to exercise allocation limits.
char fname3[17][3];
for (int i = 0; i < 17; i++)
{
fname3[i][0] = 97 + i;
fname3[i][1] = 97 + i;
fname3[i][2] = '\0';
fp = fs_open(fs, fname3[i], G_WRITE);
fs_write(fs, input + 1024 * i, 1024, fp);
}
fp = fs_open(fs, "EA\0", G_WRITE);
fs_write(fs, input + 1024 * 100, 1024, fp);
fs_gsys(fs, LS_S);
}
| 898f23a8e2bbc68eb911e7a4312078b18baf83bf.cu | #include "file_system.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
// Device-side driver exercising the GPU file system (Test Case 3):
// basic open/write/read/overwrite, directory listings (LS_D by date,
// LS_S by size), file removal, and a large batch of files to stress
// the FCB table and compaction. `input` supplies write payloads,
// `output` receives read-back data for host-side verification.
__device__ void user_program(FileSystem *fs, uchar *input, uchar *output) {
/*
/////////////// Test Case 1 ///////////////
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs,LS_D);
fs_gsys(fs, LS_S);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 64, 12, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, RM, "t.txt\0");
fs_gsys(fs, LS_S);
*/
/////////////// Test Case 2 ///////////////
/*u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs,input, 64, fp);
fp = fs_open(fs,"b.txt\0", G_WRITE);
fs_write(fs,input + 32, 32, fp);
fp = fs_open(fs,"t.txt\0", G_WRITE);
fs_write(fs,input + 32, 32, fp);
fp = fs_open(fs,"t.txt\0", G_READ);
fs_read(fs,output, 32, fp);
fs_gsys(fs,LS_D);
fs_gsys(fs,LS_S);
fp = fs_open(fs,"b.txt\0", G_WRITE);
fs_write(fs,input + 64, 12, fp);
fs_gsys(fs,LS_S);
fs_gsys(fs,LS_D);
fs_gsys(fs,RM, "t.txt\0");
fs_gsys(fs,LS_S);
char fname[10][20];
for (int i = 0; i < 10; i++)
{
fname[i][0] = i + 33;
for (int j = 1; j < 19; j++)
fname[i][j] = 64 + j;
fname[i][19] = '\0';
}
for (int i = 0; i < 10; i++)
{
fp = fs_open(fs,fname[i], G_WRITE);
fs_write(fs,input + i, 24 + i, fp);
}
fs_gsys(fs,LS_S);
for (int i = 0; i < 5; i++)
fs_gsys(fs,RM, fname[i]);
fs_gsys(fs,LS_D);*/
/////////////// Test Case 3 ///////////////
// Basic open/write; the second write to t.txt overwrites the first.
u32 fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input, 64, fp);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_WRITE);
fs_write(fs, input + 32, 32, fp);
fp = fs_open(fs, "t.txt\0", G_READ);
fs_read(fs, output, 32, fp);
fs_gsys(fs, LS_D);
fs_gsys(fs, LS_S);
fp = fs_open(fs, "b.txt\0", G_WRITE);
fs_write(fs, input + 64, 12, fp);
fs_gsys(fs, LS_S);
fs_gsys(fs, LS_D);
fs_gsys(fs, RM, "t.txt\0");
fs_gsys(fs, LS_S);
// Ten 19-character file names ('!'..'*' + "ABC..."), varying sizes.
char fname[10][20];
for (int i = 0; i < 10; i++)
{
fname[i][0] = i + 33;
for (int j = 1; j < 19; j++)
fname[i][j] = 64 + j;
fname[i][19] = '\0';
}
for (int i = 0; i < 10; i++)
{
fp = fs_open(fs, fname[i], G_WRITE);
fs_write(fs, input + i, 24 + i, fp);
}
fs_gsys(fs, LS_S);
// Remove half of them, forcing gaps that later writes must reclaim.
for (int i = 0; i < 5; i++)
fs_gsys(fs, RM, fname[i]);
fs_gsys(fs, LS_D);
// Stress test: ~1000 distinct names of lengths 2..14.
char fname2[1018][20];
int p = 0;
for (int k = 2; k < 15; k++)
for (int i = 50; i <= 126; i++, p++)
{
fname2[p][0] = i;
for (int j = 1; j < k; j++)
fname2[p][j] = 64 + j;
fname2[p][k] = '\0';
}
for (int i = 0; i < 1001; i++)
{
fp = fs_open(fs, fname2[i], G_WRITE);
fs_write(fs, input + i, 24 + i, fp);
}
fs_gsys(fs, LS_S);
fp = fs_open(fs, fname2[1000], G_READ);
fs_read(fs, output + 1000, 1024, fp);
// Seventeen 1 KB files ("aa".."qq"), then one more to exercise allocation limits.
char fname3[17][3];
for (int i = 0; i < 17; i++)
{
fname3[i][0] = 97 + i;
fname3[i][1] = 97 + i;
fname3[i][2] = '\0';
fp = fs_open(fs, fname3[i], G_WRITE);
fs_write(fs, input + 1024 * i, 1024, fp);
}
fp = fs_open(fs, "EA\0", G_WRITE);
fs_write(fs, input + 1024 * 100, 1024, fp);
fs_gsys(fs, LS_S);
}
|
2b8832d1eaae02373771a0c1a3a0f80cb970f078.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @File nbody.cu
*
* Implementation of the N-Body problem
*
* Paraleln programovn na GPU (PCG 2020)
* Projekt c. 1 (cuda)
* Login: xmarci10
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
/**
* CUDA kernel to calculate velocity and new position for each particle
* @param p_in - input particles
* @param p_out - output particles
* @param N - Number of particles
* @param dt - Size of the time step
*/
/**
 * CUDA kernel to calculate velocity and new position for each particle.
 * One thread owns one particle; all N particles are streamed through shared
 * memory in blockDim.x-sized tiles so every pairwise interaction reads from
 * shared memory instead of global memory.
 *
 * @param p_in  - input particles (SoA of positions/velocities/weights)
 * @param p_out - output particles (may alias p_in only if the caller accepts
 *                mixed-step reads; typically a separate buffer)
 * @param N     - Number of particles
 * @param dt    - Size of the time step
 *
 * Dynamic shared memory requirement: 7 * blockDim.x * sizeof(float).
 */
__global__ void calculate_velocity(const t_particles p_in, t_particles p_out, int N, float dt)
{
// Partition the dynamic shared buffer into 7 per-tile arrays.
extern __shared__ float shared_particles[];
float *shared_posx = &shared_particles[0];
float *shared_posy = &shared_particles[blockDim.x];
float *shared_posz = &shared_particles[2 * blockDim.x];
float *shared_velx = &shared_particles[3 * blockDim.x];
float *shared_vely = &shared_particles[4 * blockDim.x];
float *shared_velz = &shared_particles[5 * blockDim.x];
float *shared_weight = &shared_particles[6 * blockDim.x];
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
float r, dx, dy, dz;
float posx, posy, posz;
float velx, vely, velz;
float weight;
float F;
/**
 * Resetting the registers for partial results.
 * note: Using registers reduces the number of accesses to global memory.
 * Partial results are saved at the end of the calculation.
 */
float tmpvelx = 0.0f;
float tmpvely = 0.0f;
float tmpvelz = 0.0f;
/**
 * Loading positions, velocities and weights from the global memory into the registers.
 * note: Pre-reading data from the global memory reduces the number of
 * memory accesses and thus significantly speeds up the calculation.
 */
posx = (thread_id < N) ? p_in.pos_x[thread_id] : 0.0f;
posy = (thread_id < N) ? p_in.pos_y[thread_id] : 0.0f;
posz = (thread_id < N) ? p_in.pos_z[thread_id] : 0.0f;
velx = (thread_id < N) ? p_in.vel_x[thread_id] : 0.0f;
vely = (thread_id < N) ? p_in.vel_y[thread_id] : 0.0f;
velz = (thread_id < N) ? p_in.vel_z[thread_id] : 0.0f;
weight = (thread_id < N) ? p_in.weight[thread_id] : 0.0f;
// Process the input in the form of "tiles" that are the same size as the blockDim.x.
for ( struct {int i = 0; int tile = 0;} loop;
loop.i < N;
loop.i+=blockDim.x, loop.tile++)
{
// Appropriate index into global memory.
int idx = loop.tile * blockDim.x + threadIdx.x;
/**
 * Loading a single "tile" into shared memory.
 * note: Pre-reading data from the global memory into shared memory reduces the
 * number of memory accesses and thus significantly speeds up the calculation.
 */
shared_posx[threadIdx.x] = (idx < N) ? p_in.pos_x[idx] : 0.0f;
shared_posy[threadIdx.x] = (idx < N) ? p_in.pos_y[idx] : 0.0f;
shared_posz[threadIdx.x] = (idx < N) ? p_in.pos_z[idx] : 0.0f;
shared_velx[threadIdx.x] = (idx < N) ? p_in.vel_x[idx] : 0.0f;
shared_vely[threadIdx.x] = (idx < N) ? p_in.vel_y[idx] : 0.0f;
shared_velz[threadIdx.x] = (idx < N) ? p_in.vel_z[idx] : 0.0f;
shared_weight[threadIdx.x] =(idx < N) ? p_in.weight[idx] : 0.0f;
__syncthreads();
// Process the tile.
for (int j = 0; j < blockDim.x; j++) {
/**
 * The calculation of the gravitational force is divided into
 * several instructions in order to eliminate data dependencies, and thus
 * we have increased the ILP.
 */
F = -G * dt * shared_weight[j];
dx = posx - shared_posx[j];
dy = posy - shared_posy[j];
dz = posz - shared_posz[j];
r = sqrt(dx*dx + dy*dy + dz*dz);
// see previous comment; FLT_MIN guards the self-interaction (r == 0) division
F /= (r * r * r + FLT_MIN);
tmpvelx += (r > COLLISION_DISTANCE) ? F * dx : 0.0f;
tmpvely += (r > COLLISION_DISTANCE) ? F * dy : 0.0f;
tmpvelz += (r > COLLISION_DISTANCE) ? F * dz : 0.0f;
// Add the velocity obtained by the gravitational action of the body 'j'.
if (r < COLLISION_DISTANCE) {
/**
 * Reuse of the distance registers.
 * note: The values are calculated only once and then used several times, see below.
 */
dx = weight - shared_weight[j];
dy = 2 * shared_weight[j];
dz = weight + shared_weight[j];
// Add the velocity obtained by the collision with the body 'j' (r > 0 excludes self).
tmpvelx += (r > 0.0f) ? ((dx * velx + dy * shared_velx[j]) / dz) - velx : 0.0f;
tmpvely += (r > 0.0f) ? ((dx * vely + dy * shared_vely[j]) / dz) - vely : 0.0f;
tmpvelz += (r > 0.0f) ? ((dx * velz + dy * shared_velz[j]) / dz) - velz : 0.0f;
}
}
__syncthreads();
}
/**
 * Update particle
 * note: Write to global memory only once at the end of the cycle.
 */
if (thread_id < N) {
velx += tmpvelx;
p_out.vel_x[thread_id] = velx;
p_out.pos_x[thread_id] = velx * dt + posx;
vely += tmpvely;
p_out.vel_y[thread_id] = vely;
p_out.pos_y[thread_id] = vely * dt + posy;
velz += tmpvelz;
p_out.vel_z[thread_id] = velz;
p_out.pos_z[thread_id] = velz * dt + posz;
}
}// end of calculate_velocity
//-----------------------------------------------------------------------------------------------------------------------
/**
* Reduction in thread registers. The function uses "shuffle" to exchange data between
* threads within the warp (fastest version)
*
* @param val - each thread in the warp sends its value 'val'
* @return - reduced value
*/
__inline__ __device__ float warp_reduce (float val)
{
// Shuffle-down tree reduction: offsets 16, 8, 4, 2, 1 — identical sequence
// to the fully unrolled form; lane 0 ends up with the warp-wide sum.
for (int offset = 16; offset > 0; offset >>= 1)
val += __shfl_down_sync(FULL_WARP_MASK, val, offset);
return val;
}
/******************************************************************/
/* REDUCTION BY WARP-SYNCHRONOUS PROGRAMMING */
/******************************************************************/
/**
* CUDA kernel to update particle
* @param p - particles
* @param comX - pointer to a center of mass position in X
* @param comY - pointer to a center of mass position in Y
* @param comZ - pointer to a center of mass position in Z
* @param comW - pointer to a center of mass weight
* @param lock - pointer to a user-implemented lock
* @param N - Number of particles
*/
/**
 * CUDA kernel accumulating the weighted center of mass over all particles.
 * Three-level reduction: per-thread accumulation (grid-stride) -> warp shuffle
 * reduction -> per-block combine in shared memory -> one atomic-locked update
 * of the global accumulators per block.
 *
 * Dynamic shared memory requirement: 4 * (blockDim.x / 32) * sizeof(float).
 * The caller is expected to zero *comX..*comW and *lock before launch.
 *
 * @param p    - particles (SoA)
 * @param comX - pointer to a center of mass position in X (weighted sum accumulator)
 * @param comY - pointer to a center of mass position in Y
 * @param comZ - pointer to a center of mass position in Z
 * @param comW - pointer to a center of mass weight
 * @param lock - pointer to a user-implemented spinlock (0 = free)
 * @param N    - Number of particles
 */
__global__ void centerOfMass(t_particles p, float* comX, float* comY, float* comZ, float* comW, int* lock, const int N)
{
extern __shared__ float partial_sums[];
int num_threads = blockDim.x * gridDim.x;
int warp_count = blockDim.x / 32;
int thread_id = threadIdx.x;
int warp_id = thread_id / 32;
int lane = thread_id % 32;
// One slot per warp for each of the four partial sums.
float *shared_wx = &partial_sums[0];
float *shared_wy = &partial_sums[1 * warp_count];
float *shared_wz = &partial_sums[2 * warp_count];
float *shared_w = &partial_sums[3 * warp_count];
// Each thread resets its local partial sums
float wx = 0.0f;
float wy = 0.0f;
float wz = 0.0f;
float w = 0.0f;
// Reduce multiple elements per thread (grid-stride loop over all particles)
for (int i = thread_id + blockIdx.x * blockDim.x; i < N; i += num_threads) {
float weight_i = p.weight[i];
wx += p.pos_x[i] * weight_i;
wy += p.pos_y[i] * weight_i;
wz += p.pos_z[i] * weight_i;
w += weight_i;
}
// Each warp within block performs partial reduction. After this step we get (blockDim.x/32) values.
wx = warp_reduce(wx);
wy = warp_reduce(wy);
wz = warp_reduce(wz);
w = warp_reduce(w);
/**
 * Zero thread within a warp writes the result from the previous step to the shared memory.
 * We write the result on the index of the given warp, by which we ensure the contiguous
 * storage of the results.
 */
if (lane == 0) {
shared_wx[warp_id] = wx;
shared_wy[warp_id] = wy;
shared_wz[warp_id] = wz;
shared_w[warp_id] = w;
}
__syncthreads(); // wait for all partial reductions
/**
 * If the block size is larger than 1024, a reduction in shared memory is used.
 * However, the last 32 values are reduced within one warp (see below).
 *
 * WARNING: IF U ARE USING BLOCKDIM.X > 1024 CHANGE MAX_BLOCKDIMX VALUE IN nbody.h.
 * OTHERWISE THE RESULT WILL NOT BE CORRECT.
 */
#if MAX_BLOCKDIMX > 1024
for (int stride = warp_count/2; stride > 16; stride >>= 1) {
if(thread_id < stride) {
shared_wx[thread_id] += shared_wx[thread_id + stride];
shared_wy[thread_id] += shared_wy[thread_id + stride];
shared_wz[thread_id] += shared_wz[thread_id + stride];
shared_w[thread_id] += shared_w[thread_id + stride];
}
__syncthreads();
}
// First warp loads values from shared memory, so they can be reduced in registers.
wx = (thread_id < 32) ? shared_wx[thread_id] : 0.0f;
wy = (thread_id < 32) ? shared_wy[thread_id] : 0.0f;
wz = (thread_id < 32) ? shared_wz[thread_id] : 0.0f;
w = (thread_id < 32) ? shared_w[thread_id] : 0.0f;
/**
 * If the block size is less than 1024 then the max number of values after warp
 * reduction will be 32. This means that we can again reduce these values within one
 * warp. So each thread in the first warp reads the appropriate value from the shared
 * memory into its register if it exists (otherwise zero).
 */
#else
wx = (thread_id < warp_count) ? shared_wx[thread_id] : 0.0f;
wy = (thread_id < warp_count) ? shared_wy[thread_id] : 0.0f;
wz = (thread_id < warp_count) ? shared_wz[thread_id] : 0.0f;
w = (thread_id < warp_count) ? shared_w[thread_id] : 0.0f;
#endif
// First warp performs the final reduction.
if(warp_id == 0) {
wx = warp_reduce(wx);
wy = warp_reduce(wy);
wz = warp_reduce(wz);
w = warp_reduce(w);
}
// Thread 0 writes result into global memory
if (thread_id == 0) {
// Write needs to be atomic: spin on the lock, add, then release.
while(atomicCAS(lock, 0, 1) != 0);
// critical section
*comX += wx;
*comY += wy;
*comZ += wz;
*comW += w;
atomicExch(lock, 0);
}
}// end of centerOfMass
//----------------------------------------------------------------------------------------------------------------------
/**
* CPU implementation of the Center of Mass calculation
* @param particles - All particles in the system
* @param N - Number of particles
*/
/**
 * Sequential reference implementation of the center-of-mass computation,
 * used to validate the GPU result. Processes particles one by one, shifting
 * the running center-of-mass toward each particle by its weight ratio.
 *
 * @param memDesc - accessor over the particle arrays (positions + weights)
 * @return float4 with .x/.y/.z = center-of-mass position, .w = total weight
 */
__host__ float4 centerOfMassCPU(MemDesc& memDesc)
{
float4 com = {0 ,0, 0, 0};
for(int i = 0; i < memDesc.getDataSize(); i++)
{
// Vector from the current center-of-mass estimate to particle i.
const float offX = memDesc.getPosX(i) - com.x;
const float offY = memDesc.getPosY(i) - com.y;
const float offZ = memDesc.getPosZ(i) - com.z;
// Weight ratio of particle i; stays 0 while every body seen so far is massless.
float ratio = 0.0f;
if ((memDesc.getWeight(i) + com.w) > 0.0f)
{
ratio = memDesc.getWeight(i) / (memDesc.getWeight(i) + com.w);
}
// Pull the center-of-mass toward particle i and accumulate its weight.
com.x += offX * ratio;
com.y += offY * ratio;
com.z += offZ * ratio;
com.w += memDesc.getWeight(i);
}
return com;
}// enf of centerOfMassCPU
//----------------------------------------------------------------------------------------------------------------------
| 2b8832d1eaae02373771a0c1a3a0f80cb970f078.cu | /**
* @File nbody.cu
*
* Implementation of the N-Body problem
*
* Paralelní programování na GPU (PCG 2020)
* Projekt c. 1 (cuda)
* Login: xmarci10
*/
#include <cmath>
#include <cfloat>
#include "nbody.h"
/**
* CUDA kernel to calculate velocity and new position for each particle
* @param p_in - input particles
* @param p_out - output particles
* @param N - Number of particles
* @param dt - Size of the time step
*/
/**
 * CUDA kernel: for each particle, accumulates the velocity change caused by
 * gravitation and collisions against all N particles, then writes the updated
 * velocity and position for one time step of size dt.
 *
 * Launch requirements: 1-D grid; dynamic shared memory of
 * 7 * blockDim.x * sizeof(float) (tiles for pos x/y/z, vel x/y/z, weight).
 *
 * @param p_in  - input particles (read only)
 * @param p_out - output particles (each particle written once at the end)
 * @param N     - Number of particles
 * @param dt    - Size of the time step
 */
__global__ void calculate_velocity(const t_particles p_in, t_particles p_out, int N, float dt)
{
    // Carve seven per-block tiles out of one dynamic shared-memory allocation.
    extern __shared__ float shared_particles[];
    float *shared_posx = &shared_particles[0];
    float *shared_posy = &shared_particles[blockDim.x];
    float *shared_posz = &shared_particles[2 * blockDim.x];
    float *shared_velx = &shared_particles[3 * blockDim.x];
    float *shared_vely = &shared_particles[4 * blockDim.x];
    float *shared_velz = &shared_particles[5 * blockDim.x];
    float *shared_weight = &shared_particles[6 * blockDim.x];
    int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
    float r, dx, dy, dz;
    float posx, posy, posz;
    float velx, vely, velz;
    float weight;
    float F;
    /**
     * Resetting the registers for partial results.
     * note: Using registers reduces the number of accesses to global memory.
     * Partial results are saved at the end of the calculation.
     */
    float tmpvelx = 0.0f;
    float tmpvely = 0.0f;
    float tmpvelz = 0.0f;
    /**
     * Loading positions, velocities and weights from the global memory into the
     * registers. Threads past the end of the array (thread_id >= N) load zeros
     * so they can still help with the cooperative tile loads below.
     * note: Pre-reading data from the global memory reduces the number of
     * memory accesses and thus significantly speeds up the calculation.
     */
    posx = (thread_id < N) ? p_in.pos_x[thread_id] : 0.0f;
    posy = (thread_id < N) ? p_in.pos_y[thread_id] : 0.0f;
    posz = (thread_id < N) ? p_in.pos_z[thread_id] : 0.0f;
    velx = (thread_id < N) ? p_in.vel_x[thread_id] : 0.0f;
    vely = (thread_id < N) ? p_in.vel_y[thread_id] : 0.0f;
    velz = (thread_id < N) ? p_in.vel_z[thread_id] : 0.0f;
    weight = (thread_id < N) ? p_in.weight[thread_id] : 0.0f;
    // Process the input in the form of "tiles" that are the same size as blockDim.x.
    for ( struct {int i = 0; int tile = 0;} loop;
          loop.i < N;
          loop.i+=blockDim.x, loop.tile++)
    {
        // Appropriate index into global memory for this thread's tile element.
        int idx = loop.tile * blockDim.x + threadIdx.x;
        /**
         * Loading a single "tile" into shared memory (zeros past the end).
         * note: Staging data from global to shared memory reduces the number of
         * global memory accesses and thus significantly speeds up the calculation.
         */
        shared_posx[threadIdx.x] = (idx < N) ? p_in.pos_x[idx] : 0.0f;
        shared_posy[threadIdx.x] = (idx < N) ? p_in.pos_y[idx] : 0.0f;
        shared_posz[threadIdx.x] = (idx < N) ? p_in.pos_z[idx] : 0.0f;
        shared_velx[threadIdx.x] = (idx < N) ? p_in.vel_x[idx] : 0.0f;
        shared_vely[threadIdx.x] = (idx < N) ? p_in.vel_y[idx] : 0.0f;
        shared_velz[threadIdx.x] = (idx < N) ? p_in.vel_z[idx] : 0.0f;
        shared_weight[threadIdx.x] =(idx < N) ? p_in.weight[idx] : 0.0f;
        __syncthreads();  // tile fully loaded before anyone reads it
        // Process the tile.
        for (int j = 0; j < blockDim.x; j++) {
            /**
             * The calculation of the gravitational force is divided into
             * several instructions in order to eliminate data dependencies,
             * and thus increase ILP.
             */
            F = -G * dt * shared_weight[j];
            dx = posx - shared_posx[j];
            dy = posy - shared_posy[j];
            dz = posz - shared_posz[j];
            r = sqrt(dx*dx + dy*dy + dz*dz);
            // FLT_MIN avoids division by zero for the self-interaction (r == 0).
            F /= (r * r * r + FLT_MIN);
            // Gravitation only acts beyond the collision distance; the guards
            // also make the self-interaction contribute exactly zero.
            tmpvelx += (r > COLLISION_DISTANCE) ? F * dx : 0.0f;
            tmpvely += (r > COLLISION_DISTANCE) ? F * dy : 0.0f;
            tmpvelz += (r > COLLISION_DISTANCE) ? F * dz : 0.0f;
            // Add the velocity obtained by the collision with the body 'j'.
            if (r < COLLISION_DISTANCE) {
                /**
                 * Reuse of the distance registers for the weight terms.
                 * note: The values are calculated only once and then used
                 * several times, see below.
                 */
                dx = weight - shared_weight[j];
                dy = 2 * shared_weight[j];
                dz = weight + shared_weight[j];
                // r > 0 excludes the particle colliding with itself.
                tmpvelx += (r > 0.0f) ? ((dx * velx + dy * shared_velx[j]) / dz) - velx : 0.0f;
                tmpvely += (r > 0.0f) ? ((dx * vely + dy * shared_vely[j]) / dz) - vely : 0.0f;
                tmpvelz += (r > 0.0f) ? ((dx * velz + dy * shared_velz[j]) / dz) - velz : 0.0f;
            }
        }
        __syncthreads();  // everyone done with this tile before it is overwritten
    }
    /**
     * Update particle.
     * note: Write to global memory only once at the end of the kernel.
     */
    if (thread_id < N) {
        velx += tmpvelx;
        p_out.vel_x[thread_id] = velx;
        p_out.pos_x[thread_id] = velx * dt + posx;
        vely += tmpvely;
        p_out.vel_y[thread_id] = vely;
        p_out.pos_y[thread_id] = vely * dt + posy;
        velz += tmpvelz;
        p_out.vel_z[thread_id] = velz;
        p_out.pos_z[thread_id] = velz * dt + posz;
    }
}// end of calculate_velocity
//-----------------------------------------------------------------------------------------------------------------------
/**
* Reduction in thread registers. The function uses "shuffle" to exchange data between
* threads within the warp (fastest version)
*
* @param val - each thread in the warp sends its value 'val'
* @return - reduced value
*/
/**
 * Reduces `val` across the 32 lanes of a warp using register shuffles
 * (fastest variant: no shared memory involved). After the call, lane 0
 * holds the warp-wide sum; other lanes hold partial sums.
 *
 * @param val - each lane's contribution
 * @return    - reduced value (complete in lane 0)
 */
__inline__ __device__ float warp_reduce (float val)
{
    // Halve the shuffle distance each step: 16, 8, 4, 2, 1.
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(FULL_WARP_MASK, val, offset);
    return val;
}
/******************************************************************/
/*          REDUCTION BY WARP-SYNCHRONOUS PROGRAMMING             */
/******************************************************************/
/**
* CUDA kernel to update particle
* @param p - particles
* @param comX - pointer to a center of mass position in X
* @param comY - pointer to a center of mass position in Y
* @param comZ - pointer to a center of mass position in Z
* @param comW - pointer to a center of mass weight
* @param lock - pointer to a user-implemented lock
* @param N - Number of particles
*/
/**
 * CUDA kernel: grid-wide weighted center-of-mass reduction. Each thread folds
 * a strided range of particles into register partials; warps reduce with
 * shuffles, warp results meet in shared memory, and thread 0 of each block
 * adds the block result to the global accumulators under a spin lock.
 *
 * Launch requirements: dynamic shared memory of at least
 * 4 * (blockDim.x / 32) floats; assumes blockDim.x is a multiple of 32 —
 * TODO confirm at the call site.
 *
 * @param p    - particles
 * @param comX - pointer to a center of mass position in X
 * @param comY - pointer to a center of mass position in Y
 * @param comZ - pointer to a center of mass position in Z
 * @param comW - pointer to a center of mass weight
 * @param lock - pointer to a user-implemented lock (0 = free, 1 = held)
 * @param N    - Number of particles
 */
__global__ void centerOfMass(t_particles p, float* comX, float* comY, float* comZ, float* comW, int* lock, const int N)
{
    // One float per warp and per component (wx, wy, wz, w).
    extern __shared__ float partial_sums[];
    int num_threads = blockDim.x * gridDim.x;
    int warp_count = blockDim.x / 32;   // warps per block
    int thread_id = threadIdx.x;
    int warp_id = thread_id / 32;
    int lane = thread_id % 32;          // lane index within the warp
    float *shared_wx = &partial_sums[0];
    float *shared_wy = &partial_sums[1 * warp_count];
    float *shared_wz = &partial_sums[2 * warp_count];
    float *shared_w = &partial_sums[3 * warp_count];
    // Each thread resets its local partial sums.
    float wx = 0.0f;
    float wy = 0.0f;
    float wz = 0.0f;
    float w = 0.0f;
    // Reduce multiple elements per thread (grid-stride loop over particles).
    for (int i = thread_id + blockIdx.x * blockDim.x; i < N; i += num_threads) {
        float weight_i = p.weight[i];
        wx += p.pos_x[i] * weight_i;
        wy += p.pos_y[i] * weight_i;
        wz += p.pos_z[i] * weight_i;
        w += weight_i;
    }
    // Each warp within the block performs a partial reduction. After this step
    // we get (blockDim.x / 32) values per component.
    wx = warp_reduce(wx);
    wy = warp_reduce(wy);
    wz = warp_reduce(wz);
    w = warp_reduce(w);
    /**
     * Lane zero of each warp writes the result from the previous step to
     * shared memory at the warp's index, which keeps the results contiguous.
     */
    if (lane == 0) {
        shared_wx[warp_id] = wx;
        shared_wy[warp_id] = wy;
        shared_wz[warp_id] = wz;
        shared_w[warp_id] = w;
    }
    __syncthreads(); // wait for all partial reductions
    /**
     * If the block size is larger than 1024, a reduction in shared memory is
     * used; the last 32 values are then reduced within one warp (see below).
     *
     * WARNING: IF U ARE USING BLOCKDIM.X > 1024 CHANGE MAX_BLOCKDIMX VALUE IN nbody.h.
     * OTHERWISE THE RESULT WILL NOT BE CORRECT.
     */
#if MAX_BLOCKDIMX > 1024
    for (int stride = warp_count/2; stride > 16; stride >>= 1) {
        if(thread_id < stride) {
            shared_wx[thread_id] += shared_wx[thread_id + stride];
            shared_wy[thread_id] += shared_wy[thread_id + stride];
            shared_wz[thread_id] += shared_wz[thread_id + stride];
            shared_w[thread_id] += shared_w[thread_id + stride];
        }
        __syncthreads();
    }
    // First warp loads the remaining values so they can be reduced in registers.
    wx = (thread_id < 32) ? shared_wx[thread_id] : 0.0f;
    wy = (thread_id < 32) ? shared_wy[thread_id] : 0.0f;
    wz = (thread_id < 32) ? shared_wz[thread_id] : 0.0f;
    w = (thread_id < 32) ? shared_w[thread_id] : 0.0f;
    /**
     * If the block size is at most 1024 then at most 32 warp results remain,
     * so they can be reduced directly within one warp. Each thread in the
     * first warp reads the appropriate value from shared memory into its
     * register if it exists (otherwise zero).
     */
#else
    wx = (thread_id < warp_count) ? shared_wx[thread_id] : 0.0f;
    wy = (thread_id < warp_count) ? shared_wy[thread_id] : 0.0f;
    wz = (thread_id < warp_count) ? shared_wz[thread_id] : 0.0f;
    w = (thread_id < warp_count) ? shared_w[thread_id] : 0.0f;
#endif
    // First warp performs the final reduction.
    if(warp_id == 0) {
        wx = warp_reduce(wx);
        wy = warp_reduce(wy);
        wz = warp_reduce(wz);
        w = warp_reduce(w);
    }
    // Thread 0 adds the block's result into the global accumulators.
    if (thread_id == 0) {
        // The read-modify-write of four floats needs to be atomic as a group:
        // spin on a CAS lock.
        while(atomicCAS(lock, 0, 1) != 0);
        // critical section
        *comX += wx;
        *comY += wy;
        *comZ += wz;
        *comW += w;
        atomicExch(lock, 0);  // release the lock
    }
}// end of centerOfMass
//----------------------------------------------------------------------------------------------------------------------
/**
* CPU implementation of the Center of Mass calculation
* @param particles - All particles in the system
* @param N - Number of particles
*/
/**
 * CPU reference implementation of the center-of-mass calculation.
 * Folds the particles in a single pass: the running center is shifted toward
 * each particle by the ratio of that particle's weight to the accumulated
 * weight so far.
 *
 * @param memDesc - accessor over all particles in the system
 * @return float4 holding the center position in x/y/z and total weight in w
 */
__host__ float4 centerOfMassCPU(MemDesc& memDesc)
{
    float4 result = {0.0f, 0.0f, 0.0f, 0.0f};
    for (int idx = 0; idx < memDesc.getDataSize(); idx++)
    {
        const float partWeight = memDesc.getWeight(idx);
        const float combined = partWeight + result.w;
        // The weight ratio is defined only if at least one particle so far
        // has non-zero mass.
        const float ratio = (combined > 0.0f) ? (partWeight / combined) : 0.0f;
        // Shift the running center along the vector towards this particle.
        result.x += (memDesc.getPosX(idx) - result.x) * ratio;
        result.y += (memDesc.getPosY(idx) - result.y) * ratio;
        result.z += (memDesc.getPosZ(idx) - result.z) * ratio;
        result.w += partWeight;
    }
    return result;
}// end of centerOfMassCPU
//----------------------------------------------------------------------------------------------------------------------
|
4440abc5a7a683756fb555ed19e293de9c3ddc37.hip | // !!! This is a file automatically generated by hipify!!!
#define _USE_MATH_DEFINES
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <vtkVersion.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkDoubleArray.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "utils.h"
#include "dev_matrix.h"
#define I3D(Nx,Ny,Nz,i,j,k,n) ((i)+(Nx)*(j)+(Nx)*(Ny)*(k)+(Nx)*(Ny)*(Nz)*(n))
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
#define BLOCK_SIZE_Z 16
/**
 * One explicit-Euler time step of a multi-order-parameter phase-field model
 * on an Nx x Ny x Nz grid with Ng order parameters.
 *
 * Launch: 3-D grid of BLOCK_SIZE_X x BLOCK_SIZE_Y x BLOCK_SIZE_Z blocks
 * covering the domain (the grid may round up; guarded below).
 *
 * in/out hold Nx*Ny*Nz*Ng doubles; pf holds Nx*Ny*Nz doubles and accumulates
 * the sum of squared order parameters at each grid point.
 */
__global__ void pf3d_gpu(int Nx, int Ny, int Nz, int Ng, double L, double alpha, double beta, double gamma, double kappa, double ht, double hx, double hy, double hz, double *in, double *out, double *pf)
{
    // Grid coordinates of this thread.
    const int i = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
    const int j = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
    const int k = blockIdx.z * BLOCK_SIZE_Z + threadIdx.z;
    // The launch grid is rounded up with iDivUp, so threads may fall outside
    // the domain; without this guard they would index out of bounds.
    if (i >= Nx || j >= Ny || k >= Nz)
        return;
    // pf has a single component: accumulate all Ng contributions at n == 0.
    // (The original indexed pf with the n-component offset, writing past the
    // Nx*Ny*Nz allocation for n > 0.)
    const int P0 = I3D(Nx, Ny, Nz, i, j, k, 0);
    for (int n = 0; n < Ng; n++) {
        const int P = I3D(Nx, Ny, Nz, i, j, k, n);
        const int W = I3D(Nx, Ny, Nz, i-1, j, k, n), E = I3D(Nx, Ny, Nz, i+1, j, k, n);
        const int S = I3D(Nx, Ny, Nz, i, j-1, k, n), N = I3D(Nx, Ny, Nz, i, j+1, k, n);
        const int B = I3D(Nx, Ny, Nz, i, j, k-1, n), U = I3D(Nx, Ny, Nz, i, j, k+1, n);
        // Interior points: Allen-Cahn style update.
        if (i > 0 && i < Nx-1 && j > 0 && j < Ny-1 && k > 0 && k < Nz-1) {
            // Central second differences (Laplacian components).
            const double lxx = (in[W] - 2.0*in[P] + in[E]) / (hx*hx);
            const double lyy = (in[N] - 2.0*in[P] + in[S]) / (hy*hy);
            const double lzz = (in[U] - 2.0*in[P] + in[B]) / (hz*hz);
            // Cross-coupling term: sum of squares of all *other* order
            // parameters. (The original loop `m < Ng && m != n` terminated at
            // m == n instead of skipping it, and never reset `sum` between
            // iterations of n.)
            double sum = 0.0;
            for (int m = 0; m < Ng; m++) {
                if (m == n) continue;
                const int Pm = I3D(Nx, Ny, Nz, i, j, k, m);
                sum += in[Pm] * in[Pm];
            }
            out[P] = in[P] + ht*(alpha*L*in[P] - beta*L*in[P]*in[P]*in[P]
                                 - 2.0*gamma*L*in[P]*sum + kappa*L*(lxx+lyy+lzz));
        }
        // Boundary handling, kept exactly as in the original scheme:
        // low faces copy the inward neighbour, high faces wrap to index 0.
        if (i == 0)    out[P] = in[E];
        if (j == 0)    out[P] = in[N];
        if (k == 0)    out[P] = in[U];
        if (i == Nx-1) out[P] = in[I3D(Nx, Ny, Nz, 0, j, k, n)];
        if (j == Ny-1) out[P] = in[I3D(Nx, Ny, Nz, i, 0, k, n)];
        if (k == Nz-1) out[P] = in[I3D(Nx, Ny, Nz, i, j, 0, n)];
        // Accumulate the squared order parameter into the phase-field map.
        pf[P0] += out[P] * out[P];
    }
}
/**
 * Driver: runs Nt explicit time steps of the 3-D phase-field kernel on the
 * GPU, ping-ponging the old/new device buffers, and exports the accumulated
 * phase-field map as a VTK .vti image after every step.
 */
int main()
{
    int Nx, Ny, Nz, Nt, Ng;
    double *uh_old, *uh_new, *pfh, *tmp_h;
    int iter;
    // Loop indices for the VTK export below. (The original used them without
    // declaring them anywhere, which does not compile.)
    int i, j, k, P;
    double L, kappa, alpha, beta, gamma;
    double hx; double hy; double hz; double ht;
    dim3 numBlocks, threadsPerBlock;
    // Grid size, number of time steps, number of order parameters.
    Nx = 32;
    Ny = 32;
    Nz = 32;
    Nt = 10000;
    Ng = 5;
    // Grid spacing and time step.
    hx = 2.0;
    hy = 2.0;
    hz = 2.0;
    ht = 0.25;
    // Model parameters.
    L = 1.0;
    kappa = 2.0;
    alpha = 1.0;
    beta = 1.0;
    gamma = 1.0;
    // Host buffers: Ng order parameters plus a single-component phase field.
    uh_old = dvector(Nx*Ny*Nz*Ng); uh_new = dvector(Nx*Ny*Nz*Ng); pfh = dvector(Nx*Ny*Nz);
    zero_matrix(uh_old, Nx, Ny, Nz, Ng);
    zero_matrix(uh_new, Nx, Ny, Nz, Ng);
    zero_matrix(pfh, Nx, Ny, Nz, 1);
    // initial condition
    initialize(uh_old, Nx, Ny, Nz, Ng);
    // Device buffers mirroring the host state.
    dev_matrix<double> ud_old(Nx, Ny, Nz, Ng); ud_old.set(uh_old, Nx, Ny, Nz, Ng);
    dev_matrix<double> ud_new(Nx, Ny, Nz, Ng); ud_new.set(uh_new, Nx, Ny, Nz, Ng);
    dev_matrix<double> tmp_d(Nx, Ny, Nz, Ng);
    dev_matrix<double> pfd(Nx, Ny, Nz, 1);
    numBlocks = dim3(iDivUp(Nx,BLOCK_SIZE_X), iDivUp(Ny,BLOCK_SIZE_Y), iDivUp(Nz,BLOCK_SIZE_Z));
    threadsPerBlock = dim3(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
    // VTK image and scalar array, reused for every exported frame.
    vtkSmartPointer<vtkImageData> imageData =
        vtkSmartPointer<vtkImageData>::New();
    imageData->SetDimensions(Nx, Ny, Nz);
    vtkSmartPointer<vtkDoubleArray> phase =
        vtkSmartPointer<vtkDoubleArray>::New();
    phase->SetNumberOfComponents(1);
    phase->SetNumberOfTuples(Nx * Ny * Nz);
    for (iter = 0; iter < Nt; iter++) {
        hipLaunchKernelGGL(( pf3d_gpu), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, Nx, Ny, Nz, Ng, L, alpha, beta, gamma, kappa, ht, hx, hy, hz, ud_old.getData(), ud_new.getData(), pfd.getData());
        // Ping-pong the device buffers (and the matching host buffers).
        tmp_d = ud_new;
        ud_new = ud_old;
        ud_old = tmp_d;
        tmp_h = uh_new;
        uh_new = uh_old;
        uh_old = tmp_h;
        char myfile[16];
        sprintf(myfile, "myfile_%d.vti", iter);
        // Copy the accumulated phase field back and export it as image data.
        pfd.get(pfh, Nx, Ny, Nz, 1);
        for (i=0; i < Nx; i++) {
            for (j=0; j < Ny; j++) {
                for (k=0; k < Nz; k++) {
                    P = I3D(Nx, Ny, Nz, i, j, k, 0);
                    phase->SetValue(P, pfh[P]);
                }
            }
        }
        imageData->GetPointData()->AddArray(phase);
        phase->SetName("Phase Field");
        vtkSmartPointer<vtkXMLImageDataWriter> writer =
            vtkSmartPointer<vtkXMLImageDataWriter>::New();
        writer->SetFileName(myfile);
#if VTK_MAJOR_VERSION <= 5
        writer->SetInputConnection(imageData->GetProducerPort());
#else
        writer->SetInputData(imageData);
#endif
        writer->Write();
    }
    hipDeviceSynchronize();
    return 0;
}
| 4440abc5a7a683756fb555ed19e293de9c3ddc37.cu | #define _USE_MATH_DEFINES
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <vtkVersion.h>
#include <vtkSmartPointer.h>
#include <vtkXMLImageDataWriter.h>
#include <vtkImageData.h>
#include <vtkPointData.h>
#include <vtkDoubleArray.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "utils.h"
#include "dev_matrix.h"
#define I3D(Nx,Ny,Nz,i,j,k,n) ((i)+(Nx)*(j)+(Nx)*(Ny)*(k)+(Nx)*(Ny)*(Nz)*(n))
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
#define BLOCK_SIZE_Z 16
/**
 * One explicit-Euler time step of a multi-order-parameter phase-field model
 * on an Nx x Ny x Nz grid with Ng order parameters.
 *
 * Launch: 3-D grid of BLOCK_SIZE_X x BLOCK_SIZE_Y x BLOCK_SIZE_Z blocks
 * covering the domain (the grid may round up; guarded below).
 *
 * in/out hold Nx*Ny*Nz*Ng doubles; pf holds Nx*Ny*Nz doubles and accumulates
 * the sum of squared order parameters at each grid point.
 */
__global__ void pf3d_gpu(int Nx, int Ny, int Nz, int Ng, double L, double alpha, double beta, double gamma, double kappa, double ht, double hx, double hy, double hz, double *in, double *out, double *pf)
{
    // Grid coordinates of this thread.
    const int i = blockIdx.x * BLOCK_SIZE_X + threadIdx.x;
    const int j = blockIdx.y * BLOCK_SIZE_Y + threadIdx.y;
    const int k = blockIdx.z * BLOCK_SIZE_Z + threadIdx.z;
    // The launch grid is rounded up with iDivUp, so threads may fall outside
    // the domain; without this guard they would index out of bounds.
    if (i >= Nx || j >= Ny || k >= Nz)
        return;
    // pf has a single component: accumulate all Ng contributions at n == 0.
    // (The original indexed pf with the n-component offset, writing past the
    // Nx*Ny*Nz allocation for n > 0.)
    const int P0 = I3D(Nx, Ny, Nz, i, j, k, 0);
    for (int n = 0; n < Ng; n++) {
        const int P = I3D(Nx, Ny, Nz, i, j, k, n);
        const int W = I3D(Nx, Ny, Nz, i-1, j, k, n), E = I3D(Nx, Ny, Nz, i+1, j, k, n);
        const int S = I3D(Nx, Ny, Nz, i, j-1, k, n), N = I3D(Nx, Ny, Nz, i, j+1, k, n);
        const int B = I3D(Nx, Ny, Nz, i, j, k-1, n), U = I3D(Nx, Ny, Nz, i, j, k+1, n);
        // Interior points: Allen-Cahn style update.
        if (i > 0 && i < Nx-1 && j > 0 && j < Ny-1 && k > 0 && k < Nz-1) {
            // Central second differences (Laplacian components).
            const double lxx = (in[W] - 2.0*in[P] + in[E]) / (hx*hx);
            const double lyy = (in[N] - 2.0*in[P] + in[S]) / (hy*hy);
            const double lzz = (in[U] - 2.0*in[P] + in[B]) / (hz*hz);
            // Cross-coupling term: sum of squares of all *other* order
            // parameters. (The original loop `m < Ng && m != n` terminated at
            // m == n instead of skipping it, and never reset `sum` between
            // iterations of n.)
            double sum = 0.0;
            for (int m = 0; m < Ng; m++) {
                if (m == n) continue;
                const int Pm = I3D(Nx, Ny, Nz, i, j, k, m);
                sum += in[Pm] * in[Pm];
            }
            out[P] = in[P] + ht*(alpha*L*in[P] - beta*L*in[P]*in[P]*in[P]
                                 - 2.0*gamma*L*in[P]*sum + kappa*L*(lxx+lyy+lzz));
        }
        // Boundary handling, kept exactly as in the original scheme:
        // low faces copy the inward neighbour, high faces wrap to index 0.
        if (i == 0)    out[P] = in[E];
        if (j == 0)    out[P] = in[N];
        if (k == 0)    out[P] = in[U];
        if (i == Nx-1) out[P] = in[I3D(Nx, Ny, Nz, 0, j, k, n)];
        if (j == Ny-1) out[P] = in[I3D(Nx, Ny, Nz, i, 0, k, n)];
        if (k == Nz-1) out[P] = in[I3D(Nx, Ny, Nz, i, j, 0, n)];
        // Accumulate the squared order parameter into the phase-field map.
        pf[P0] += out[P] * out[P];
    }
}
/**
 * Driver: runs Nt explicit time steps of the 3-D phase-field kernel on the
 * GPU, ping-ponging the old/new device buffers, and exports the accumulated
 * phase-field map as a VTK .vti image after every step.
 */
int main()
{
    int Nx, Ny, Nz, Nt, Ng;
    double *uh_old, *uh_new, *pfh, *tmp_h;
    int iter;
    // Loop indices for the VTK export below. (The original used them without
    // declaring them anywhere, which does not compile.)
    int i, j, k, P;
    double L, kappa, alpha, beta, gamma;
    double hx; double hy; double hz; double ht;
    dim3 numBlocks, threadsPerBlock;
    // Grid size, number of time steps, number of order parameters.
    Nx = 32;
    Ny = 32;
    Nz = 32;
    Nt = 10000;
    Ng = 5;
    // Grid spacing and time step.
    hx = 2.0;
    hy = 2.0;
    hz = 2.0;
    ht = 0.25;
    // Model parameters.
    L = 1.0;
    kappa = 2.0;
    alpha = 1.0;
    beta = 1.0;
    gamma = 1.0;
    // Host buffers: Ng order parameters plus a single-component phase field.
    uh_old = dvector(Nx*Ny*Nz*Ng); uh_new = dvector(Nx*Ny*Nz*Ng); pfh = dvector(Nx*Ny*Nz);
    zero_matrix(uh_old, Nx, Ny, Nz, Ng);
    zero_matrix(uh_new, Nx, Ny, Nz, Ng);
    zero_matrix(pfh, Nx, Ny, Nz, 1);
    // initial condition
    initialize(uh_old, Nx, Ny, Nz, Ng);
    // Device buffers mirroring the host state.
    dev_matrix<double> ud_old(Nx, Ny, Nz, Ng); ud_old.set(uh_old, Nx, Ny, Nz, Ng);
    dev_matrix<double> ud_new(Nx, Ny, Nz, Ng); ud_new.set(uh_new, Nx, Ny, Nz, Ng);
    dev_matrix<double> tmp_d(Nx, Ny, Nz, Ng);
    dev_matrix<double> pfd(Nx, Ny, Nz, 1);
    numBlocks = dim3(iDivUp(Nx,BLOCK_SIZE_X), iDivUp(Ny,BLOCK_SIZE_Y), iDivUp(Nz,BLOCK_SIZE_Z));
    threadsPerBlock = dim3(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
    // VTK image and scalar array, reused for every exported frame.
    vtkSmartPointer<vtkImageData> imageData =
        vtkSmartPointer<vtkImageData>::New();
    imageData->SetDimensions(Nx, Ny, Nz);
    vtkSmartPointer<vtkDoubleArray> phase =
        vtkSmartPointer<vtkDoubleArray>::New();
    phase->SetNumberOfComponents(1);
    phase->SetNumberOfTuples(Nx * Ny * Nz);
    for (iter = 0; iter < Nt; iter++) {
        pf3d_gpu<<<numBlocks, threadsPerBlock>>>(Nx, Ny, Nz, Ng, L, alpha, beta, gamma, kappa, ht, hx, hy, hz, ud_old.getData(), ud_new.getData(), pfd.getData());
        // Ping-pong the device buffers (and the matching host buffers).
        tmp_d = ud_new;
        ud_new = ud_old;
        ud_old = tmp_d;
        tmp_h = uh_new;
        uh_new = uh_old;
        uh_old = tmp_h;
        char myfile[16];
        sprintf(myfile, "myfile_%d.vti", iter);
        // Copy the accumulated phase field back and export it as image data.
        pfd.get(pfh, Nx, Ny, Nz, 1);
        for (i=0; i < Nx; i++) {
            for (j=0; j < Ny; j++) {
                for (k=0; k < Nz; k++) {
                    P = I3D(Nx, Ny, Nz, i, j, k, 0);
                    phase->SetValue(P, pfh[P]);
                }
            }
        }
        imageData->GetPointData()->AddArray(phase);
        phase->SetName("Phase Field");
        vtkSmartPointer<vtkXMLImageDataWriter> writer =
            vtkSmartPointer<vtkXMLImageDataWriter>::New();
        writer->SetFileName(myfile);
#if VTK_MAJOR_VERSION <= 5
        writer->SetInputConnection(imageData->GetProducerPort());
#else
        writer->SetInputData(imageData);
#endif
        writer->Write();
    }
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // drop-in replacement with identical behavior.
    cudaDeviceSynchronize();
    return 0;
}
|
36037d1516e7c678f1e8d6b58d1c519a0bf346dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "FrequencyAdjust.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/**
 * Micro-benchmark driver for the FrequencyAdjust kernel: for each requested
 * matrix size and each of the 20 block shapes, launches the kernel 1000 times
 * and prints the elapsed time (microseconds) with the configuration.
 * Command line: argv[1] = number of entries of matrices_ to benchmark.
 */
int main(int argc, char **argv) {
    hipSetDevice(0);
    // Guard against a missing argument (the original dereferenced argv[1]
    // unconditionally).
    if (argc < 2) {
        printf("usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    // matrices_ has only 7 entries; clamp to avoid reading past the table.
    if (matrix_len > 7) matrix_len = 7;
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate XSIZE*YSIZE *elements* per buffer. (The original passed
            // XSIZE*YSIZE as a byte count, allocating only a fraction of the
            // data the kernel is told about via SizeX/SizeY/SizeZ.)
            int *OCTData = NULL;
            hipMalloc(&OCTData, XSIZE * YSIZE * sizeof(int));
            float *KSpaceData = NULL;
            hipMalloc(&KSpaceData, XSIZE * YSIZE * sizeof(float));
            float *PXScale = NULL;
            hipMalloc(&PXScale, XSIZE * YSIZE * sizeof(float));
            int *IndexArray = NULL;
            hipMalloc(&IndexArray, XSIZE * YSIZE * sizeof(int));
            int CutIndex = 1;
            int SizeX = XSIZE * YSIZE;
            int SizeY = XSIZE * YSIZE;
            int SizeZ = XSIZE * YSIZE;
            // Round the launch extent up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force lazy context initialization before timing
            // One cold launch plus ten warm-up launches.
            hipLaunchKernelGGL((FrequencyAdjust), dim3(gridBlock), dim3(threadBlock), 0, 0, OCTData, KSpaceData, PXScale, IndexArray, CutIndex, SizeX, SizeY, SizeZ);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((FrequencyAdjust), dim3(gridBlock), dim3(threadBlock), 0, 0, OCTData, KSpaceData, PXScale, IndexArray, CutIndex, SizeX, SizeY, SizeZ);
            }
            // Timed section: 1000 back-to-back launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((FrequencyAdjust), dim3(gridBlock), dim3(threadBlock), 0, 0, OCTData, KSpaceData, PXScale, IndexArray, CutIndex, SizeX, SizeY, SizeZ);
            }
            // Kernel launches are asynchronous: wait for completion so the
            // timer measures execution, not just launch/enqueue overhead.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release the per-configuration buffers (the original leaked them).
            hipFree(OCTData);
            hipFree(KSpaceData);
            hipFree(PXScale);
            hipFree(IndexArray);
        }
    }
    return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "FrequencyAdjust.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/**
 * Micro-benchmark driver for the FrequencyAdjust kernel: for each requested
 * matrix size and each of the 20 block shapes, launches the kernel 1000 times
 * and prints the elapsed time (microseconds) with the configuration.
 * Command line: argv[1] = number of entries of matrices_ to benchmark.
 */
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Guard against a missing argument (the original dereferenced argv[1]
    // unconditionally).
    if (argc < 2) {
        printf("usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    // matrices_ has only 7 entries; clamp to avoid reading past the table.
    if (matrix_len > 7) matrix_len = 7;
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate XSIZE*YSIZE *elements* per buffer. (The original passed
            // XSIZE*YSIZE as a byte count, allocating only a fraction of the
            // data the kernel is told about via SizeX/SizeY/SizeZ.)
            int *OCTData = NULL;
            cudaMalloc(&OCTData, XSIZE * YSIZE * sizeof(int));
            float *KSpaceData = NULL;
            cudaMalloc(&KSpaceData, XSIZE * YSIZE * sizeof(float));
            float *PXScale = NULL;
            cudaMalloc(&PXScale, XSIZE * YSIZE * sizeof(float));
            int *IndexArray = NULL;
            cudaMalloc(&IndexArray, XSIZE * YSIZE * sizeof(int));
            int CutIndex = 1;
            int SizeX = XSIZE * YSIZE;
            int SizeY = XSIZE * YSIZE;
            int SizeZ = XSIZE * YSIZE;
            // Round the launch extent up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context initialization before timing
            // One cold launch plus ten warm-up launches.
            FrequencyAdjust<<<gridBlock,threadBlock>>>(OCTData,KSpaceData,PXScale,IndexArray,CutIndex,SizeX,SizeY,SizeZ);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                FrequencyAdjust<<<gridBlock,threadBlock>>>(OCTData,KSpaceData,PXScale,IndexArray,CutIndex,SizeX,SizeY,SizeZ);
            }
            // Timed section: 1000 back-to-back launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                FrequencyAdjust<<<gridBlock,threadBlock>>>(OCTData,KSpaceData,PXScale,IndexArray,CutIndex,SizeX,SizeY,SizeZ);
            }
            // Kernel launches are asynchronous: wait for completion so the
            // timer measures execution, not just launch/enqueue overhead.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release the per-configuration buffers (the original leaked them).
            cudaFree(OCTData);
            cudaFree(KSpaceData);
            cudaFree(PXScale);
            cudaFree(IndexArray);
        }
    }
    return 0;
}
fa09f2ae3dd943f08121f22cb2481359b1dec194.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise in-place accumulation: y[i] += x[i] for i in [0, n).
// note: every launched thread executes the whole loop itself, so launching
// more than one thread repeats (and races on) the same updates.
__global__ void TEST(int n, float* x, float* y) {
for(int i = 0; i < n; i++)
y[i] += x[i];
} | fa09f2ae3dd943f08121f22cb2481359b1dec194.cu | #include "includes.h"
// Element-wise in-place accumulation: y[i] += x[i] for i in [0, n).
// note: every launched thread executes the whole loop itself, so launching
// more than one thread repeats (and races on) the same updates.
__global__ void TEST(int n, float* x, float* y) {
for(int i = 0; i < n; i++)
y[i] += x[i];
} |
d137138c02aa45762bb762bf8d982bb0b262fc9d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "cpu_bitmap.h"
// Aborts the process with a descriptive message when a HIP API call fails.
// Used through the HANDLE_ERROR macro, which injects the call site's
// file name and line number.
static void HandleError(hipError_t err, const char *file, int line) {
    if (err == hipSuccess)
        return;
    printf("%s in %s at line %d\n", hipGetErrorString( err ), file, line);
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
// A sphere with separate ambient (ra/ga/ba) and diffuse (rd/gd/bd) colour
// components (members declared in r, b, g order), used by the Phong-style
// shading in kernel().
struct Sphere {
    float ra,ba,ga;
    float rd,bd,gd;
    float radius;
    float x,y,z;
    /**
     * Hit test for the orthographic ray through image-plane point (ox, oy).
     * On a hit, writes unit-length shading vectors for the hit point:
     * n* = surface normal, l* = direction towards the light, r* = reflection
     * of the light direction about the normal, v* = direction towards the
     * camera; returns the z coordinate of the hit point (used as depth).
     * On a miss returns -INF and leaves the output vectors untouched.
     */
    __device__ float hit(float ox, float oy, float* nx, float* ny, float* nz, float* lx, float* ly, float* lz, float* rx, float* ry, float* rz, float* vx, float* vy, float* vz) {
        // light position
        float LX = 0.0f;
        float LY = 1000.0f;
        float LZ = 0.0f;
        // camera position
        float CameraX = 0.0f;
        float CameraY = 0.0f;
        float CameraZ = 10000.0f;
        float dx = ox - x;
        float dy = oy - y;
        if (dx*dx + dy*dy < radius*radius) {
            // z of the front surface under (ox, oy)
            float dz = sqrtf( radius*radius - dx*dx - dy*dy );
            float oz = z + dz;
            // normal vector (normalized)
            *nx = dx;
            *ny = dy;
            *nz = dz;
            float n1 = sqrtf((*nx)*(*nx)+(*ny)*(*ny)+(*nz)*(*nz));
            *nx /= n1;
            *ny /= n1;
            *nz /= n1;
            // light vector: hit point -> light (normalized)
            *lx = LX - ox;
            *ly = LY - oy;
            *lz = LZ - oz;
            float n2 = sqrtf((*lx)*(*lx)+(*ly)*(*ly)+(*lz)*(*lz));
            *lx /= n2;
            *ly /= n2;
            *lz /= n2;
            // reflection vector: r = 2(n.l)n - l (normalized)
            float n3 = 2.0f*((*nx)*(*lx)+(*ny)*(*ly)+(*nz)*(*lz));
            *rx = n3*(*nx) - (*lx);
            *ry = n3*(*ny) - (*ly);
            *rz = n3*(*nz) - (*lz);
            float n4 = sqrtf((*rx)*(*rx)+(*ry)*(*ry)+(*rz)*(*rz));
            *rx /= n4;
            *ry /= n4;
            *rz /= n4;
            // camera (view) vector: hit point -> camera (normalized)
            *vx = CameraX - ox;
            *vy = CameraY - oy;
            *vz = CameraZ - oz;
            float n5 = sqrtf((*vx)*(*vx)+(*vy)*(*vy)+(*vz)*(*vz));
            *vx /= n5;
            *vy /= n5;
            *vz /= n5;
            return dz + z;   // == oz, depth of the hit point
        }
        return -INF;
    }
};
#define SPHERES 20
__constant__ Sphere s[SPHERES];
/**
 * Renders one RGBA pixel per thread: traces an orthographic ray through the
 * constant-memory sphere array `s` and shades the visible sphere with a
 * Phong model (ambient + diffuse + specular). The sphere whose hit point has
 * the largest z wins the depth test.
 * Launch: 2-D grid covering DIM x DIM pixels; `ptr` is the RGBA bitmap.
 */
__global__ void kernel( unsigned char *ptr ) {
    // specular light colour
    float LSR = 0.55f;
    float LSG = 0.55f;
    float LSB = 0.55f;
    // Phong strengths: ambient (KA*), diffuse (KD*), specular (KS*)
    float KAR = 0.3f;
    float KAG = 0.3f;
    float KAB = 0.3f;
    float KDR = 1.0f;
    float KDG = 1.0f;
    float KDB = 1.0f;
    float KSR = 0.8f;
    float KSG = 0.8f;
    float KSB = 0.8f;
    // specular shininess exponent (alpha)
    float ALPHA = 15.0f;
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    // pixel coordinates centred on the middle of the image
    float ox = (x - DIM/2);
    float oy = (y - DIM/2);
    float r=0, g=0, b=0;
    float maxz = -INF;
    for(int i=0; i<SPHERES; i++) {
        float nx, ny, nz, lx, ly, lz, rx, ry, rz, vx, vy, vz;
        float t = s[i].hit( ox, oy, &nx, &ny, &nz, &lx, &ly, &lz, &rx, &ry, &rz, &vx, &vy, &vz);
        if (t > maxz) {
            // Phong terms: ln = n.l (diffuse), rv = (r.v)^alpha (specular)
            float ln = max(0.0f, (lx*nx+ly*ny+lz*nz));
            float rv = max(0.0f, pow(rx*vx+ry*vy+rz*vz, ALPHA));
            r = KAR*s[i].ra +
            KDR*ln*s[i].rd + KSR*rv*LSR;
            g = KAG*s[i].ga +
            KDG*ln*s[i].gd + KSG*rv*LSG;
            b = KAB*s[i].ba +
            KDB*ln*s[i].bd + KSB*rv*LSB;
            maxz = t;
        }
    }
    // clamp to [0,1] and store as 8-bit RGBA (alpha fully opaque)
    ptr[offset*4 + 0] = (int)(min(1.0f, r) * 255);
    ptr[offset*4 + 1] = (int)(min(1.0f, g) * 255);
    ptr[offset*4 + 2] = (int)(min(1.0f, b) * 255);
    ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
// User-data handed to CPUBitmap in main(); holds the device bitmap pointer.
// note: dev_bitmap is never assigned anywhere in this program.
struct DataBlock {
    unsigned char *dev_bitmap;
};
/**
 * Builds SPHERES random spheres on the host, uploads them to constant memory,
 * renders the scene on the GPU, reports the GPU time via events, and displays
 * the resulting bitmap.
 */
int main( void ) {
    DataBlock data;
    // capture the start time
    hipEvent_t start, stop;
    HANDLE_ERROR( hipEventCreate( &start ) );
    HANDLE_ERROR( hipEventCreate( &stop ) );
    HANDLE_ERROR( hipEventRecord( start, 0 ) );
    CPUBitmap bitmap( DIM, DIM, &data );
    unsigned char *dev_bitmap;
    // allocate memory on the GPU for the output bitmap
    HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
    // allocate temp memory, initialize it with random spheres, copy to
    // constant memory on the GPU, then free our temp memory
    Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
    for (int i=0; i<SPHERES; i++) {
        temp_s[i].ra = temp_s[i].rd = rnd( 1.0f );
        temp_s[i].ga = temp_s[i].gd = rnd( 1.0f );
        temp_s[i].ba = temp_s[i].bd = rnd( 1.0f );
        temp_s[i].x = rnd( 1000.0f ) - 500;
        temp_s[i].y = rnd( 1000.0f ) - 500;
        temp_s[i].z = rnd( 1000.0f ) - 500;
        temp_s[i].radius = rnd( 100.0f ) + 20;
    }
    HANDLE_ERROR( hipMemcpyToSymbol( s, temp_s, sizeof(Sphere) * SPHERES) );
    free( temp_s );
    // generate a bitmap from our sphere data (16x16 threads per block)
    dim3 grids(DIM/16,DIM/16);
    dim3 threads(16,16);
    hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, dev_bitmap );
    // copy our bitmap back from the GPU for display
    HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost ) );
    // get stop time, and display the timing results
    HANDLE_ERROR( hipEventRecord( stop, 0 ) );
    HANDLE_ERROR( hipEventSynchronize( stop ) );
    float elapsedTime;
    HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
    printf( "Time to generate:  %3.1f ms\n", elapsedTime );
    HANDLE_ERROR( hipEventDestroy( start ) );
    HANDLE_ERROR( hipEventDestroy( stop ) );
    HANDLE_ERROR( hipFree( dev_bitmap ) );
    // display
    bitmap.display_and_exit();
} | d137138c02aa45762bb762bf8d982bb0b262fc9d.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include "cuda.h"
#include "cpu_bitmap.h"
// Aborts the process with a descriptive message when a CUDA API call fails.
// Used through the HANDLE_ERROR macro, which injects the call site's
// file name and line number.
static void HandleError(cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString( err ), file, line);
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
// A sphere with separate ambient (ra/ga/ba) and diffuse (rd/gd/bd) colour
// components (members declared in r, b, g order), used by the Phong-style
// shading in kernel().
struct Sphere {
    float ra,ba,ga;
    float rd,bd,gd;
    float radius;
    float x,y,z;
    /**
     * Hit test for the orthographic ray through image-plane point (ox, oy).
     * On a hit, writes unit-length shading vectors for the hit point:
     * n* = surface normal, l* = direction towards the light, r* = reflection
     * of the light direction about the normal, v* = direction towards the
     * camera; returns the z coordinate of the hit point (used as depth).
     * On a miss returns -INF and leaves the output vectors untouched.
     */
    __device__ float hit(float ox, float oy, float* nx, float* ny, float* nz, float* lx, float* ly, float* lz, float* rx, float* ry, float* rz, float* vx, float* vy, float* vz) {
        // light position
        float LX = 0.0f;
        float LY = 1000.0f;
        float LZ = 0.0f;
        // camera position
        float CameraX = 0.0f;
        float CameraY = 0.0f;
        float CameraZ = 10000.0f;
        float dx = ox - x;
        float dy = oy - y;
        if (dx*dx + dy*dy < radius*radius) {
            // z of the front surface under (ox, oy)
            float dz = sqrtf( radius*radius - dx*dx - dy*dy );
            float oz = z + dz;
            // normal vector (normalized)
            *nx = dx;
            *ny = dy;
            *nz = dz;
            float n1 = sqrtf((*nx)*(*nx)+(*ny)*(*ny)+(*nz)*(*nz));
            *nx /= n1;
            *ny /= n1;
            *nz /= n1;
            // light vector: hit point -> light (normalized)
            *lx = LX - ox;
            *ly = LY - oy;
            *lz = LZ - oz;
            float n2 = sqrtf((*lx)*(*lx)+(*ly)*(*ly)+(*lz)*(*lz));
            *lx /= n2;
            *ly /= n2;
            *lz /= n2;
            // reflection vector: r = 2(n.l)n - l (normalized)
            float n3 = 2.0f*((*nx)*(*lx)+(*ny)*(*ly)+(*nz)*(*lz));
            *rx = n3*(*nx) - (*lx);
            *ry = n3*(*ny) - (*ly);
            *rz = n3*(*nz) - (*lz);
            float n4 = sqrtf((*rx)*(*rx)+(*ry)*(*ry)+(*rz)*(*rz));
            *rx /= n4;
            *ry /= n4;
            *rz /= n4;
            // camera (view) vector: hit point -> camera (normalized)
            *vx = CameraX - ox;
            *vy = CameraY - oy;
            *vz = CameraZ - oz;
            float n5 = sqrtf((*vx)*(*vx)+(*vy)*(*vy)+(*vz)*(*vz));
            *vx /= n5;
            *vy /= n5;
            *vz /= n5;
            return dz + z;   // == oz, depth of the hit point
        }
        return -INF;
    }
};
#define SPHERES 20
__constant__ Sphere s[SPHERES];
/**
 * Renders one RGBA pixel per thread: traces an orthographic ray through the
 * constant-memory sphere array `s` and shades the visible sphere with a
 * Phong model (ambient + diffuse + specular). The sphere whose hit point has
 * the largest z wins the depth test.
 * Launch: 2-D grid covering DIM x DIM pixels; `ptr` is the RGBA bitmap.
 */
__global__ void kernel( unsigned char *ptr ) {
    // specular light colour
    float LSR = 0.55f;
    float LSG = 0.55f;
    float LSB = 0.55f;
    // Phong strengths: ambient (KA*), diffuse (KD*), specular (KS*)
    float KAR = 0.3f;
    float KAG = 0.3f;
    float KAB = 0.3f;
    float KDR = 1.0f;
    float KDG = 1.0f;
    float KDB = 1.0f;
    float KSR = 0.8f;
    float KSG = 0.8f;
    float KSB = 0.8f;
    // specular shininess exponent (alpha)
    float ALPHA = 15.0f;
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    // pixel coordinates centred on the middle of the image
    float ox = (x - DIM/2);
    float oy = (y - DIM/2);
    float r=0, g=0, b=0;
    float maxz = -INF;
    for(int i=0; i<SPHERES; i++) {
        float nx, ny, nz, lx, ly, lz, rx, ry, rz, vx, vy, vz;
        float t = s[i].hit( ox, oy, &nx, &ny, &nz, &lx, &ly, &lz, &rx, &ry, &rz, &vx, &vy, &vz);
        if (t > maxz) {
            // Phong terms: ln = n.l (diffuse), rv = (r.v)^alpha (specular)
            float ln = max(0.0f, (lx*nx+ly*ny+lz*nz));
            float rv = max(0.0f, pow(rx*vx+ry*vy+rz*vz, ALPHA));
            r = KAR*s[i].ra +
            KDR*ln*s[i].rd + KSR*rv*LSR;
            g = KAG*s[i].ga +
            KDG*ln*s[i].gd + KSG*rv*LSG;
            b = KAB*s[i].ba +
            KDB*ln*s[i].bd + KSB*rv*LSB;
            maxz = t;
        }
    }
    // clamp to [0,1] and store as 8-bit RGBA (alpha fully opaque)
    ptr[offset*4 + 0] = (int)(min(1.0f, r) * 255);
    ptr[offset*4 + 1] = (int)(min(1.0f, g) * 255);
    ptr[offset*4 + 2] = (int)(min(1.0f, b) * 255);
    ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
// User-data handed to CPUBitmap in main(); holds the device bitmap pointer.
// note: dev_bitmap is never assigned anywhere in this program.
struct DataBlock {
    unsigned char *dev_bitmap;
};
/**
 * Builds SPHERES random spheres on the host, uploads them to constant memory,
 * renders the scene on the GPU, reports the GPU time via events, and displays
 * the resulting bitmap.
 */
int main( void ) {
    DataBlock data;
    // capture the start time
    cudaEvent_t start, stop;
    HANDLE_ERROR( cudaEventCreate( &start ) );
    HANDLE_ERROR( cudaEventCreate( &stop ) );
    HANDLE_ERROR( cudaEventRecord( start, 0 ) );
    CPUBitmap bitmap( DIM, DIM, &data );
    unsigned char *dev_bitmap;
    // allocate memory on the GPU for the output bitmap
    HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap, bitmap.image_size() ) );
    // allocate temp memory, initialize it with random spheres, copy to
    // constant memory on the GPU, then free our temp memory
    Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
    for (int i=0; i<SPHERES; i++) {
        temp_s[i].ra = temp_s[i].rd = rnd( 1.0f );
        temp_s[i].ga = temp_s[i].gd = rnd( 1.0f );
        temp_s[i].ba = temp_s[i].bd = rnd( 1.0f );
        temp_s[i].x = rnd( 1000.0f ) - 500;
        temp_s[i].y = rnd( 1000.0f ) - 500;
        temp_s[i].z = rnd( 1000.0f ) - 500;
        temp_s[i].radius = rnd( 100.0f ) + 20;
    }
    HANDLE_ERROR( cudaMemcpyToSymbol( s, temp_s, sizeof(Sphere) * SPHERES) );
    free( temp_s );
    // generate a bitmap from our sphere data (16x16 threads per block)
    dim3 grids(DIM/16,DIM/16);
    dim3 threads(16,16);
    kernel<<<grids,threads>>>( dev_bitmap );
    // copy our bitmap back from the GPU for display
    HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost ) );
    // get stop time, and display the timing results
    HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
    HANDLE_ERROR( cudaEventSynchronize( stop ) );
    float elapsedTime;
    HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
    printf( "Time to generate:  %3.1f ms\n", elapsedTime );
    HANDLE_ERROR( cudaEventDestroy( start ) );
    HANDLE_ERROR( cudaEventDestroy( stop ) );
    HANDLE_ERROR( cudaFree( dev_bitmap ) );
    // display
    bitmap.display_and_exit();
}
cd6885e93c8342a8e766a7720adcdf6533920b52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
#include "bliinds_info.h"
__constant__ float dc_r_info[9971];
// Copies the size x size source image into a zero-padded buffer whose rows
// are (size + 5) floats wide: 1 pixel of zero border on the left/top plus
// extra trailing columns for the block tiling.
// Launch: one thread per output column, one block per output row.
__global__ void pad(float const* src, const int size, float * new_img) {
	int const col = threadIdx.x;
	int const row = blockIdx.x;
	int const stride = size + 2 * 1 + 3;   // padded row width (size + 5)
	// interior pixels come from src shifted by the 1-pixel border;
	// everything else (border + trailing columns) is zero-filled
	bool const inside = (col >= 1) && (col <= size) && (row >= 1) && (row <= size);
	new_img[col + row * stride] = inside ? src[(col - 1) + (row - 1) * size] : 0;
}
// Expands each 5x5 image patch into a 5x10 complex buffer with every row
// mirrored (even extension), presumably so a batched length-10 FFT yields the
// per-row DCT — TODO confirm against the cuFFT plan that consumes rearr_img.
// Imaginary parts are zeroed.  Patches start every 3 pixels in the padded
// image (row stride size + 5).
// Launch: threads = 25 (one per patch element), blocks = (512/3 + 1)^2.
__global__ void rearrangeForCuFFT(float const * new_img, const int size, hipfftComplex * rearr_img) {
	// threads = 25, blocks = (512/3 +1)^2
	int const x = threadIdx.x;   // element index within the 5x5 patch
	int const y = blockIdx.x;    // patch index
	int const pos = (y / (size / 3 + 1)) * 3 * (size + 5) + (y % (size / 3 + 1)) * 3; // position in new_img of first element to be copied
	//*****Important*** change this to row-wise
	// each sample is stored twice per 10-wide row: at column (x%5) and at the
	// mirrored column (9 - x%5)
	rearr_img[y * 50 + x / 5 * 10 + x % 5].x = new_img[pos + x % 5 + x / 5 * (size + 5)];
	rearr_img[y * 50 + (x / 5 + 1) * 10 - x % 5 - 1].x = new_img[pos + x % 5 + x / 5 * (size + 5)];
	rearr_img[y * 50 + x / 5 * 10 + x % 5].y = 0;
	rearr_img[y * 50 + (x / 5 + 1) * 10 - x % 5 - 1].y = 0;
}
// Clears one float per thread (flat 1D launch over the array length).
__global__ void setZero(float * array) {
	int const idx = blockIdx.x * blockDim.x + threadIdx.x;
	array[idx] = 0.0f;
}
// Gathers each 5x5 patch of the padded image (patches start every 3 pixels,
// row stride size + 5) into a dense, contiguous 25-float block.
// Launch: threads = 25 (one per patch element), blocks = (512/3 + 1)^2.
__global__ void rearrangeForDCT(float const * new_img, const int size, float * rearr_img) {
	int const elem = threadIdx.x;            // 0..24: position inside the patch
	int const blk  = blockIdx.x;             // patch index
	int const blocksPerRow = size / 3 + 1;
	// top-left corner of this patch inside the padded image
	int const pos = (blk / blocksPerRow) * 3 * (size + 5) + (blk % blocksPerRow) * 3;
	int const r = elem / 5;
	int const c = elem % 5;
	rearr_img[blk * 25 + elem] = new_img[pos + c + r * (size + 5)];
}
// Higher-occupancy variant of rearrangeForDCT: four patches per 128-thread
// block, one warp (32 lanes) per patch; lanes 25..31 idle.  Output patches
// are stored with a stride of 32 floats instead of 25.
__global__ void rearrangeForDCTv2(float const * new_img, const int size, float * rearr_img) {
	int const lane = threadIdx.x % 32;            // lane inside the warp
	int const warp = threadIdx.x / 32;            // which of the 4 patches
	int const blk  = blockIdx.x * 4 + warp;       // global patch index
	int const blocksPerRow = size / 3 + 1;
	// top-left corner of the patch in the padded image (row stride size + 5)
	int const pos = (blk / blocksPerRow) * 3 * (size + 5) + (blk % blocksPerRow) * 3;
	if (lane < 25) {
		rearr_img[blk * 32 + lane] = new_img[pos + lane % 5 + lane / 5 * (size + 5)];
	}
}
// Converts the row-wise FFT output of the mirrored rows into DCT
// coefficients (twiddle by cos/sin, halve, and apply the orthonormal scale:
// sqrt(0.2) for the first row, sqrt(0.4) otherwise), transposes the 5x5
// result, and re-mirrors it into the 5x10 layout so a second FFT pass can
// transform the other dimension.
// Launch: threads = 25, blocks = (512/3 + 1)^2.
__global__ void transposeForCuFFT(hipfftComplex const*read, hipfftComplex *write) {
	// threads = 25, blocks = (512/3 +1)^2
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	float temp1;// temp2;
	// reading index (x%5)*10 + x/5 while writing (x/5)*10 + x%5 is what
	// performs the transpose
	temp1 = (read[y * 50 + x % 5 * 10 + x / 5].x * cos(PI * (x / 5) / 10.0) + read[y * 50 + x % 5 * 10 + x / 5].y * sin(PI * (x / 5) / 10.0)) / 2.0 * (x / 5 == 0 ? sqrt(0.2) : sqrt(0.4));
	//temp2 = (read[y * 50 + x % 5 * 10 + x / 5].y * cos(PI * (x / 5) / 10.0) - read[y * 50 + x % 5 * 10 + x / 5].x * sin(PI * (x / 5) / 10.0)) / 2.0 * (x % 5 == 0 ? sqrt(0.2) : sqrt(0.4));
	//__syncthreads();
	// store the real DCT value twice per 10-wide row (even extension again)
	write[y * 50 + x / 5 * 10 + x % 5].x = temp1;//fabsf(temp1) > 0.00001 ? temp1 : 0;
	write[y * 50 + x / 5 * 10 + x % 5].y = 0;
	write[y * 50 + (x / 5 + 1) * 10 - x % 5 - 1].x = temp1; //fabsf(temp1) > 0.00001 ? temp1 : 0;
	write[y * 50 + (x / 5 + 1) * 10 - x % 5 - 1].y = 0;
}
// 5x5 DCT of one patch via two shared-memory matrix multiplies:
// dctImg_block = D * img_block * D^T, accumulating in double precision.
// Launch: threads = 25 (one per output coefficient), blocks = (512/3 + 1)^2.
__global__ void dct55(const float * rearr_img, const double* dctmtx, float* dctImg) {
	//threads = 25, blocks = (512/3 +1)^2
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ double img[25];   // the 5x5 image patch
	__shared__ double dct[25];   // the 5x5 DCT matrix D
	img[x] = rearr_img[y * 25 + x];
	dct[x] = dctmtx[x];
	double temp = 0.0;
	__syncthreads();
	/*if (x == 0) {
		if (y == 450) {
			for (int i = 0; i < 25; i++)
				printf("%0.20f\n", img[i]);
			printf("\n");
		}
	}*/
	// first pass: row transform, temp = (D * img)[x]
	for (int i = 0; i < 5; i++) {
		temp += dct[5 * (x / 5) + i] * (img[5 * i + x % 5]);
	}
	__syncthreads();
	img[x] = temp;
	__syncthreads();
	temp = 0.0;
	// second pass: column transform, temp = (D * img * D^T)[x]
	for (int i = 0; i < 5; i++) {
		temp += img[5 * (x / 5) + i] * dct[5 * (x % 5) + i];
	}
	dctImg[y * 25 + x/*5*(x%5) + x/5*/] = temp; //fabsf(temp) > 0.0000001 ? temp : 0;
	//__syncthreads();
	/*if (x == 0) {
		if (y == 7155) {
			for (int i = 0; i < 25; i++)
				printf("%0.20f, %0.20f\n", rearr_img[y*25 + i], dctImg[y * 25 + i]);
			printf("\n");
		}
	}*/
}
// Warp-per-patch variant of dct55: 8 patches per 256-thread block, one warp
// (32 lanes) each; patches use the stride-32 layout of rearrangeForDCTv2.
// NOTE(review): there is no __syncthreads()/__syncwarp() between the shared-
// memory write of the row pass and the reads of the column pass — this relies
// on implicit warp-synchronous execution; confirm safety on Volta+ hardware.
__global__ void dct55v2(float* rearr_img, const double* dctmtx, float* dctImg) {
	int const x = threadIdx.x%32;            // lane within the warp
	int const y = blockIdx.x*8;              // first patch handled by this block
	int const sblkIdx = threadIdx.x / 32;    // warp (sub-block) index
	int const gblkIdx = (y + sblkIdx) * 32;  // base offset of this patch
	__shared__ float img[32*8];
	__shared__ double dct[32];
	img[threadIdx.x] = rearr_img[gblkIdx + x];
	// NOTE(review): lanes 25..31 read dctmtx[25..31]; if dctmtx holds only the
	// 25 matrix entries this reads past the end — verify the allocation size.
	dct[x] = dctmtx[x];
	double temp = 0.0f;
	// row pass: (D * img)
	for (int i = 0; i < 5; i++) {
		temp += dct[5 * (x / 5) + i] * img[sblkIdx * 32 + 5 * i + x % 5];
	}
	if (x<25)
		img[threadIdx.x] = temp;
	temp = 0.0f;
	// column pass: (D * img * D^T)
	for (int i = 0; i < 5; i++) {
		temp += img[sblkIdx * 32 + 5 * (x / 5) + i] * dct[5 * (x % 5) + i];
	}
	// element 0 (the DC slot) is deliberately never written (x > 0 guard);
	// downstream statistics skip index 0 — TODO confirm no consumer reads it
	if (x>0 && x<25)
		dctImg[gblkIdx + x/*5*(x%5) + x/5*/] = temp;
	/*if (gblkIdx == 0 && x < 32) {
		printf("%f\n", dctImg[gblkIdx + x]);
	}*/
}
// Merge rearrange and DCT into one kernel. Hoping to avoid register spilling
// Fused rearrangeForDCTv2 + dct55v2: 8 patches per 256-thread block, one warp
// per patch; reads the 5x5 patches straight from the padded image and writes
// the DCT coefficients in the stride-32 layout.
// NOTE(review): like dct55v2 this has no barrier between the two multiply
// passes — warp-synchronous assumption; confirm on Volta+ hardware.
__global__ void rearrangeAndDCT55(float const* new_img, const int size, const double* dctmtx, float* dctImg) {
	int const x = threadIdx.x % 32;
	int const y = blockIdx.x * 8;
	int const sblkIdx = threadIdx.x / 32;
	int const gblkIdx = y + sblkIdx;
	int const pos = (gblkIdx / (size / 3 + 1)) * 3 * (size + 5) + (gblkIdx % (size / 3 + 1)) * 3; // position in new_img of first block element to be copied
	__shared__ float img[32 * 8];
	__shared__ double dct[32];
	//*****Important*** change this to row-wise
	//img[threadIdx.x] = 0;
	//if (x<25)
	//int const gblkIdx = (y + sblkIdx) * 32;
	// NOTE(review): lanes 25..31 load from rows 5/6 below the 5x5 patch and
	// read dctmtx[25..31]; those values are never used by lanes < 25, but
	// verify both reads stay within their allocations.
	img[threadIdx.x] = new_img[pos + x % 5 + x / 5 * (size + 5)];
	dct[x] = dctmtx[x];
	double temp = 0.0f;
	// row pass: (D * img)
	for (int i = 0; i < 5; i++) {
		temp += dct[5 * (x / 5) + i] * img[sblkIdx * 32 + 5 * i + x % 5];
	}
	if (x<25)
		img[threadIdx.x] = temp;
	temp = 0.0f;
	// column pass: (D * img * D^T)
	for (int i = 0; i < 5; i++) {
		temp += img[sblkIdx * 32 + 5 * (x / 5) + i] * dct[5 * (x % 5) + i];
	}
	// DC slot (index 0) is never written; downstream stats skip it
	if (x>0 && x<25)
		dctImg[gblkIdx * 32 + x/*5*(x%5) + x/5*/] = temp;
}
// Extracts the real 5x5 DCT coefficients out of the mirrored 5x10 complex
// FFT buffer into a dense 25-float-per-patch array.
// Launch: threads = 25, blocks = (512/3 + 1)^2.
__global__ void copyDCT(hipfftComplex const*dct_img, float *dctImg) {
	int const elem = threadIdx.x;
	int const blk  = blockIdx.x;
	// the complex buffer stores each value at row*10 + col within the patch
	int const row = elem / 5;
	int const col = elem % 5;
	dctImg[blk * 25 + elem] = dct_img[blk * 50 + row * 10 + col].x;
}
// Debug helper: fills the mirrored 5x10 complex layout with the synthetic
// pattern row*col so the rearrange/transpose indexing can be inspected.
__global__ void rearrangeTest(hipfftComplex * d_rearr_in){
	int const lane = threadIdx.x;
	int const blk  = blockIdx.x;
	int const row = lane / 5;
	int const col = lane % 5;
	float const v = (float)(row * col);
	// write the value twice: at (row, col) and mirrored within the 10-wide row
	d_rearr_in[blk * 50 + row * 10 + col].x = v;
	d_rearr_in[blk * 50 + row * 10 + col].y = 0;
	d_rearr_in[blk * 50 + (row + 1) * 10 - col - 1].x = v;
	d_rearr_in[blk * 50 + (row + 1) * 10 - col - 1].y = 0;
}
// Debug helper: copies the real parts out of a 20-element-per-block complex
// buffer, squashing tiny magnitudes to exactly zero for readable dumps.
__global__ void copyTest(hipfftComplex const* d_rearr_in, float *dctImg) {
	int const idx = blockIdx.x * 20 + threadIdx.x;
	float const re = d_rearr_in[idx].x;
	dctImg[idx] = (abs(re) > 0.00001 ? re : 0);
}
// Per-patch coefficient-of-frequency-variation: sample std / mean of the
// absolute AC DCT coefficients (index 0, the DC term, is excluded).
// Launch: threads = 25 (only thread 0 reduces), blocks = (512/3 + 1)^2;
// one output value per patch.
__global__ void rho_dct(float const* d_dctImg, float * coeff_freq_var) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[25];
	dctBlock[x] = d_dctImg[y * 25 + x];
	__syncthreads();
	if (x == 0) {
		// single-lane reduction over the 24 AC coefficients
		float mean_abs = 0, std_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_abs += abs(dctBlock[i]);
			/*if (y == 450) {
				printf("%0.20f, %0.20f\n", mean_abs, abs(dctBlock[i]));
			}*/
		}
		mean_abs = mean_abs / 24.0;
		/*if (mean_abs < 0.0001) {
			coeff_freq_var[y] = 0;
			return;
		}*/
		for (int i = 1; i < 25; i++) {
			float temp = fabs(dctBlock[i]) - mean_abs;
			std_gauss += temp * temp;
			/*if (y == 450) {
				printf("%0.20f, %0.20f\n", std_gauss, temp*temp);
			}*/
		}
		// sample standard deviation (divisor 23); epsilon avoids divide-by-0
		std_gauss = sqrt(std_gauss / 23.0);
		coeff_freq_var[y] = std_gauss / (mean_abs + 0.0000001);
		/*if (y == 450) {
			printf("std_gauss: %0.20f, \tmean_abs: %0.20f, \tcoeff: %0.20f\n", std_gauss, mean_abs, coeff_freq_var[y]);
		}*/
	}
}
// Software float atomic-add built on integer atomicCAS: keep retrying with a
// freshly observed value until our compare-and-swap wins the race.
__device__ inline void MyAtomicAdd(float *address, float value)
{
	int expected = __float_as_int(*address);
	int desired  = __float_as_int(__int_as_float(expected) + value);
	for (;;)
	{
		int const prev = atomicCAS((int *)address, expected, desired);
		if (prev == expected)
			break;                 // our sum was installed atomically
		expected = prev;           // lost the race: recompute from the new value
		desired  = __float_as_int(__int_as_float(expected) + value);
	}
}
// Higher number of warps
// Warp-per-patch version of rho_dct: 16 patches per 512-thread block, input
// in the stride-32 dctImg layout (see dct55v2).  Every lane of a warp
// redundantly computes the same statistic; lanes 25..31 load values that are
// never read by the reduction loops.
// NOTE(review): no barriers between the shared-memory write, the DC zeroing
// and the reads — warp-synchronous assumption; confirm on Volta+ hardware.
__global__ void rho_dct2(float const* d_dctImg, float * coeff_freq_var) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x % 32;
	int const y = blockIdx.x * 16;
	int const sblkIdx = threadIdx.x / 32;
	int const gblkIdx = (y + sblkIdx) * 32;
	__shared__ float dctBlock[32 * 16];
	dctBlock[threadIdx.x] = fabs(d_dctImg[gblkIdx + x]);
	dctBlock[sblkIdx * 32] = 0;   // zero the (never-written) DC slot
	//__syncthreads();
	//if (x == 0) {
	float mean_abs = 0, std_gauss = 0;
#pragma unroll
	for (int i = 1; i < 25; i++) {
		mean_abs += dctBlock[sblkIdx * 32 + i];
	}
	mean_abs /= 24.0f;
	// subtract the mean in place; only slots 1..24 are read afterwards
	dctBlock[threadIdx.x] -= mean_abs;
#pragma unroll
	for (int i = 1; i < 25; i++) {
		//float temp = dctBlock[sblkIdx * 32 + i] - mean_abs;
		std_gauss += dctBlock[sblkIdx * 32 + i] * dctBlock[sblkIdx * 32 + i];
	}
	std_gauss = sqrt(std_gauss / 23.0f);
	coeff_freq_var[gblkIdx / 32] = std_gauss / (mean_abs + 0.0000001f);
	//}
}
// Per-patch gamma estimate: computes rho = variance / (mean |deviation|)^2
// over the mean-subtracted AC coefficients, then linearly scans the r_vector
// lookup table to pick the matching gamma from g_vector.  r_vector is
// presumably monotonically decreasing — the interval test below only works
// in that case; verify against the table generator.
// Launch: threads = 25 (thread 0 reduces), blocks = (512/3 + 1)^2.
__global__ void gama_dct(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[25];
	dctBlock[x] = d_dctImg[y * 25 + x];
	__syncthreads();
	if (x == 0) {
		// mean of the 24 AC coefficients
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;
		const float rho = var_gauss / (mean_abs + 0.0000001);
		// 11.0 is the fallback gamma when rho falls outside the table range
		float gamma_gauss = 11.0;
		for (int j = 0; j < 9970; j++) {
			if (rho>r_vector[j + 1] && rho <= r_vector[j]) {
				gamma_gauss = g_vector[j];
			}
		}
		d_gama[y] = gamma_gauss;
	}
}
// Computes rho = var / mean^2 over the patch's AC coefficients, then maps it
// to a gamma value with a parallel scan of the (descending) r_vector table,
// which is staged entirely in shared memory (25 DCT slots + 9971 table slots).
// Launch: grids = (512/3 + 1)^2 blocks, 1024 threads per block.
__global__ void gama_dct2(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 1024
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[9996];
	// copy first 25 elements as dctImg and the remaining as r_vec so that r_vec completely fits in shared memory.
	dctBlock[x] = x<25?d_dctImg[y * 25 + x]:r_vector[x-25];
	__syncthreads();
	if (x < 32) {
		// all 32 lanes redundantly compute the same rho and write it to slot 0
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;
		dctBlock[0] = var_gauss / (mean_abs + 0.0000001);
	}
	else {
		// threads 32..1023 stage the rest of r_vector (entries 999..9970)
		int n = 999 + x - 32;
		while (n < 9971) {
			dctBlock[n + 25] = r_vector[n];
			n += 992;
		}
	}
	__syncthreads();
	float rho = dctBlock[0];
	// BUG FIX: publish the default gamma from a single thread and barrier
	// before the parallel table scan.  Previously every thread stored 11.0
	// unconditionally with no intervening barrier, so a slow thread's default
	// write could clobber another thread's matched g_vector result.
	if (x == 0) {
		dctBlock[1] = 11.0; // used as gamma_gauss from here on
	}
	__syncthreads();
	// each thread checks a 13-entry window starting at 10*x (windows overlap,
	// preserving the original coverage of all 9970 table intervals)
	for (int j = 10 * x; j <= 10 * x + 12; j++) {
		if (j < 9970) {
			int idx = 25 + j;
			if (rho>dctBlock[idx + 1] && rho <= dctBlock[idx]) {
				dctBlock[1] = g_vector[j];
			}
		}
	}
	__syncthreads();
	d_gama[y] = dctBlock[1];
}
// gama_dct3 tries to use all 1024 threads for populating the shared memory with r_vector.
// Resulted in a 3 ms increase in time over gama_dct2. (29ms over 26 ms of gama_dct2)
__global__ void gama_dct3(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 1024
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[9996];
	// copy first 25 elements as dctImg and the remaining as r_vec so that r_vec completely fits in shared memory.
	dctBlock[x] = x<25 ? d_dctImg[y * 25 + x] : r_vector[x - 25];
	// every thread helps stage the remaining table entries (999..9970)
	int n = 999 + x;
	while (n < 9971) {
		dctBlock[n + 25] = r_vector[n];
		n += 1024;
	}
	__syncthreads();
	if (x < 32) {
		// all 32 lanes redundantly compute the same rho and write it to slot 0
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;
		dctBlock[0] = var_gauss / (mean_abs + 0.0000001);
	}
	__syncthreads();
	float rho = dctBlock[0];
	// BUG FIX: publish the default gamma from a single thread and barrier
	// before the parallel table scan; previously every thread stored 11.0
	// with no barrier, racing with (and possibly clobbering) another
	// thread's matched g_vector result.
	if (x == 0) {
		dctBlock[1] = 11.0; // used as gamma_gauss from here on
	}
	__syncthreads();
	// each thread scans a 13-entry overlapping window of the table
	for (int j = 10 * x; j <= 10 * x + 12; j++) {
		if (j < 9970) {
			int idx = 25 + j;
			if (rho>dctBlock[idx + 1] && rho <= dctBlock[idx]) {
				dctBlock[1] = g_vector[j];
			}
		}
	}
	__syncthreads();
	d_gama[y] = dctBlock[1];
}
// gama_dct4 makes use of r_vector in the constant memory.
// Takes 159 ms for all the three runs! :(
// Same statistic as gama_dct, but the linear table scan reads dc_r_info from
// constant memory; the r_vector parameter is kept for signature parity but
// is unused here.
__global__ void gama_dct4(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[25];
	dctBlock[x] = d_dctImg[y * 25 + x];
	__syncthreads();
	if (x == 0) {
		// mean of the 24 AC coefficients
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;
		const float rho = var_gauss / (mean_abs + 0.0000001);
		// 11.0 is the fallback gamma when rho is outside the tabulated range
		float gamma_gauss = 11.0;
		for (int j = 0; j < 9970; j++) {
			if (rho>dc_r_info[j + 1] && rho <= dc_r_info[j]) {
				//if (rho>r_vector[j + 1] && rho <= r_vector[j]) {
				gamma_gauss = g_vector[j];
			}
		}
		d_gama[y] = gamma_gauss;
	}
}
// gama_dct5 copies r_vector into shared memory from const memory instead of global memory
// takes 108 ms, combined for all the three invocations :(
// (The r_vector parameter is kept for signature parity; dc_r_info is used.)
__global__ void gama_dct5(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 1024
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[9996];
	// copy first 25 elements as dctImg and the remaining as r_vec so that r_vec completely fits in shared memory.
	dctBlock[x] = x<25 ? d_dctImg[y * 25 + x] : dc_r_info[x - 25];
	__syncthreads();
	if (x < 32) {
		// all 32 lanes redundantly compute the same rho and write it to slot 0
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;
		dctBlock[0] = var_gauss / (mean_abs + 0.0000001);
	}
	else {
		// threads 32..1023 stage the rest of the table (entries 999..9970)
		int n = 999 + x - 32;
		while (n < 9971) {
			dctBlock[n + 25] = dc_r_info[n];
			n += 992;
		}
	}
	__syncthreads();
	float rho = dctBlock[0];
	// BUG FIX: publish the default gamma from a single thread and barrier
	// before the parallel table scan; previously every thread stored 11.0
	// with no barrier, racing with (and possibly clobbering) another
	// thread's matched g_vector result.
	if (x == 0) {
		dctBlock[1] = 11.0; // used as gamma_gauss from here on
	}
	__syncthreads();
	// each thread scans a 13-entry overlapping window of the table
	for (int j = 10 * x; j <= 10 * x + 12; j++) {
		if (j < 9970) {
			int idx = 25 + j;
			if (rho>dctBlock[idx + 1] && rho <= dctBlock[idx]) {
				dctBlock[1] = g_vector[j];
			}
		}
	}
	__syncthreads();
	d_gama[y] = dctBlock[1];
}
// gama_dct6 returns the rho values to d_gama
// (the gamma table lookup is deferred to gama_dct6_3, which binary-searches
// r_vector on the precomputed rho values; g_vector/r_vector are unused here)
__global__ void gama_dct6(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[25];
	dctBlock[x] = d_dctImg[y * 25 + x];
	__syncthreads();
	if (x == 0) {
		// mean of the 24 AC coefficients
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;
		// rho = sample variance / squared mean absolute deviation
		const float rho = var_gauss / (mean_abs + 0.0000001);
		d_gama[y] = rho;
	}
}
// gama_dct6 with higher warps
// Warp-per-patch rho computation (stride-32 dctImg layout, 16 patches per
// 512-thread block); every lane redundantly computes the same value.
// NOTE(review): the in-place fabsf(...-mean) rewrite below is read back by
// the same warp with no barrier — warp-synchronous assumption; confirm on
// Volta+ hardware.  g_vector/r_vector are unused (lookup happens later).
__global__ void gama_dct62(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x % 32;
	int const y = blockIdx.x * 16;
	int const sblkIdx = threadIdx.x / 32;
	int const gblkIdx = (y + sblkIdx) * 32;
	__shared__ float dctBlock[32*16];
	dctBlock[threadIdx.x] = d_dctImg[gblkIdx + x];
	//__syncthreads();
	//if (x == 0) {
	float mean_gauss = 0;
#pragma unroll
	for (int i = 1; i < 25; i++) {
		mean_gauss += dctBlock[sblkIdx * 32 + i];
	}
	mean_gauss = mean_gauss / 24.0f;
	float var_gauss = 0;
	float mean_abs = 0;
	// replace each slot with |value - mean|; only slots 1..24 are read below
	dctBlock[sblkIdx * 32 + x] = fabsf(dctBlock[sblkIdx * 32 + x] - mean_gauss);
#pragma unroll
	for (int i = 1; i < 25; i++) {
		//float temp = fabsf(dctBlock[i] - mean_gauss);
		var_gauss += dctBlock[sblkIdx * 32 + i] * dctBlock[sblkIdx * 32 + i];
		mean_abs += dctBlock[sblkIdx * 32 + i];
	}
	var_gauss = var_gauss / 23.0f;
	mean_abs = mean_abs / 24.0f;
	mean_abs *= mean_abs;
	const float rho = var_gauss / (mean_abs + 0.0000001f);
	d_gama[gblkIdx / 32] = rho;
	//}
}
// Maps precomputed rho values (from gama_dct6 / gama_dct62) to gamma via a
// binary search over the descending r_vector table.  One thread per element.
// NOTE(review): the `max` bounds check is commented out, so the launch grid
// must cover exactly the valid range of d_rho — confirm at the call site.
__global__ void gama_dct6_3(float * d_rho, float const * g_vector, float const * r_vector, float * d_gama, int max) {
	int const pos = threadIdx.x + blockIdx.x * blockDim.x;
	//if (pos < max) {
	float const rho = d_rho[pos];
	// binary search in a descending table: the interval (r[mid+1], r[mid]]
	// is the hit; otherwise move toward larger indices when rho is smaller
	int left(0), right(9970), mid(4985);
	float gamma_gauss = 11;   // fallback when rho is outside the table
	while (right > left) {
		mid = (left + right) / 2;
		float r_vec_mid_1 = r_vector[mid + 1];
		if (rho > r_vec_mid_1 && rho <= r_vector[mid]) {
			gamma_gauss = g_vector[mid];
			break;
		}
		else if (rho <= r_vec_mid_1) {
			left = mid + 1;
			continue;
		}
		else {
			right = mid;
		}
	}
	d_gama[pos] = gamma_gauss;
	//}
	/*float gamma_gauss = 11.0;
	for (int j = 0; j < 9970; j++) {
		if (rho>r_vector[j + 1] && rho <= r_vector[j]) {
			gamma_gauss = g_vector[j];
		}
	}
	d_gama[y] = gamma_gauss;
	*/
}
// Orientation energy: std/mean over the 8 |DCT| coefficients belonging to
// one of three diagonal orientation bands of the 5x5 block (orient 1..3;
// the hard-coded indices below spell out each band).
// Launch: threads = 8 (only thread 0 does any work), blocks = (512/3 + 1)^2.
__global__ void oriented_dct_rho(float const * d_dctImg, float * ori_rho, int orient) {
	//plan grids = (512/3 + 1)^2, threads = 8
	__shared__ float dctBlock[8];
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	// gather the 8 coefficients of the requested orientation band
	if (orient == 1) {
		if (x == 0) {
			dctBlock[0] = fabs(d_dctImg[blockIdx.x * 25 + 1]);
			dctBlock[1] = fabs(d_dctImg[blockIdx.x * 25 + 2]);
			dctBlock[2] = fabs(d_dctImg[blockIdx.x * 25 + 7]);
			dctBlock[3] = fabs(d_dctImg[blockIdx.x * 25 + 3]);
			dctBlock[4] = fabs(d_dctImg[blockIdx.x * 25 + 8]);
			dctBlock[5] = fabs(d_dctImg[blockIdx.x * 25 + 4]);
			dctBlock[6] = fabs(d_dctImg[blockIdx.x * 25 + 9]);
			dctBlock[7] = fabs(d_dctImg[blockIdx.x * 25 + 14]);
		}
	}
	else if (orient == 2) {
		if (x == 0) {
			dctBlock[0] = fabsf(d_dctImg[blockIdx.x * 25 + 6]);
			dctBlock[1] = fabsf(d_dctImg[blockIdx.x * 25 + 12]);
			dctBlock[2] = fabsf(d_dctImg[blockIdx.x * 25 + 17]);
			dctBlock[3] = fabsf(d_dctImg[blockIdx.x * 25 + 13]);
			dctBlock[4] = fabsf(d_dctImg[blockIdx.x * 25 + 18]);
			dctBlock[5] = fabsf(d_dctImg[blockIdx.x * 25 + 23]);
			dctBlock[6] = fabsf(d_dctImg[blockIdx.x * 25 + 19]);
			dctBlock[7] = fabsf(d_dctImg[blockIdx.x * 25 + 24]);
		}
	}
	else if (orient == 3) {
		if (x == 0) {
			dctBlock[0] = fabsf(d_dctImg[blockIdx.x * 25 + 5]);
			dctBlock[1] = fabsf(d_dctImg[blockIdx.x * 25 + 10]);
			dctBlock[2] = fabsf(d_dctImg[blockIdx.x * 25 + 15]);
			dctBlock[3] = fabsf(d_dctImg[blockIdx.x * 25 + 20]);
			dctBlock[4] = fabsf(d_dctImg[blockIdx.x * 25 + 11]);
			dctBlock[5] = fabsf(d_dctImg[blockIdx.x * 25 + 16]);
			dctBlock[6] = fabsf(d_dctImg[blockIdx.x * 25 + 21]);
			dctBlock[7] = fabsf(d_dctImg[blockIdx.x * 25 + 22]);
		}
	}
	/*for (int i = 0; i < 8; i++) {
		if (dctBlock[i] < 0.0001)
			dctBlock[i] = 0;
	}*/
	double mean = 0.0, std_gauss = 0.0;
	// thread 0 both filled and reduces dctBlock, so no barrier is needed
	if (x == 0) {
		for (int i = 0; i < 8; i++) {
			mean += dctBlock[i];
			/*if (y == 1) {
				printf("%f\n", dctBlock[i]);
			}*/
		}
		/*if (y == 1) {
			printf("\n");
		}*/
		mean /= 8.0;
		/*if (fabsf(mean) < 0.0001) {
			ori_rho[y] = 0;
			return;
		}*/
		for (int i = 0; i < 8; i++) {
			double temp = dctBlock[i] - mean;
			std_gauss += temp * temp;
		}
		// sample standard deviation (divisor 7); epsilon avoids divide-by-0
		std_gauss = sqrt(std_gauss / 7.0);
		ori_rho[y] = std_gauss / (mean + 0.0000001);
		/*if (y == 7155) {
			printf("mean = %0.20f, std_gauss = %0.20f\nori[i] = %0.20f\n", mean, std_gauss, std_gauss / (mean + 0.00000001));
		}*/
	}
}
// Increase the number of warps to 4, threads = 128
// Warp-per-patch orientation energy (stride-32 dctImg layout): only lanes
// 0..7 of each warp are active; each gathers one band coefficient via the
// closed-form index arithmetic below, then all 8 compute the same std/mean.
// NOTE(review): the `ori` bitmask array below is never read — dead local.
// NOTE(review): lanes read each other's dctBlock writes with no barrier —
// warp-synchronous assumption; confirm on Volta+ hardware.
__global__ void oriented_dct_rho2(float const * d_dctImg, float * ori_rho, int orient) {
	__shared__ float dctBlock[32*8];
	int const x = threadIdx.x % 32;
	int const y = blockIdx.x * 8;
	int const sblkIdx = (threadIdx.x / 32) * 32;
	int const gblkIdx = (y + threadIdx.x / 32) * 32;
	int const ori[3] = {
		2042757120, // 0b01111001110000100000000000000000
		34369920, // 0b00000010000011000111000110000000
		70356480 // 0b00000100001100011000111000000000
	};
	if (x < 8) {
		if (orient == 1) {
			int inter_idx = (x + 1) / 5 + (x + 1) / 8;
			dctBlock[sblkIdx + x] = fabs(d_dctImg[gblkIdx + x + 1 + 5 * inter_idx - (x + 1) / 5 * 3 - (x + 1) / 8]);
		}
		else if (orient == 2) {
			int row = (x + 1) - x / 2 - x / 5 + x / 6 - x / 7;
			dctBlock[sblkIdx + x] = fabsf(d_dctImg[gblkIdx + row * 5 + x + 1 - x / 3 * 2]);
		}
		else if (orient == 3) {
			int const col = (x + 1) / 5 + (x + 1) / 8;
			dctBlock[sblkIdx + x] = fabsf(d_dctImg[gblkIdx + (x + 1) * 5 - 14 * col + (x + 1) / 8 * 10]);
		}
		// all 8 active lanes redundantly compute the same mean / std
		float mean = dctBlock[sblkIdx + 0] + dctBlock[sblkIdx + 1] + dctBlock[sblkIdx + 2] + dctBlock[sblkIdx + 3] + \
			dctBlock[sblkIdx + 4] + dctBlock[sblkIdx + 5] + dctBlock[sblkIdx + 6] + dctBlock[sblkIdx + 7];
		mean /= 8;
		dctBlock[sblkIdx + x] -= mean;
		float std_gauss = dctBlock[sblkIdx + 0] * dctBlock[sblkIdx + 0] + dctBlock[sblkIdx + 1] * dctBlock[sblkIdx + 1] + dctBlock[sblkIdx + 2] * dctBlock[sblkIdx + 2] + \
			dctBlock[sblkIdx + 3] * dctBlock[sblkIdx + 3] + dctBlock[sblkIdx + 4] * dctBlock[sblkIdx + 4] + dctBlock[sblkIdx + 5] * dctBlock[sblkIdx + 5] + \
			dctBlock[sblkIdx + 6] * dctBlock[sblkIdx + 6] + dctBlock[sblkIdx + 7] * dctBlock[sblkIdx + 7];
		std_gauss = sqrtf(std_gauss / 7);
		ori_rho[gblkIdx / 32] = std_gauss / (mean + 0.0000001f);
	}
}
// Increase the number of warps to 4, threads = 128
// Mask-based variant of oriented_dct_rho2: the 25-coefficient patch is
// multiplied by a 0/1 orientation mask (each mask selects exactly 8
// coefficients), so the mean/std loops can run over all 25 slots while the
// divisors stay 8 and 7.  Input patches use a stride of 25 here.
// NOTE(review): no barrier between the shared-memory writes and the reads —
// warp-synchronous assumption; confirm on Volta+ hardware.
__global__ void oriented_dct_rho3(float const * d_dctImg, float * ori_rho, int orient) {
	__shared__ float dctBlock[32 * 8];
	//dctBlock[threadIdx.x] = 0;
	int const x = threadIdx.x % 32;
	int const y = blockIdx.x * 8;
	int const sblkIdx = (threadIdx.x / 32);
	int const gblkIdx = (y + threadIdx.x / 32) * 25;
	int const ori = (orient - 1) * 32;   // offset of this orientation's mask
	/*__shared__*/ bool const orient_mat[96] = {
		//orient 1
		0, 1, 1, 1, 1,
		0, 0, 1, 1, 1,
		0, 0, 0, 0, 1,
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, // extras for alignment to 32
		0, 0,
		//orient 2
		0, 0, 0, 0, 0,
		0, 1, 0, 0, 0,
		0, 0, 1, 1, 0,
		0, 0, 1, 1, 1,
		0, 0, 0, 1, 1,
		0, 0, 0, 0, 0, // extras for alignment to 32
		0, 0,
		//orient 3
		0, 0, 0, 0, 0,
		1, 0, 0, 0, 0,
		1, 1, 0, 0, 0,
		1, 1, 0, 0, 0,
		1, 1, 1, 0, 0,
		0, 0, 0, 0, 0, // extras for alignment to 32
		0, 0,
	};
	dctBlock[threadIdx.x] = fabsf(d_dctImg[gblkIdx + x]);
	// zero out every coefficient that is not part of this orientation band
	if (orient_mat[ori + x] == 0) {
		dctBlock[sblkIdx * 32 + x] = 0;
	}
	float mean = 0, std_gauss = 0;
	// the sum runs over 24 slots but only the 8 masked-in values are nonzero,
	// hence the division by 8
	for (int i = 1; i < 25; i++) {
		mean += dctBlock[sblkIdx * 32 + i];
	}
	mean /= 8.0f;
	dctBlock[threadIdx.x] -= mean;
	for (int i = 1; i < 25; i++) {
		if (orient_mat[ori + i]) {
			std_gauss += dctBlock[sblkIdx * 32 + i] * dctBlock[sblkIdx * 32 + i];
		}
	}
	std_gauss = sqrtf(std_gauss / 7.0f);
	//if (x < 8) {
	/*if (orient == 1) {
		int inter_idx = (x + 1) / 5 + (x + 1) / 8;
		dctBlock[sblkIdx + x] = fabs(d_dctImg[gblkIdx + x + 1 + 5 * inter_idx - (x + 1) / 5 * 3 - (x + 1) / 8]);
	}
	else if (orient == 2) {
		int row = (x + 1) - x / 2 - x / 5 + x / 6 - x / 7;
		dctBlock[sblkIdx + x] = fabsf(d_dctImg[gblkIdx + row * 5 + x + 1 - x / 3 * 2]);
	}
	else if (orient == 3) {
		int const col = (x + 1) / 5 + (x + 1) / 8;
		dctBlock[sblkIdx + x] = fabsf(d_dctImg[gblkIdx + (x + 1) * 5 - 14 * col + (x + 1) / 8 * 10]);
	}
	double mean = dctBlock[sblkIdx + 0] + dctBlock[sblkIdx + 1] + dctBlock[sblkIdx + 2] + dctBlock[sblkIdx + 3] + \
		dctBlock[sblkIdx + 4] + dctBlock[sblkIdx + 5] + dctBlock[sblkIdx + 6] + dctBlock[sblkIdx + 7];
	mean /= 8;
	dctBlock[sblkIdx + x] -= mean;
	double std_gauss = dctBlock[sblkIdx + 0] * dctBlock[sblkIdx + 0] + dctBlock[sblkIdx + 1] * dctBlock[sblkIdx + 1] + dctBlock[sblkIdx + 2] * dctBlock[sblkIdx + 2] + \
		dctBlock[sblkIdx + 3] * dctBlock[sblkIdx + 3] + dctBlock[sblkIdx + 4] * dctBlock[sblkIdx + 4] + dctBlock[sblkIdx + 5] * dctBlock[sblkIdx + 5] + \
		dctBlock[sblkIdx + 6] * dctBlock[sblkIdx + 6] + dctBlock[sblkIdx + 7] * dctBlock[sblkIdx + 7];
	std_gauss = sqrtf(std_gauss / 7);*/
	ori_rho[gblkIdx / 25] = std_gauss / (mean + 0.0000001f);
	//}
}
// Variance across the three per-orientation rho values of one patch
// (double-precision accumulation, sample divisor 2).
// Launch: one block per patch, threads = 1.
__global__ void oriented_dct_final(const float * ori1_rho, const float * ori2_rho, const float * ori3_rho, float * ori_rho) {
	int const blk = blockIdx.x;
	// gather the three orientation energies (stored order matches original)
	float num[3];
	num[1] = ori1_rho[blk];
	num[2] = ori2_rho[blk];
	num[0] = ori3_rho[blk];
	double mean = 0;
	for (int i = 0; i < 3; i++) {
		mean += num[i];
	}
	mean /= 3.0;
	double variance = 0;
	for (int i = 0; i < 3; i++) {
		double const dev = num[i] - mean;
		variance += dev * dev;
	}
	variance /= 2.0;
	ori_rho[blk] = variance;
}
// Higher-occupancy version of oriented_dct_final: 512 threads per block,
// one output element per thread, single-precision throughout (the /3.0 and
// /2.0 divisions deliberately keep the original double-promote-then-store
// rounding behavior).
__global__ void oriented_dct_final2(const float * ori1_rho, const float * ori2_rho, const float * ori3_rho, float * ori_rho) {
	int const idx = blockIdx.x * 512 + threadIdx.x;
	float const a = ori1_rho[idx];
	float const b = ori2_rho[idx];
	float const c = ori3_rho[idx];
	// mean and sample variance (divisor 2) of the three orientation energies
	float const mean = (a + b + c) / 3.0;
	float const variance = ((a - mean)*(a - mean) + (b - mean)*(b - mean) + (c - mean)*(c - mean)) / 2.0;
	ori_rho[idx] = variance;
}
// Frequency-band energy feature: splits the 24 AC coefficients of the 5x5
// DCT into three radial subbands (low: 5 coeffs, mid: 13, high: 6), computes
// each band's sample variance in parallel (threads 0..2), then combines them
// into the mean of two normalized variance-contrast ratios.
// Launch: threads = 25 (only 0..2 reduce), blocks = (512/3 + 1)^2.
__global__ void subband_energy(const float * d_dctImg, float * freq_bands) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[25];
	__shared__ double inter[3];   // per-band variances
	dctBlock[x] = d_dctImg[y * 25 + x];
	__syncthreads();
	if (x == 0) {
		// low-frequency band: indices 1, 2, 5, 6, 10
		//const float num1 = dctBlock[1], num2 = dctBlock[2], num3 = dctBlock[5],
		//	num4 = dctBlock[6], num5 = dctBlock[10];
		const double mean = ((double)dctBlock[1] + dctBlock[2] + dctBlock[5] + dctBlock[6] + dctBlock[10]) / 5.0;
		inter[0] = ((dctBlock[1] - mean) * (dctBlock[1] - mean) + (dctBlock[2] - mean) * (dctBlock[2] - mean) +
			(dctBlock[5] - mean) * (dctBlock[5] - mean) + (dctBlock[6] - mean) * (dctBlock[6] - mean) + (dctBlock[10] - mean) * (dctBlock[10] - mean)) / 4.0;
	}
	if (x == 1) {
		// mid-frequency band: 13 coefficients along the middle diagonals
		const float num1 = dctBlock[15], num2 = dctBlock[20], num3 = dctBlock[11],
			num4 = dctBlock[16], num5 = dctBlock[21], num6 = dctBlock[7], num7 = dctBlock[12], num8 = dctBlock[17], num9 = dctBlock[3],
			num10 = dctBlock[8], num11 = dctBlock[13], num12 = dctBlock[4], num13 = dctBlock[9];
		const double mean = ((double)num1 + num2 + num3 + num4 + num5 + num6 + num7 + num8 + num9 + num10 + num11 + num12 + num13) / 13.0;
		inter[1] = ((num1 - mean) * (num1 - mean) + (num2 - mean) * (num2 - mean) +
			(num3 - mean) * (num3 - mean) + (num4 - mean) * (num4 - mean) + (num5 - mean) * (num5 - mean) +
			(num6 - mean) * (num6 - mean) + (num7 - mean) * (num7 - mean) +
			(num8 - mean) * (num8 - mean) + (num9 - mean) * (num9 - mean) + (num10 - mean) * (num10 - mean) +
			(num11 - mean) * (num11 - mean) + (num12 - mean) * (num12 - mean) + (num13 - mean) * (num13 - mean)) / 12.0;
	}
	if (x == 2) {
		// high-frequency band: indices 14, 18, 22, 19, 23, 24
		//const float num1 = dctBlock[14], num2 = dctBlock[18], num3 = dctBlock[22],
		//	num4 = dctBlock[19], num5 = dctBlock[23], num6 = dctBlock[24];
		const double mean = ((double)dctBlock[14] + dctBlock[18] + dctBlock[22] + dctBlock[19] + dctBlock[23] + dctBlock[24]) / 6.0;
		inter[2] = ((dctBlock[14] - mean) * (dctBlock[14] - mean) + (dctBlock[18] - mean) * (dctBlock[18] - mean) +
			(dctBlock[22] - mean) * (dctBlock[22] - mean) + (dctBlock[19] - mean) * (dctBlock[19] - mean) +
			(dctBlock[23] - mean) * (dctBlock[23] - mean) + (dctBlock[24] - mean) * (dctBlock[24] - mean)) / 5.0;
	}
	__syncthreads();
	if (x == 0) {
		// r1: contrast of the high band against the low/mid average;
		// r2: contrast of mid vs low (both epsilon-guarded)
		//const double var_band1 = dctBlock[25], var_band2 = dctBlock[26], var_band3 = dctBlock[27];
		const double r1 = fabsf(inter[2] - (inter[0] + inter[1]) / 2.0) / (inter[2] + (inter[0] + inter[1]) / 2.0 + 0.00000001);
		const double r2 = fabsf(inter[1] - inter[0]) / (inter[2] + inter[0] + 0.00000001);
		//const float r1 = fabsf(var_band3 - (var_band1 + var_band2) / 2.0) / (var_band3 + (var_band1 + var_band2) / 2.0 + 0.00000001);
		//const float r2 = fabsf(var_band2 - var_band1) / (var_band3 + var_band1 + 0.00000001);
		/*if (var_band3 + var_band1 < 0.0001) {
			freq_bands[y] = 0;
			return;
		}*/
		freq_bands[y] = (r1 + r2) / 2.0;
	}
}
// Higher number of warps
// Mean and sample variance of the DCT coefficients selected by idx[0..n-1]
// out of one 5x5 block staged in shared memory. The summation order follows
// the index list exactly, so results match the original hand-unrolled code.
static __device__ __forceinline__ float subband_variance(const float *blk, const int *idx, int n) {
	float mean = 0.0f;
	for (int k = 0; k < n; ++k)
		mean += blk[idx[k]];
	mean /= (float)n;
	float var = 0.0f;
	for (int k = 0; k < n; ++k) {
		const float d = blk[idx[k]] - mean;
		var += d * d;
	}
	return var / (float)(n - 1);
}
// Frequency-band energy feature: one 5x5 DCT block per warp.
// Launch layout: blockDim.x == 128 (4 warps, one DCT block each),
// gridDim.x == ceil(numBlocks / 4). d_dctImg stores one DCT block per 32
// consecutive floats (only the first 25 entries are meaningful).
// NOTE(review): the tail thread block can index past numBlocks (as in the
// original) — confirm d_dctImg/freq_bands are padded for the last launch.
__global__ void subband_energy2(const float * d_dctImg, float * freq_bands) {
	const int x = threadIdx.x % 32;                  // lane within the warp
	const int sblkIdx = threadIdx.x / 32;            // which of the 4 warps
	const int gblkIdx = (blockIdx.x * 4 + sblkIdx) * 32; // this warp's DCT block offset
	__shared__ float dctBlock[32 * 4];
	// Each lane loads one coefficient of its warp's block.
	dctBlock[threadIdx.x] = d_dctImg[gblkIdx + x];
	// Lanes below read coefficients loaded by OTHER lanes, so a barrier is
	// required: implicit warp lockstep is not guaranteed under independent
	// thread scheduling on modern hardware. All 128 threads reach this point.
	__syncthreads();
	const float *blk = dctBlock + sblkIdx * 32;
	// Zig-zag index sets of the three radial frequency subbands of a 5x5 DCT
	// block (order matters for bit-identical floating-point sums).
	const int band1[5]  = { 1, 2, 5, 6, 10 };
	const int band2[13] = { 15, 20, 11, 16, 21, 7, 12, 17, 3, 8, 13, 4, 9 };
	const int band3[6]  = { 14, 18, 22, 19, 23, 24 };
	// Every lane of the warp computes the same three variances redundantly
	// (cheaper than a designated lane plus another barrier).
	const float var1 = subband_variance(blk, band1, 5);
	const float var2 = subband_variance(blk, band2, 13);
	const float var3 = subband_variance(blk, band3, 6);
	// Relative inter-band energy ratios; epsilon guards the zero-energy case.
	// fabsf avoids the silent float->double promotion of the original fabs.
	const float r1 = fabsf(var3 - (var1 + var2) / 2.0f) /
		(var3 + (var1 + var2) / 2.0f + 0.00000001f);
	const float r2 = fabsf(var2 - var1) / (var3 + var1 + 0.00000001f);
	// One writer per DCT block suffices — all lanes hold the same value.
	if (x == 0)
		freq_bands[gblkIdx / 32] = (r1 + r2) / 2.0f;
}
// Specialized warps for each subband does not do well :(
// Experimental variant: three warps cooperate on one DCT block, each warp
// computing the variance of one subband; warp 1 then combines the three
// variances into the frequency-band feature. Kept for reference only —
// kernel_wrapper uses subband_energy2 instead.
__global__ void subband_energy3(const float * d_dctImg, float * freq_bands) {
//plan grids = (512/3 + 1)^2, threads = 25
// lane within the warp
int const x = threadIdx.x % 32;
int const y = blockIdx.x * 4;
// NOTE(review): /96 groups three warps per DCT block, but with a 128-thread
// launch this yields only groups 0..1 and leaves the fourth warp idle —
// confirm the intended launch geometry before reusing this kernel.
int const sblkIdx = threadIdx.x / 96;
// NOTE(review): stride 25 here, while subband_energy2 indexes d_dctImg with
// stride 32 — verify which layout d_dctImg actually uses; reading 32 floats
// (x up to 31) from a 25-stride block would overlap the next block.
int const gblkIdx = (y + sblkIdx) * 25;
__shared__ float dctBlock[32 * 4];
__shared__ float inter[3 * 4];
dctBlock[sblkIdx * 32 + x] = d_dctImg[gblkIdx + x];
// NOTE(review): lanes below read coefficients written by other lanes with no
// barrier in between — this relies on implicit warp lockstep, which is not
// guaranteed under independent thread scheduling (the barrier is commented out).
//__syncthreads();
float mean;
// Warp 0 of each group: variance of subband 1 (low-frequency coefficients).
if (threadIdx.x / 32 % 3 == 0) {
mean = (dctBlock[sblkIdx * 32 + 1] + dctBlock[sblkIdx * 32 + 2] + dctBlock[sblkIdx * 32 + 5] + \
dctBlock[sblkIdx * 32 + 6] + dctBlock[sblkIdx * 32 + 10]) / 5.0f;
inter[sblkIdx * 3 + 0] = ((dctBlock[sblkIdx * 32 + 1] - mean) * (dctBlock[sblkIdx * 32 + 1] - mean) + (dctBlock[sblkIdx * 32 + 2] - mean) * (dctBlock[sblkIdx * 32 + 2] - mean) +
(dctBlock[sblkIdx * 32 + 5] - mean) * (dctBlock[sblkIdx * 32 + 5] - mean) + (dctBlock[sblkIdx * 32 + 6] - mean) * (dctBlock[sblkIdx * 32 + 6] - mean) + \
(dctBlock[sblkIdx * 32 + 10] - mean) * (dctBlock[sblkIdx * 32 + 10] - mean)) / 4.0f;
}
// Warp 1 of each group: variance of subband 2 (mid-frequency coefficients).
if (threadIdx.x / 32 % 3 == 1) {
/*const double*/ mean = (dctBlock[sblkIdx * 32 + 15] + dctBlock[sblkIdx * 32 + 20] + dctBlock[sblkIdx * 32 + 11] + \
dctBlock[sblkIdx * 32 + 16] + dctBlock[sblkIdx * 32 + 21] + dctBlock[sblkIdx * 32 + 7] + dctBlock[sblkIdx * 32 + 12] + \
dctBlock[sblkIdx * 32 + 17] + dctBlock[sblkIdx * 32 + 3] + dctBlock[sblkIdx * 32 + 8] + dctBlock[sblkIdx * 32 + 13] + \
dctBlock[sblkIdx * 32 + 4] + dctBlock[sblkIdx * 32 + 9]) / 13.0f;
inter[sblkIdx * 3 + 1] = ((dctBlock[sblkIdx * 32 + 15] - mean) * (dctBlock[sblkIdx * 32 + 15] - mean) + (dctBlock[sblkIdx * 32 + 20] - mean) * (dctBlock[sblkIdx * 32 + 20] - mean) +
(dctBlock[sblkIdx * 32 + 11] - mean) * (dctBlock[sblkIdx * 32 + 11] - mean) + (dctBlock[sblkIdx * 32 + 16] - mean) * (dctBlock[sblkIdx * 32 + 16] - mean) + (dctBlock[sblkIdx * 32 + 21] - mean) * (dctBlock[sblkIdx * 32 + 21] - mean) +
(dctBlock[sblkIdx * 32 + 7] - mean) * (dctBlock[sblkIdx * 32 + 7] - mean) + (dctBlock[sblkIdx * 32 + 12] - mean) * (dctBlock[sblkIdx * 32 + 12] - mean) +
(dctBlock[sblkIdx * 32 + 17] - mean) * (dctBlock[sblkIdx * 32 + 17] - mean) + (dctBlock[sblkIdx * 32 + 3] - mean) * (dctBlock[sblkIdx * 32 + 3] - mean) + \
(dctBlock[sblkIdx * 32 + 8] - mean) * (dctBlock[sblkIdx * 32 + 8] - mean) +
(dctBlock[sblkIdx * 32 + 13] - mean) * (dctBlock[sblkIdx * 32 + 13] - mean) + (dctBlock[sblkIdx * 32 + 4] - mean) * (dctBlock[sblkIdx * 32 + 4] - mean) +
(dctBlock[sblkIdx * 32 + 9] - mean) * (dctBlock[sblkIdx * 32 + 9] - mean)) / 12.0f;
}
// Warp 2 of each group: variance of subband 3 (high-frequency coefficients).
if (threadIdx.x / 32 % 3 == 2) {
mean = (dctBlock[sblkIdx * 32 + 14] + dctBlock[sblkIdx * 32 + 18] + dctBlock[sblkIdx * 32 + 22] + dctBlock[sblkIdx * 32 + 19] + \
dctBlock[sblkIdx * 32 + 23] + dctBlock[sblkIdx * 32 + 24]) / 6.0f;
inter[sblkIdx * 3 + 2] = ((dctBlock[sblkIdx * 32 + 14] - mean) * (dctBlock[sblkIdx * 32 + 14] - mean) + (dctBlock[sblkIdx * 32 + 18] - mean) * (dctBlock[sblkIdx * 32 + 18] - mean) +
(dctBlock[sblkIdx * 32 + 22] - mean) * (dctBlock[sblkIdx * 32 + 22] - mean) + (dctBlock[sblkIdx * 32 + 19] - mean) * (dctBlock[sblkIdx * 32 + 19] - mean) +
(dctBlock[sblkIdx * 32 + 23] - mean) * (dctBlock[sblkIdx * 32 + 23] - mean) + (dctBlock[sblkIdx * 32 + 24] - mean) * (dctBlock[sblkIdx * 32 + 24] - mean)) / 5.0f;
}
// Barrier publishes inter[] values produced by warps 0 and 2 to warp 1.
// Reached by all threads of the block (the branches above have completed).
__syncthreads();
// Warp 1 combines the three subband variances into the final feature;
// epsilon terms guard against zero total energy.
if (threadIdx.x / 32 % 3 == 1) {
const float r1 = fabsf(inter[sblkIdx * 3 + 2] - (inter[sblkIdx * 3 + 0] + inter[sblkIdx * 3 + 1]) / 2.0f) / \
(inter[sblkIdx * 3 + 2] + (inter[sblkIdx * 3 + 0] + inter[sblkIdx * 3 + 1]) / 2.0f + 0.00000001f);
const float r2 = fabsf(inter[sblkIdx * 3 + 1] - inter[sblkIdx * 3 + 0]) / (inter[sblkIdx * 3 + 2] + inter[sblkIdx * 3 + 0] + 0.00000001f);
freq_bands[gblkIdx / 25] = (r1 + r2) / 2.0f;
}
}
// Per-block mean: each thread block averages its contiguous blockDim.x-wide
// slice of d_input into d_mean_array[blockIdx.x].
// Requires blockDim.x <= 171 (capacity of the shared staging buffer); the
// comment in the original notes d_input is a 171x171 array.
__global__ void mean_100(float * d_input, float * d_mean_array, int num_elements) {
	__shared__ float partial[171];
	const int tid = threadIdx.x;
	const int base = blockIdx.x * blockDim.x;
	// Stage this block's slice in shared memory.
	partial[tid] = d_input[base + tid];
	__syncthreads();
	// Tree reduction starting at 128 (first power of two >= blockDim.x / 2);
	// the upper-bound check handles the non-power-of-two block size.
	for (int stride = 128; stride > 0; stride >>= 1) {
		if (tid < stride && tid + stride < blockDim.x) {
			partial[tid] += partial[tid + stride];
		}
		// Barrier is outside the divergent branch: every thread reaches it.
		__syncthreads();
	}
	// Thread 0 holds the block sum; write the mean.
	if (tid == 0) {
		d_mean_array[blockIdx.x] = partial[0] / num_elements;
	}
}
// 3-tap horizontal convolution with zero padding at the image borders.
// Launch layout: one block per row (blockIdx.x = row), one thread per column
// (threadIdx.x = column); size is the image width (== height).
__global__ void convolveRow(const float * d_input, const int size, float * d_output) {
	const int x = threadIdx.x;
	const int y = blockIdx.x;
	const int offset = -1;   // taps cover columns x-1 .. x+1
	const int num_taps = 3;
	// Taps kept in double so the accumulation matches the original
	// double-precision result exactly.
	const double h[3] = { 0.106506978919200, 0.786986042161605, 0.106506978919200 };
	double val = 0.0;
	for (int tap_idx = 0; tap_idx < num_taps; tap_idx++) {
		const int x_comp = x + offset + tap_idx;
		// Out-of-range samples contribute zero (zero padding).
		val += h[tap_idx] * ((x_comp < 0 || x_comp >= size) ? 0 : d_input[y * size + x_comp]);
	}
	d_output[y * size + x] = val;
	// NOTE(review): the original trailing __syncthreads() was removed — this
	// kernel uses no shared memory, so the barrier served no purpose.
}
// 3-tap vertical convolution with zero padding at the image borders.
// Launch layout: one block per row (blockIdx.x = row), one thread per column
// (threadIdx.x = column); size is the image width (== height).
__global__ void convolveCol(const float * d_input, const int size, float * d_output) {
	const int x = threadIdx.x;
	const int y = blockIdx.x;
	const int offset = -1;   // taps cover rows y-1 .. y+1
	const int num_taps = 3;
	// Taps kept in double so the accumulation matches the original
	// double-precision result exactly.
	const double h[3] = { 0.106506978919200, 0.786986042161605, 0.106506978919200 };
	double val = 0.0;
	for (int tap_idx = 0; tap_idx < num_taps; tap_idx++) {
		const int y_comp = y + offset + tap_idx;
		// Out-of-range samples contribute zero (zero padding).
		val += h[tap_idx] * ((y_comp < 0 || y_comp >= size) ? 0 : d_input[y_comp * size + x]);
	}
	d_output[y * size + x] = val;
	// NOTE(review): the original __syncthreads() before this store was removed —
	// no shared memory is used, so the barrier had no effect.
}
// Downsample an image by 2 in each dimension, keeping the pixel at the odd
// row/column of every 2x2 cell. Launch layout: one block per output row
// (blockIdx.x), one thread per output column (threadIdx.x); size is the
// INPUT image width (== height), assumed even.
__global__ void downsample_by2(const float * d_image, int const size, float * d_image_by2) {
	const int outIdx = blockIdx.x * size / 2 + threadIdx.x;
	// Equivalent to the original flat index 1 + size + 2*outIdx + blockIdx.x*size:
	// source pixel sits at row 2*blockIdx.x + 1, column 2*threadIdx.x + 1.
	const int srcRow = 2 * blockIdx.x + 1;
	const int srcCol = 2 * threadIdx.x + 1;
	d_image_by2[outIdx] = d_image[srcRow * size + srcCol];
}
// Flatten a 512x512 8-bit single-channel cv::Mat into a row-major float array.
// float_array must point to at least 512 * 512 floats.
void linearize_and_cast_from_Mat_to_float(const cv::Mat &Mat_in, float *float_array)
{
	// Walk the destination with a running pointer instead of recomputing
	// row * 512 + col on every store.
	float *dst = float_array;
	for (int r = 0; r < 512; ++r) {
		for (int c = 0; c < 512; ++c) {
			*dst++ = static_cast<float>(Mat_in.at<unsigned char>(r, c));
		}
	}
}
void device_rst()
{
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
hipError_t cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
}
}
void kernel_wrapper(const cv::Mat &Mat_in)
{
/*
// cuFFT settings for DCT
hipfftHandle p;
int rank = 1;
int lengthOfDFT = 10;
int howmany = 5 * (512 / 3 + 1) * (512 / 3 + 1);
int odist;
int idist = odist = lengthOfDFT;
int ostride;
int istride = ostride = 1; // array is contiguous in memory
hipfftPlanMany(&p, rank, &lengthOfDFT, NULL, istride, idist, NULL, ostride,
odist, HIPFFT_C2C, howmany);
*/
// The section of code below checks for zero copy feature. This way a kernel can directly use host memory.
/*hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
if (!prop.canMapHostMemory)
exit(0);
else
std::cout << "canMapHostMemory\n";
*/
// Allocate HOST memory
float* h_in; hipError_t status = hipHostMalloc(&h_in, 512 * 512 * sizeof(float));
if (status != hipSuccess) {
std::cout << "Error allocating pinned host memory for imput image";
}
float* features; status = hipHostMalloc(&features, 25 * sizeof(float));
if (status != hipSuccess) {
std::cout << "Error allocating pinned host memory for features";
}
LARGE_INTEGER frequency; // ticks per second
LARGE_INTEGER t1, t2; // ticks
double elapsedTime;
// get ticks per second
QueryPerformanceFrequency(&frequency);
// start timer
QueryPerformanceCounter(&t1);
// Linearize image and cast from 8UC1 Mat to float array
linearize_and_cast_from_Mat_to_float(Mat_in, h_in);
//const float* h_in = Mat_in.ptr<float>(0);
//Device memory allocations
float *d_in; hipMalloc((void **)&d_in, 512 * 512 * sizeof(float));
hipProfilerStart();
// Copy data from HOST -> DEVICE
hipMemcpy(d_in, h_in, 512 * 512 * sizeof(float), hipMemcpyHostToDevice);
float * d_g_info; hipMalloc((void **)&d_g_info, 9971 * sizeof(float));
float * d_r_info; hipMalloc((void **)&d_r_info, 9971 * sizeof(float));
float *d_in_pad; hipMalloc((void **)&d_in_pad, 517 * 517 * sizeof(float));
/*hipfftComplex *d_rearr_in; hipMalloc((void **)&d_rearr_in, 50 * (512 / 3 + 1) * (512 / 3 + 1) * sizeof(hipfftComplex));
hipfftComplex *d_dct_inter; hipMalloc((void **)&d_dct_inter, 50 * (512 / 3 + 1) * (512 / 3 + 1) * sizeof(hipfftComplex));
hipfftComplex *d_dct_in; hipMalloc((void **)&d_dct_in, 50 * (512 / 3 + 1) * (512 / 3 + 1) * sizeof(hipfftComplex));
*/
float * d_dctImg; hipMalloc((void **)&d_dctImg, (512 / 3 + 1) * (512 / 3 + 1) * 32 * sizeof(float));
float * d_coeff_freq_var_L1; hipMalloc((void **)&d_coeff_freq_var_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_ori1_rho_L1; hipMalloc((void **)&d_ori1_rho_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_ori2_rho_L1; hipMalloc((void **)&d_ori2_rho_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_ori3_rho_L1; hipMalloc((void **)&d_ori3_rho_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_ori_rho_L1; hipMalloc((void **)&d_ori_rho_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_freq_bands; hipMalloc((void **)&d_freq_bands, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_gama_L1; hipMalloc((void **)&d_gama_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_mean_array; hipMalloc((void **)&d_mean_array, (512 / 3 + 1) * sizeof(float));
hipStream_t stream1;
hipStream_t stream2;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
//hipStreamCreateWithFlags(&stream1, hipStreamNonBlocking);
//hipStreamCreateWithFlags(&stream2, hipStreamNonBlocking);
// copy gama vector
hipMemcpyAsync(d_g_info, bliinds_info::g_vector, 9971 * sizeof(float), hipMemcpyHostToDevice, stream1);
// copy rho vector
hipMemcpyAsync(d_r_info, bliinds_info::r_vector, 9971 * sizeof(float), hipMemcpyHostToDevice, stream2);
hipMemcpyToSymbol(dc_r_info, bliinds_info::r_vector, 9971 * sizeof(float));
// pad input image for DCT in blocks
pad << <517, 517 >> >(d_in, 512, d_in_pad);
// Total number of DCT blocks at current scale
int square = (512 / 3 + 1) * (512 / 3 + 1);
//CuFFT at 512x512
/*rearrangeForCuFFT << <square, 25, 0, 0 >> >(d_in_pad, 512, d_rearr_in);
hipfftExecC2C(p, d_rearr_in, d_dct_in, HIPFFT_FORWARD);
transposeForCuFFT << <square, 25, 0, 0 >> >(d_dct_in, d_dct_in);
hipfftExecC2C(p, d_dct_in, d_dct_in, HIPFFT_FORWARD);
transposeForCuFFT << <square, 25, 0, 0 >> >(d_dct_in, d_dct_in);
copyDCT << <square, 25 >> >(d_dct_in, d_dctImg);
hipDeviceSynchronize();
*/
//float * h_dctImg = (float*)malloc((512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
//hipMemcpy(h_dctImg, d_dctImg, (512 / 3 + 1) * (512 / 3 + 1) * 25 * sizeof(float), hipMemcpyDeviceToHost);
float* d_rearr_man; hipMalloc((void **)&d_rearr_man, 32 * (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
hipMemset(d_rearr_man, 0, 32 * square * sizeof(float));
hipMemset(d_dctImg, 0, 32 * square * sizeof(float));
double* d_dctmtx; hipMalloc((void **)&d_dctmtx, 32 * sizeof(double));
hipMemset(d_dctmtx, 0, 32 * sizeof(double));
hipMemcpy(d_dctmtx, dct2_55::dctmtx_5, 25 * sizeof(double), hipMemcpyHostToDevice);
//float* h_dctImg = (float *)malloc(32 * square*sizeof(float));
/*for (int i = 517; i < 517+32; i++) {
std::cout << h_dctImg[i] << "\t";
if ((i + 1) % 5 == 0)
std::cout << std::endl;
}*/
//rearrangeForDCTv2 << <square / 4 + 1, 128 >> >(d_in_pad, 512, d_rearr_man);
/*hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "ERROR1: %s\n", hipGetErrorString(error));
exit(-1);
}*/
rearrangeAndDCT55 << <square / 8 + 1, 256 >> >(d_in_pad, 512, d_dctmtx, d_dctImg);
/*hipDeviceSynchronize();
error = hipGetLastError();
//hipMemcpy(h_dctImg, d_dctImg, 32 * square * sizeof(float), hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
fprintf(stderr, "ERROR2: %s\n", hipGetErrorString(error));
exit(-1);
}*/
//#ifdef EBM
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
//#endif
rho_dct2 << <square / 16 + 1, 512 >> >(d_dctImg, d_coeff_freq_var_L1);
//thrust::device_ptr<float> keys(d_coeff_freq_var_L1);
thrust::sort(thrust::device, d_coeff_freq_var_L1, d_coeff_freq_var_L1 + square);
//thrust::host_vector<float> h_coeff_freq_L1(d_coeff_freq_var_L1, d_coeff_freq_var_L1 + square);
int mean10_size = ceil((square) / 10.0);
features[0] = thrust::reduce(thrust::device, d_coeff_freq_var_L1, d_coeff_freq_var_L1 + square) / square;
features[1] = thrust::reduce(thrust::device, d_coeff_freq_var_L1 + square - mean10_size, d_coeff_freq_var_L1 + square) / mean10_size;
gama_dct62 << <square / 16 + 1, 512 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L1);
thrust::sort(thrust::device, d_gama_L1, d_gama_L1 + square);
gama_dct6_3 << <square / 128 + 1, 128 >> >(d_gama_L1, d_g_info, d_r_info, d_gama_L1, square);
features[2] = thrust::reduce(thrust::device, d_gama_L1, d_gama_L1 + square) / square;
features[3] = thrust::reduce(thrust::device, d_gama_L1 + square - mean10_size, d_gama_L1 + square) / mean10_size;
/*hipMemcpy(h_dctImg, d_coeff_freq_var_L1, square * sizeof(float), hipMemcpyDeviceToHost);
std::ofstream outfile3("harbourJPGcoeff_freq_varL1GPU.txt");
for (int j = 0; j < square; j++) {
//for (int i = 0; i < 5; i++) {
outfile3 << h_dctImg[j];
//if ((i + 1) % 5 == 0){
//}
//}
outfile3 << std::endl;
}
outfile3.close();
*/
/*std::cout << "square1 = " << square << std::endl;
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori1_rho_L1, 1);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori2_rho_L1, 2);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori3_rho_L1, 3);*/
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori1_rho_L1, 1);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori2_rho_L1, 2);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori3_rho_L1, 3);
oriented_dct_final2 << <square / 512 + 1, 512, 0 >> >(d_ori1_rho_L1, d_ori2_rho_L1, d_ori3_rho_L1, d_ori_rho_L1);
thrust::sort(thrust::device, d_ori_rho_L1, d_ori_rho_L1 + square);
features[6] = thrust::reduce(thrust::device, d_ori_rho_L1, d_ori_rho_L1 + square) / square;
features[7] = thrust::reduce(thrust::device, d_ori_rho_L1 + square - mean10_size, d_ori_rho_L1 + square) / mean10_size;
//std::cout << "or_rho_dct done\n";
subband_energy2 << <square / 4 + 1, 128 >> >(d_dctImg, d_freq_bands);
thrust::sort(thrust::device, d_freq_bands, d_freq_bands + square);
features[4] = thrust::reduce(thrust::device, d_freq_bands, d_freq_bands + square) / square;
features[5] = thrust::reduce(thrust::device, d_freq_bands + square - mean10_size, d_freq_bands + square) / mean10_size;
//std::cout << "subband done\n";
//hipMemcpy(h_dctImg, d_gama_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_ori1_rho_L1);
hipFree(d_ori2_rho_L1);
hipFree(d_ori3_rho_L1);
hipFree(d_gama_L1);
hipFree(d_coeff_freq_var_L1);
hipFree(d_freq_bands);
hipFree(d_ori_rho_L1);
//----------------------Start Phase 2----------------------------------------------------------
hipDeviceSynchronize();
square = (256 / 3 + 1) * (256 / 3 + 1);
float *d_in_conv_inter_L2; hipMalloc((void **)&d_in_conv_inter_L2, 512 * 512 * sizeof(float));
float *d_in_convolve_L2; hipMalloc((void **)&d_in_convolve_L2, 512 * 512 * sizeof(float));
float *d_in_L2; hipMalloc((void **)&d_in_L2, 256 * 256 * sizeof(float));
float *d_in_pad_L2; hipMalloc((void **)&d_in_pad_L2, 261 * 261 * sizeof(float));
float *d_coeff_freq_var_L2; hipMalloc((void **)&d_coeff_freq_var_L2, square * sizeof(float));
hipfftComplex *d_rearr_in_L2; hipMalloc((void **)&d_rearr_in_L2, 50 * square * sizeof(hipfftComplex));
float * d_ori1_rho_L2; hipMalloc((void **)&d_ori1_rho_L2, square * sizeof(float));
float * d_ori2_rho_L2; hipMalloc((void **)&d_ori2_rho_L2, square * sizeof(float));
float * d_ori3_rho_L2; hipMalloc((void **)&d_ori3_rho_L2, square * sizeof(float));
float * d_ori_rho_L2; hipMalloc((void **)&d_ori_rho_L2, square * sizeof(float));
float * d_freq_bands_L2; hipMalloc((void **)&d_freq_bands_L2, square * sizeof(float));
float * d_gama_L2; hipMalloc((void **)&d_gama_L2, square * sizeof(float));
convolveRow << <512, 512 >> >(d_in, 512, d_in_conv_inter_L2);
convolveCol << <512, 512 >> >(d_in_conv_inter_L2, 512, d_in_convolve_L2);
hipDeviceSynchronize();
downsample_by2 << <256, 256 >> >(d_in_convolve_L2, 512, d_in_L2);
pad << <261, 261 >> >(d_in_L2, 256, d_in_pad_L2);
/*float * h_dctImg = (float*)malloc(square * sizeof(float));
hipMemcpy(h_dctImg, d_in_convolve_L2, square * sizeof(float), hipMemcpyDeviceToHost);
std::ofstream outfile3("convolve_L2GPU.txt");
for (int j = 0; j < square; j++) {
//for (int i = 0; i < 5; i++) {
outfile3 << h_dctImg[j];
//if ((i + 1) % 5 == 0){
//}
//}
outfile3 << std::endl;
}
outfile3.close();
hipMemcpy(h_dctImg, d_in_L2, 256 * 256 * sizeof(float), hipMemcpyDeviceToHost);
std::ofstream outfile2("d_in_L2GPU.txt");
for (int j = 0; j < 256 * 256; j++) {
//for (int i = 0; i < 5; i++) {
outfile2 << h_dctImg[j];
//if ((i + 1) % 5 == 0){
//}
//}
outfile2 << std::endl;
}
outfile2.close();
*/
/*howmany = 5 * square;
hipfftPlanMany(&p, rank, &lengthOfDFT, NULL, istride, idist, NULL, ostride,
odist, HIPFFT_C2C, howmany);
hipDeviceSynchronize();
rearrangeForCuFFT << <square, 25 >> >(d_in_pad_L2, 256, d_rearr_in_L2);
hipfftExecC2C(p, d_rearr_in_L2, d_dct_in, HIPFFT_FORWARD);
transposeForCuFFT << <square, 25 >> >(d_dct_in, d_dct_in);
hipfftExecC2C(p, d_dct_in, d_dct_in, HIPFFT_FORWARD);
transposeForCuFFT << <square, 25 >> >(d_dct_in, d_dct_in);
copyDCT << <square, 25 >> >(d_dct_in, d_dctImg);
hipDeviceSynchronize();
*/
//std::cout << "phase 1 done \n";
//rearrangeForDCTv2 << <square / 4 + 1, 128 >> >(d_in_pad_L2, 256, d_rearr_man);
rearrangeAndDCT55 << <square / 8 + 1, 256 >> >(d_in_pad_L2, 256, d_dctmtx, d_dctImg);
//h_dctImg = (float*)malloc(25 * square * sizeof(float));
//std::cout << "second dct\n";
rho_dct2 << <square / 16 + 1, 512 >> >(d_dctImg, d_coeff_freq_var_L2);
thrust::sort(thrust::device, d_coeff_freq_var_L2, d_coeff_freq_var_L2 + square);
mean10_size = ceil((square) / 10.0);
features[9] = thrust::reduce(thrust::device, d_coeff_freq_var_L2 + square - mean10_size, d_coeff_freq_var_L2 + square) / mean10_size;
features[8] = thrust::reduce(thrust::device, d_coeff_freq_var_L2, d_coeff_freq_var_L2 + square) / square;
gama_dct62 << <square / 16 + 1, 512 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L2);
//gama_dct5 << <square, 1024 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L2);
thrust::sort(thrust::device, d_gama_L2, d_gama_L2 + square);
gama_dct6_3 << <square / 128 + 1, 128 >> >(d_gama_L2, d_g_info, d_r_info, d_gama_L2, square);
features[11] = thrust::reduce(thrust::device, d_gama_L2 + square - mean10_size, d_gama_L2 + square) / mean10_size;
features[10] = thrust::reduce(thrust::device, d_gama_L2, d_gama_L2 + square) / square;
subband_energy2 << <square / 4 + 1, 128 >> >(d_dctImg, d_freq_bands_L2);
thrust::sort(thrust::device, d_freq_bands_L2, d_freq_bands_L2 + square);
features[13] = thrust::reduce(thrust::device, d_freq_bands_L2 + square - mean10_size, d_freq_bands_L2 + square) / mean10_size;
features[12] = thrust::reduce(thrust::device, d_freq_bands_L2, d_freq_bands_L2 + square) / square;
/*std::cout << "square2 = " << square << std::endl;
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori1_rho_L2, 1);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori2_rho_L2, 2);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori3_rho_L2, 3);*/
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori1_rho_L2, 1);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori2_rho_L2, 2);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori3_rho_L2, 3);
oriented_dct_final2 << <square / 512 + 1, 512 >> >(d_ori1_rho_L2, d_ori2_rho_L2, d_ori3_rho_L2, d_ori_rho_L2);
thrust::sort(thrust::device, d_ori_rho_L2, d_ori_rho_L2 + square);
features[15] = thrust::reduce(thrust::device, d_ori_rho_L2 + square - mean10_size, d_ori_rho_L2 + square) / mean10_size;
features[14] = thrust::reduce(thrust::device, d_ori_rho_L2, d_ori_rho_L2 + square) / square;
/*float * h_dctImg = (float*)malloc(square * 25 * sizeof(float));
hipMemcpy(h_dctImg, d_dctImg, square * 25 * sizeof(float), hipMemcpyDeviceToHost);
std::ofstream outfile3("d_dctImg_L2_babyJPG.txt");
for (int j = 0; j < square; j++) {
for (int i = 0; i < 25; i++) {
outfile3 << h_dctImg[j * 25 + i] << ",";
if ((i + 1) % 5 == 0)
outfile3 << std::endl;
}
outfile3 << std::endl;
}
outfile3.close();*/
/*hipMemcpy(h_dctImg, d_ori2_rho_L2, square * sizeof(float), hipMemcpyDeviceToHost);
std::ofstream outfile4("d_ori2_L2_babyJPG.txt");
for (int j = 0; j < square; j++) {
outfile4 << h_dctImg[j] << std::endl;
}
outfile4.close();
hipMemcpy(h_dctImg, d_ori3_rho_L2, square * sizeof(float), hipMemcpyDeviceToHost);
std::ofstream outfile5("d_ori3_L2_babyJPG.txt");
for (int j = 0; j < square; j++) {
outfile5 << h_dctImg[j] << std::endl;
}
outfile5.close();*/
hipFree(d_ori1_rho_L2);
hipFree(d_ori2_rho_L2);
hipFree(d_ori3_rho_L2);
hipFree(d_gama_L2);
hipFree(d_coeff_freq_var_L2);
hipFree(d_freq_bands_L2);
hipFree(d_ori_rho_L2);
/*
hipFree(d_in_conv_inter_L2);
hipFree(d_in_pad_L2);
hipFree(d_in_conv_inter_L2);
hipFree(d_rearr_in_L2);
*/
//----------------------Start Phase 3----------------------------------------------------------
hipDeviceSynchronize();
square = (128 / 3 + 1) * (128 / 3 + 1);
float *d_in_conv_inter_L3; hipMalloc((void **)&d_in_conv_inter_L3, 256 * 256 * sizeof(float));
float *d_in_convolve_L3; hipMalloc((void **)&d_in_convolve_L3, 256 * 256 * sizeof(float));
float *d_in_L3; hipMalloc((void **)&d_in_L3, 128 * 128 * sizeof(float));
float *d_in_pad_L3; hipMalloc((void **)&d_in_pad_L3, 133 * 133 * sizeof(float));
float *d_coeff_freq_var_L3; hipMalloc((void **)&d_coeff_freq_var_L3, square * sizeof(float));
hipfftComplex *d_rearr_in_L3; hipMalloc((void **)&d_rearr_in_L3, 50 * square * sizeof(hipfftComplex));
float * d_ori1_rho_L3; hipMalloc((void **)&d_ori1_rho_L3, square * sizeof(float));
float * d_ori2_rho_L3; hipMalloc((void **)&d_ori2_rho_L3, square * sizeof(float));
float * d_ori3_rho_L3; hipMalloc((void **)&d_ori3_rho_L3, square * sizeof(float));
float * d_ori_rho_L3; hipMalloc((void **)&d_ori_rho_L3, square * sizeof(float));
float * d_freq_bands_L3; hipMalloc((void **)&d_freq_bands_L3, square * sizeof(float));
float * d_gama_L3; hipMalloc((void **)&d_gama_L3, square * sizeof(float));
convolveRow << <256, 256 >> >(d_in_L2, 256, d_in_conv_inter_L3);
convolveCol << <256, 256 >> >(d_in_conv_inter_L3, 256, d_in_convolve_L3);
hipDeviceSynchronize();
downsample_by2 << <128, 128 >> >(d_in_convolve_L3, 256, d_in_L3);
pad << <133, 133 >> >(d_in_L3, 128, d_in_pad_L3);
/*howmany = 5 * square;
hipfftPlanMany(&p, rank, &lengthOfDFT, NULL, istride, idist, NULL, ostride,
odist, HIPFFT_C2C, howmany);
hipDeviceSynchronize();
rearrangeForCuFFT << <square, 25 >> >(d_in_pad_L3, 128, d_rearr_in_L3);
hipfftExecC2C(p, d_rearr_in_L3, d_dct_in, HIPFFT_FORWARD);
transposeForCuFFT << <square, 25 >> >(d_dct_in, d_dct_in);
hipfftExecC2C(p, d_dct_in, d_dct_in, HIPFFT_FORWARD);
transposeForCuFFT << <square, 25 >> >(d_dct_in, d_dct_in);
copyDCT << <square, 25 >> >(d_dct_in, d_dctImg);
hipDeviceSynchronize();
*/
//rearrangeForDCTv2 << <square / 4 + 1, 128 >> >(d_in_pad_L3, 128, d_rearr_man);
rearrangeAndDCT55 << <square / 8 + 1, 256 >> >(d_in_pad_L3, 128, d_dctmtx, d_dctImg);
hipFree(d_dctmtx);
rho_dct2 << <square / 16 + 1, 512 >> >(d_dctImg, d_coeff_freq_var_L3);
thrust::sort(thrust::device, d_coeff_freq_var_L3, d_coeff_freq_var_L3 + square);
mean10_size = ceil((square) / 10.0);
features[17] = thrust::reduce(thrust::device, d_coeff_freq_var_L3 + square - mean10_size, d_coeff_freq_var_L3 + square) / mean10_size;
features[16] = thrust::reduce(thrust::device, d_coeff_freq_var_L3, d_coeff_freq_var_L3 + square) / square;
gama_dct62 << <square / 16 + 1, 512 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L3);
//gama_dct5 << <square, 1024 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L3);
thrust::sort(thrust::device, d_gama_L3, d_gama_L3 + square);
gama_dct6_3 << <square / 128 + 1, 128 >> >(d_gama_L3, d_g_info, d_r_info, d_gama_L3, square);
features[19] = thrust::reduce(thrust::device, d_gama_L3 + square - mean10_size, d_gama_L3 + square) / mean10_size;
features[18] = thrust::reduce(thrust::device, d_gama_L3, d_gama_L3 + square) / square;
// square = 1849
subband_energy2 << <square / 4 + 1, 128 >> >(d_dctImg, d_freq_bands_L3);
thrust::sort(thrust::device, d_freq_bands_L3, d_freq_bands_L3 + square);
features[21] = thrust::reduce(thrust::device, d_freq_bands_L3 + square - mean10_size, d_freq_bands_L3 + square) / mean10_size;
features[20] = thrust::reduce(thrust::device, d_freq_bands_L3, d_freq_bands_L3 + square) / square;
/*std::cout << "square3 = " << square << std::endl;
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori1_rho_L3, 1);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori2_rho_L3, 2);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori3_rho_L3, 3);*/
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori1_rho_L3, 1);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori2_rho_L3, 2);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori3_rho_L3, 3);
oriented_dct_final2 << <square / 512 + 1, 512 >> >(d_ori1_rho_L3, d_ori2_rho_L3, d_ori3_rho_L3, d_ori_rho_L3);
thrust::sort(thrust::device, d_ori_rho_L3, d_ori_rho_L3 + square);
features[23] = thrust::reduce(thrust::device, d_ori_rho_L3 + square - mean10_size, d_ori_rho_L3 + square) / mean10_size;
features[22] = thrust::reduce(thrust::device, d_ori_rho_L3, d_ori_rho_L3 + square) / square;
//Print features
/*
std::cout << "coeff_freq_var_l1:" << features[0] << ", " << features[1] << std::endl;
std::cout << "gama_dct_l1:" << features[2] << ", " << features[3] << std::endl;
std::cout << "freq_bands:" << features[4] << ", " << features[5] << std::endl;
std::cout << "ori_rho_l1:" << features[6] << ", " << features[7] << std::endl;
std::cout << "coeff_freq_var_l2: " << features[8] << ", " << features[9] << std::endl;
std::cout << "gama_l2: " << features[10] << ", " << features[11] << std::endl;
std::cout << "freq_bands_l2: " << features[12] << ", " << features[13] << std::endl;
std::cout << "ori_rho_l2: " << features[14] << ", " << features[15] << std::endl;
std::cout << "coeff_freq_var_l3: " << features[16] << ", " << features[17] << std::endl;
std::cout << "gama_l3: " << features[18] << ", " << features[19] << std::endl;
std::cout << "freq_bands_l3: " << features[20] << ", " << features[21] << std::endl;
std::cout << "ori_rho_l3: " << features[22] << ", " << features[23] << std::endl;
printf("coeff_freq_var_l1: %0.15f, %0.15f\n", features[0], features[1]);
printf("gama_dct_l1: %0.15f, %0.15f\n", features[2], features[3]);
printf("freq_bands: %0.15f, %0.15f\n", features[4], features[5]);
printf("ori_rho_l1: %0.15f, %0.15f\n", features[6], features[7]);
printf("coeff_freq_var_l2: %0.15f, %0.15f\n", features[8], features[9]);
printf("gama_l2: %0.15f, %0.15f\n", features[10], features[11]);
printf("freq_bands_l2: %0.15f, %0.15f\n", features[12], features[13]);
printf("ori_rho_l2: %0.15f, %0.15f\n", features[14], features[15]);
printf("coeff_freq_var_l3: %0.15f, %0.15f\n", features[16], features[17]);
printf("gama_l3: %0.15f, %0.15f\n", features[18], features[19]);
printf("freq_bands_l3: %0.15f, %0.15f\n", features[20], features[21]);
printf("ori_rho_l3: %0.15f, %0.15f\n", features[22], features[23]);
*/
hipFree(d_ori1_rho_L3);
hipFree(d_ori2_rho_L3);
hipFree(d_ori3_rho_L3);
hipFree(d_gama_L3);
hipFree(d_coeff_freq_var_L3);
hipFree(d_freq_bands_L3);
hipFree(d_ori_rho_L3);
hipFree(d_in_conv_inter_L3);
hipFree(d_in_convolve_L3);
hipFree(d_in_L3);
hipFree(d_in_pad_L3);
hipFree(d_rearr_in_L3);
hipFree(d_in);
/*// stop timer
QueryPerformanceCounter(&t2);
// compute and print the elapsed time in millisec
elapsedTime = (t2.QuadPart - t1.QuadPart) * 1000.0 / frequency.QuadPart;
std::cout << elapsedTime << " ms.\n";
*/
float * sub_temp = (float *)malloc(25 * sizeof(float));
for (int i = 0; i < 24; i++){
sub_temp[i] = features[i] - bliinds_info::mu_vector[i];
}
float * mult_temp = (float *)malloc(25 * sizeof(float));
float product, max = 0, max_k, b = 1.0168, gama = 0.4200;
for (int k = 2; k <= 200; k++) {
sub_temp[24] = k / 2.0 - bliinds_info::mu_vector[24];
for (int i = 0; i < 25; i++) {
mult_temp[i] = 0;
for (int j = 0; j < 25; j++) {
mult_temp[i] += sub_temp[j] * bliinds_info::sigma_inv_vector[i + 25 * j];
}
}
product = 0;
for (int i = 0; i < 25; i++){
product += mult_temp[i] * sub_temp[i];
}
product = exp(-pow(b*product, gama));
if (product > max) {
max = product;
max_k = k / 2.0 - 1;
}
}
std::cout << "BLIINDS score: " << max_k << std::endl;
// stop timer
QueryPerformanceCounter(&t2);
// compute and print the elapsed time in millisec
elapsedTime = (t2.QuadPart - t1.QuadPart) * 1000.0 / frequency.QuadPart;
std::cout << elapsedTime << " ms.\n\n";
hipDeviceSynchronize();
hipProfilerStop();
}
#include "header.h"
#include "bliinds_info.h"
__constant__ float dc_r_info[9971];
// Zero-pads `src` (size x size) into `new_img` with a 1-pixel top/left
// border plus extra trailing columns/rows; the padded row pitch is
// (size + 5).  Launch: one thread per output column, one block per
// output row (both must cover the padded extent).
__global__ void pad(float const* src, const int size, float * new_img) {
	const int col = threadIdx.x;
	const int row = blockIdx.x;
	const int pitch = size + 2 * 1 + 3;   // padded row width = size + 5
	const bool interior = (col >= 1) && (col <= size) && (row >= 1) && (row <= size);
	// Interior pixels copy from src shifted by the 1-pixel border; the
	// border and trailing region are filled with zeros.
	new_img[row * pitch + col] = interior ? src[(row - 1) * size + (col - 1)] : 0.0f;
}
// Gathers one 5x5 patch of the padded image per block and mirrors each
// 5-sample patch row into a 10-sample symmetric complex sequence -- the
// standard preparation for computing a DCT through a length-10 FFT
// (see transposeForCuFFT, which applies the twiddle/scale afterwards).
// Patches start every 3 pixels; padded row pitch is (size + 5) -- see pad().
// Launch: 25 threads per block (one per patch element), one block per patch.
__global__ void rearrangeForCuFFT(float const * new_img, const int size, cufftComplex * rearr_img) {
	// threads = 25, blocks = (512/3 +1)^2
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	// Top-left corner of this block's 5x5 patch inside the padded image.
	int const pos = (y / (size / 3 + 1)) * 3 * (size + 5) + (y % (size / 3 + 1)) * 3; // position in new_img of first element to be copied
	//*****Important*** change this to row-wise
	// Each patch row r = x/5 occupies 10 complex slots: slots 0..4 hold the
	// row in order, slots 5..9 hold it reversed (symmetric extension);
	// imaginary parts are zeroed.
	rearr_img[y * 50 + x / 5 * 10 + x % 5].x = new_img[pos + x % 5 + x / 5 * (size + 5)];
	rearr_img[y * 50 + (x / 5 + 1) * 10 - x % 5 - 1].x = new_img[pos + x % 5 + x / 5 * (size + 5)];
	rearr_img[y * 50 + x / 5 * 10 + x % 5].y = 0;
	rearr_img[y * 50 + (x / 5 + 1) * 10 - x % 5 - 1].y = 0;
}
// Zero-fills one float per thread; the caller sizes the launch so that
// gridDim.x * blockDim.x covers the array exactly.
__global__ void setZero(float * array) {
	const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
	array[idx] = 0.0f;
}
// Copies one 5x5 patch of the padded image into a flat, row-major 25-float
// record per block, feeding dct55().  Patches start every 3 pixels; padded
// row pitch is (size + 5) -- see pad().
// Launch: 25 threads per block, (size/3 + 1)^2 blocks.
__global__ void rearrangeForDCT(float const * new_img, const int size, float * rearr_img) {
	// threads = 25, blocks = (512/3 +1)^2
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	// Top-left corner of this block's patch inside the padded image.
	int const pos = (y / (size / 3 + 1)) * 3 * (size + 5) + (y % (size / 3 + 1)) * 3; // position in new_img of first block element to be copied
	//*****Important*** change this to row-wise
	rearr_img[y * 25 + x] = new_img[pos + x % 5 + x / 5 * (size + 5)];
	/*if (x == 0 && y == 0)
	printf("I can print\n");*/
}
// Higher warps
// Variant of rearrangeForDCT that handles 4 patches per 128-thread block,
// one warp per patch, and stores each patch with a 32-float stride for
// aligned access; only lanes 0..24 copy, slots 25..31 are left untouched
// as padding.
__global__ void rearrangeForDCTv2(float const * new_img, const int size, float * rearr_img) {
	// threads = 25, blocks = (512/3 +1)^2
	int const x = threadIdx.x % 32;        // lane within the warp
	int const y = blockIdx.x * 4;          // first patch handled by this CTA
	int const sblkIdx = threadIdx.x / 32;  // warp index inside the CTA
	int const gblkIdx = y + sblkIdx;       // global patch index
	// Top-left corner of this warp's patch inside the padded image
	// (patches start every 3 pixels; row pitch is size + 5).
	int const pos = (gblkIdx / (size / 3 + 1)) * 3 * (size + 5) + (gblkIdx % (size / 3 + 1)) * 3; // position in new_img of first block element to be copied
	//*****Important*** change this to row-wise
	if (x<25)
		rearr_img[gblkIdx * 32 + x] = new_img[pos + x % 5 + x / 5 * (size + 5)];
}
// Converts the row-wise length-10 FFT output into DCT coefficients and
// mirrors them back (transposed) into the 10-sample symmetric layout so a
// second FFT pass yields the column-wise DCT.  Each coefficient is the
// real part of the FFT bin rotated by the e^{-i*pi*k/10} twiddle, halved,
// and scaled by the orthonormal DCT factor (sqrt(1/5) for the DC row,
// sqrt(2/5) otherwise).
// Launch: 25 threads per block, one block per 5x5 patch.
__global__ void transposeForCuFFT(cufftComplex const*read, cufftComplex *write) {
	// threads = 25, blocks = (512/3 +1)^2
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	float temp1;// temp2;
	// Re{ e^{-i*pi*k/10} * X[k] } / 2 * scale, where k = x/5 is the DCT row.
	temp1 = (read[y * 50 + x % 5 * 10 + x / 5].x * cos(PI * (x / 5) / 10.0) + read[y * 50 + x % 5 * 10 + x / 5].y * sin(PI * (x / 5) / 10.0)) / 2.0 * (x / 5 == 0 ? sqrt(0.2) : sqrt(0.4));
	//temp2 = (read[y * 50 + x % 5 * 10 + x / 5].y * cos(PI * (x / 5) / 10.0) - read[y * 50 + x % 5 * 10 + x / 5].x * sin(PI * (x / 5) / 10.0)) / 2.0 * (x % 5 == 0 ? sqrt(0.2) : sqrt(0.4));
	//__syncthreads();
	// Write transposed and symmetrically extended (same layout as
	// rearrangeForCuFFT) with zero imaginary parts.
	write[y * 50 + x / 5 * 10 + x % 5].x = temp1;//fabsf(temp1) > 0.00001 ? temp1 : 0;
	write[y * 50 + x / 5 * 10 + x % 5].y = 0;
	write[y * 50 + (x / 5 + 1) * 10 - x % 5 - 1].x = temp1; //fabsf(temp1) > 0.00001 ? temp1 : 0;
	write[y * 50 + (x / 5 + 1) * 10 - x % 5 - 1].y = 0;
}
// 5x5 DCT of one patch per block: computes D * img * D^T, where D is the
// 5x5 DCT matrix passed in dctmtx, accumulating in double precision.
// Launch: 25 threads per block (one per coefficient), one block per patch.
__global__ void dct55(const float * rearr_img, const double* dctmtx, float* dctImg) {
	//threads = 25, blocks = (512/3 +1)^2
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ double img[25];
	__shared__ double dct[25];
	img[x] = rearr_img[y * 25 + x];
	dct[x] = dctmtx[x];
	double temp = 0.0;
	__syncthreads();
	/*if (x == 0) {
	if (y == 450) {
	for (int i = 0; i < 25; i++)
	printf("%0.20f\n", img[i]);
	printf("\n");
	}
	}*/
	// First pass: temp = (D * img) at (row x/5, col x%5).
	for (int i = 0; i < 5; i++) {
		temp += dct[5 * (x / 5) + i] * (img[5 * i + x % 5]);
	}
	__syncthreads();
	img[x] = temp;
	__syncthreads();
	temp = 0.0;
	// Second pass: temp = (D * img * D^T) at (row x/5, col x%5).
	for (int i = 0; i < 5; i++) {
		temp += img[5 * (x / 5) + i] * dct[5 * (x % 5) + i];
	}
	dctImg[y * 25 + x/*5*(x%5) + x/5*/] = temp; //fabsf(temp) > 0.0000001 ? temp : 0;
	//__syncthreads();
	/*if (x == 0) {
	if (y == 7155) {
	for (int i = 0; i < 25; i++)
	printf("%0.20f, %0.20f\n", rearr_img[y*25 + i], dctImg[y * 25 + i]);
	printf("\n");
	}
	}*/
}
// Warp-per-patch 5x5 DCT: 256 threads = 8 warps per CTA, each warp
// transforming one 32-float-stride patch record (see rearrangeForDCTv2).
// Computes D * img * D^T with double accumulation; the DC coefficient
// (index 0) is intentionally not written -- the downstream feature
// kernels only read the 24 AC coefficients.
// Fixes versus the original:
//  (1) lanes 25..31 no longer index past the 25-entry dctmtx / the
//      32-entry shared dct[] array (their results were discarded, but the
//      reads were out of bounds);
//  (2) explicit barriers between shared-memory writes and cross-lane
//      reads -- implicit warp-synchronous execution is not guaranteed on
//      Volta+ (independent thread scheduling).
__global__ void dct55v2(float* rearr_img, const double* dctmtx, float* dctImg) {
	int const x = threadIdx.x%32;            // lane within the warp
	int const y = blockIdx.x*8;              // first patch of this CTA
	int const sblkIdx = threadIdx.x / 32;    // warp index inside the CTA
	int const gblkIdx = (y + sblkIdx) * 32;  // base offset of this warp's patch
	__shared__ float img[32*8];
	__shared__ double dct[32];
	img[threadIdx.x] = rearr_img[gblkIdx + x];
	if (threadIdx.x < 25)
		dct[threadIdx.x] = dctmtx[threadIdx.x];  // dctmtx holds exactly 25 doubles
	__syncthreads();                             // dct[] is read by every warp
	double temp = 0.0;
	if (x < 25) {
		// First pass: temp = (D * img) at (row x/5, col x%5).
		for (int i = 0; i < 5; i++) {
			temp += dct[5 * (x / 5) + i] * img[sblkIdx * 32 + 5 * i + x % 5];
		}
	}
	__syncwarp();
	if (x < 25)
		img[threadIdx.x] = temp;
	__syncwarp();
	if (x > 0 && x < 25) {
		// Second pass: temp = (D * img * D^T) at (row x/5, col x%5).
		temp = 0.0;
		for (int i = 0; i < 5; i++) {
			temp += img[sblkIdx * 32 + 5 * (x / 5) + i] * dct[5 * (x % 5) + i];
		}
		dctImg[gblkIdx + x/*5*(x%5) + x/5*/] = temp;
	}
}
// Merge rearrange and DCT into one kernel: each warp loads one 5x5 patch
// of the padded image straight into shared memory and applies the 5x5 DCT
// (D * img * D^T, double accumulation).  256 threads = 8 warps per CTA;
// the DC coefficient (index 0) is not written, matching dct55v2.
// Fixes versus the original:
//  (1) loads and compute restricted to lanes 0..24 -- lanes 25..31
//      previously read two rows past the patch (out of bounds for the
//      last patches) and past the 25-entry dctmtx;
//  (2) explicit barriers between shared writes and cross-lane reads
//      (no implicit warp synchrony on Volta+).
__global__ void rearrangeAndDCT55(float const* new_img, const int size, const double* dctmtx, float* dctImg) {
	int const x = threadIdx.x % 32;          // lane within the warp
	int const y = blockIdx.x * 8;            // first patch of this CTA
	int const sblkIdx = threadIdx.x / 32;    // warp index inside the CTA
	int const gblkIdx = y + sblkIdx;         // global patch index
	// Top-left corner of this warp's patch inside the padded image
	// (patches start every 3 pixels; row pitch is size + 5).
	int const pos = (gblkIdx / (size / 3 + 1)) * 3 * (size + 5) + (gblkIdx % (size / 3 + 1)) * 3;
	__shared__ float img[32 * 8];
	__shared__ double dct[32];
	if (x < 25)
		img[threadIdx.x] = new_img[pos + x % 5 + x / 5 * (size + 5)];
	if (threadIdx.x < 25)
		dct[threadIdx.x] = dctmtx[threadIdx.x];  // dctmtx holds exactly 25 doubles
	__syncthreads();                             // dct[] is read by every warp
	double temp = 0.0;
	if (x < 25) {
		// First pass: temp = (D * img) at (row x/5, col x%5).
		for (int i = 0; i < 5; i++) {
			temp += dct[5 * (x / 5) + i] * img[sblkIdx * 32 + 5 * i + x % 5];
		}
	}
	__syncwarp();
	if (x < 25)
		img[threadIdx.x] = temp;
	__syncwarp();
	if (x > 0 && x < 25) {
		// Second pass: temp = (D * img * D^T) at (row x/5, col x%5).
		temp = 0.0;
		for (int i = 0; i < 5; i++) {
			temp += img[sblkIdx * 32 + 5 * (x / 5) + i] * dct[5 * (x % 5) + i];
		}
		dctImg[gblkIdx * 32 + x/*5*(x%5) + x/5*/] = temp;
	}
}
// Extracts the real 5x5 DCT coefficients from the mirrored 10-wide complex
// cuFFT layout into a dense 25-float record per patch.
// Launch: 25 threads per block, one block per patch.
__global__ void copyDCT(cufftComplex const*dct_img, float *dctImg) {
	const int coefIdx = threadIdx.x;   // 0..24, position inside the 5x5 block
	const int patch = blockIdx.x;
	const int row = coefIdx / 5;
	const int col = coefIdx % 5;
	// Only the first 5 of each row's 10 complex slots carry coefficients.
	dctImg[patch * 25 + coefIdx] = dct_img[patch * 50 + row * 10 + col].x;
}
// Debug kernel: fills each patch's mirrored complex layout with the
// product (row * col) so the FFT pipeline can be inspected with known data.
__global__ void rearrangeTest(cufftComplex * d_rearr_in){
	const int t = threadIdx.x;        // 0..24
	const int patch = blockIdx.x;
	const int row = t / 5;
	const int col = t % 5;
	cufftComplex v;
	v.x = row * col;
	v.y = 0;
	// Forward slot and its mirror within the 10-wide row.
	d_rearr_in[patch * 50 + row * 10 + col] = v;
	d_rearr_in[patch * 50 + (row + 1) * 10 - col - 1] = v;
}
// Debug kernel: copies real parts out of a complex buffer, snapping values
// with magnitude <= 1e-5 to exactly zero.  Indexed as 20 entries per block.
__global__ void copyTest(cufftComplex const* d_rearr_in, float *dctImg) {
	const int idx = blockIdx.x * 20 + threadIdx.x;
	const float re = d_rearr_in[idx].x;
	dctImg[idx] = (abs(re) > 0.00001) ? re : 0.0f;
}
// Coefficient-of-frequency-variation feature for one 5x5 DCT block:
// std(|AC|) / (mean(|AC|) + eps) over the 24 AC coefficients (index 0,
// the DC term, is skipped).  All 25 threads stage the block into shared
// memory; thread 0 performs the reduction serially.
__global__ void rho_dct(float const* d_dctImg, float * coeff_freq_var) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[25];
	dctBlock[x] = d_dctImg[y * 25 + x];
	__syncthreads();
	if (x == 0) {
		float mean_abs = 0, std_gauss = 0;
		// NOTE(review): abs() on float relies on the C++ overload;
		// fabsf() would be unambiguous.
		for (int i = 1; i < 25; i++) {
			mean_abs += abs(dctBlock[i]);
			/*if (y == 450) {
			printf("%0.20f, %0.20f\n", mean_abs, abs(dctBlock[i]));
			}*/
		}
		mean_abs = mean_abs / 24.0;
		/*if (mean_abs < 0.0001) {
		coeff_freq_var[y] = 0;
		return;
		}*/
		for (int i = 1; i < 25; i++) {
			float temp = fabs(dctBlock[i]) - mean_abs;
			std_gauss += temp * temp;
			/*if (y == 450) {
			printf("%0.20f, %0.20f\n", std_gauss, temp*temp);
			}*/
		}
		// Sample standard deviation (divide by N-1 = 23).
		std_gauss = sqrt(std_gauss / 23.0);
		coeff_freq_var[y] = std_gauss / (mean_abs + 0.0000001);
		/*if (y == 450) {
		printf("std_gauss: %0.20f, \tmean_abs: %0.20f, \tcoeff: %0.20f\n", std_gauss, mean_abs, coeff_freq_var[y]);
		}*/
	}
}
// Software atomic float add built on integer atomicCAS: retries until the
// compare-and-swap succeeds against a value no other thread has changed.
__device__ inline void MyAtomicAdd(float *address, float value)
{
	int expected = __float_as_int(*address);
	for (;;)
	{
		const int desired = __float_as_int(__int_as_float(expected) + value);
		const int observed = atomicCAS((int *)address, expected, desired);
		if (observed == expected)
			break;            // our swap landed
		expected = observed;  // somebody beat us; retry with the fresh value
	}
}
// Higher number of warps: one 25-coefficient DCT block per warp (16 warps
// per 512-thread CTA).  Computes the coefficient-of-frequency-variation
// feature std(|AC|) / (mean(|AC|) + eps) over the 24 AC coefficients.
// Fix: explicit __syncwarp() between shared-memory writes and the
// cross-lane reads below -- implicit warp-synchronous execution is not
// guaranteed on Volta+ (independent thread scheduling).
__global__ void rho_dct2(float const* d_dctImg, float * coeff_freq_var) {
	int const x = threadIdx.x % 32;          // lane within the warp
	int const y = blockIdx.x * 16;           // first DCT block of this CTA
	int const sblkIdx = threadIdx.x / 32;    // warp index inside the CTA
	int const gblkIdx = (y + sblkIdx) * 32;  // base offset of this warp's block
	__shared__ float dctBlock[32 * 16];
	dctBlock[threadIdx.x] = fabs(d_dctImg[gblkIdx + x]);
	dctBlock[sblkIdx * 32] = 0;              // DC coefficient is excluded
	__syncwarp();
	float mean_abs = 0, std_gauss = 0;
#pragma unroll
	for (int i = 1; i < 25; i++) {
		mean_abs += dctBlock[sblkIdx * 32 + i];
	}
	mean_abs /= 24.0f;
	dctBlock[threadIdx.x] -= mean_abs;       // every lane subtracts the same mean
	__syncwarp();
#pragma unroll
	for (int i = 1; i < 25; i++) {
		std_gauss += dctBlock[sblkIdx * 32 + i] * dctBlock[sblkIdx * 32 + i];
	}
	std_gauss = sqrt(std_gauss / 23.0f);
	// All 32 lanes compute and store the same value; redundant but benign.
	coeff_freq_var[gblkIdx / 32] = std_gauss / (mean_abs + 0.0000001f);
}
// Reference gamma estimator for one 5x5 DCT block: thread 0 computes
// rho = var / mean^2 over the 24 AC coefficients, then linearly scans the
// 9971-entry descending r_vector table for the bracket containing rho and
// emits the matching g_vector entry (default 11 when nothing matches).
// Launch: 25 threads per block, one block per patch.
__global__ void gama_dct(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[25];
	dctBlock[x] = d_dctImg[y * 25 + x];
	__syncthreads();
	if (x == 0) {
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;   // sample variance (N-1)
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;           // squared mean absolute deviation
		const float rho = var_gauss / (mean_abs + 0.0000001);
		// Linear search of the descending table for
		// r_vector[j+1] < rho <= r_vector[j].
		float gamma_gauss = 11.0;
		for (int j = 0; j < 9970; j++) {
			if (rho>r_vector[j + 1] && rho <= r_vector[j]) {
				gamma_gauss = g_vector[j];
			}
		}
		d_gama[y] = gamma_gauss;
	}
}
// Experimental gamma lookup: one 1024-thread CTA per DCT block.  Shared
// memory holds the 25 DCT coefficients followed by the full 9971-entry
// r_vector table so the bracket search runs out of shared memory, with
// the threads splitting the table into overlapping windows.
// Fix: the shared gamma accumulator dctBlock[1] was initialised by every
// thread concurrently with other threads writing matches into it, so a
// straggler could overwrite a found gamma with the 11.0 default.  It is
// now initialised by thread 0 only, under a barrier.
__global__ void gama_dct2(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 1024
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[9996];
	// First 25 slots: this block's DCT coefficients; rest: r_vector table.
	dctBlock[x] = x<25 ? d_dctImg[y * 25 + x] : r_vector[x - 25];
	__syncthreads();
	if (x < 32) {
		// rho = var / mean^2 over the 24 AC coefficients (the whole first
		// warp redundantly computes the same value).
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;
		dctBlock[0] = var_gauss / (mean_abs + 0.0000001);
	}
	else {
		// Remaining threads stream in the rest of the r_vector table
		// (entries 999..9970; the first 999 were loaded above).
		int n = 999 + x - 32;
		while (n < 9971) {
			dctBlock[n + 25] = r_vector[n];
			n += 992;
		}
	}
	__syncthreads();
	float rho = dctBlock[0];
	if (x == 0) {
		dctBlock[1] = 11.0; // default gamma when no bracket matches
	}
	__syncthreads();
	// Each thread scans an overlapping 13-entry window of the descending
	// table for the bracket r_vector[j+1] < rho <= r_vector[j]; at most
	// one bracket matches, so any concurrent writers store the same value.
	for (int j = 10 * x; j <= 10 * x + 12; j++) {
		if (j < 9970) {
			int idx = 25 + j;
			if (rho>dctBlock[idx + 1] && rho <= dctBlock[idx]) {
				dctBlock[1] = g_vector[j];
			}
		}
	}
	__syncthreads();
	d_gama[y] = dctBlock[1];
}
// gama_dct3 tries to use all 1024 threads for populating the shared memory
// with r_vector.  (Measured 3 ms slower than gama_dct2: 29 ms vs 26 ms.)
// Fix: as in gama_dct2, the shared gamma accumulator dctBlock[1] was
// initialised by every thread concurrently with the search loop's writes;
// a straggler could overwrite a found gamma with the 11.0 default.  It is
// now initialised by thread 0 only, under a barrier.
__global__ void gama_dct3(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 1024
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[9996];
	// First 25 slots: this block's DCT coefficients; rest: r_vector table.
	dctBlock[x] = x<25 ? d_dctImg[y * 25 + x] : r_vector[x - 25];
	int n = 999 + x;
	while (n < 9971) {
		dctBlock[n + 25] = r_vector[n];
		n += 1024;
	}
	__syncthreads();
	if (x < 32) {
		// rho = var / mean^2 over the 24 AC coefficients (the whole first
		// warp redundantly computes the same value).
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;
		dctBlock[0] = var_gauss / (mean_abs + 0.0000001);
	}
	__syncthreads();
	float rho = dctBlock[0];
	if (x == 0) {
		dctBlock[1] = 11.0; // default gamma when no bracket matches
	}
	__syncthreads();
	// Overlapping 13-entry windows of the descending table; at most one
	// bracket r_vector[j+1] < rho <= r_vector[j] matches.
	for (int j = 10 * x; j <= 10 * x + 12; j++) {
		if (j < 9970) {
			int idx = 25 + j;
			if (rho>dctBlock[idx + 1] && rho <= dctBlock[idx]) {
				dctBlock[1] = g_vector[j];
			}
		}
	}
	__syncthreads();
	d_gama[y] = dctBlock[1];
}
// gama_dct4 makes use of r_vector in the constant memory.
// Takes 159 ms for all the three runs! :(
// Same algorithm as gama_dct: thread 0 computes rho = var / mean^2 over
// the 24 AC coefficients, then linearly scans the descending gamma table,
// reading it from __constant__ dc_r_info instead of global memory.
__global__ void gama_dct4(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[25];
	dctBlock[x] = d_dctImg[y * 25 + x];
	__syncthreads();
	if (x == 0) {
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;   // sample variance (N-1)
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;           // squared mean absolute deviation
		const float rho = var_gauss / (mean_abs + 0.0000001);
		// Linear bracket search over the constant-memory table.
		float gamma_gauss = 11.0;
		for (int j = 0; j < 9970; j++) {
			if (rho>dc_r_info[j + 1] && rho <= dc_r_info[j]) {
			//if (rho>r_vector[j + 1] && rho <= r_vector[j]) {
				gamma_gauss = g_vector[j];
			}
		}
		d_gama[y] = gamma_gauss;
	}
}
// gama_dct5 copies r_vector into shared memory from constant memory
// (dc_r_info) instead of global memory.  (Measured 108 ms combined for
// the three invocations.)
// Fix: as in gama_dct2, the shared gamma accumulator dctBlock[1] was
// initialised by every thread concurrently with the search loop's writes;
// a straggler could overwrite a found gamma with the 11.0 default.  It is
// now initialised by thread 0 only, under a barrier.
__global__ void gama_dct5(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	//plan grids = (512/3 + 1)^2, threads = 1024
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[9996];
	// First 25 slots: this block's DCT coefficients; rest: gamma table.
	dctBlock[x] = x<25 ? d_dctImg[y * 25 + x] : dc_r_info[x - 25];
	__syncthreads();
	if (x < 32) {
		// rho = var / mean^2 over the 24 AC coefficients (the whole first
		// warp redundantly computes the same value).
		float mean_gauss = 0;
		for (int i = 1; i < 25; i++) {
			mean_gauss += dctBlock[i];
		}
		mean_gauss = mean_gauss / 24.0;
		float var_gauss = 0;
		float mean_abs = 0;
		for (int i = 1; i < 25; i++) {
			float temp = fabsf(dctBlock[i] - mean_gauss);
			var_gauss += temp * temp;
			mean_abs += temp;
		}
		var_gauss = var_gauss / 23.0;
		mean_abs = mean_abs / 24.0;
		mean_abs *= mean_abs;
		dctBlock[0] = var_gauss / (mean_abs + 0.0000001);
	}
	else {
		// Remaining threads stream in the rest of the table
		// (entries 999..9970; the first 999 were loaded above).
		int n = 999 + x - 32;
		while (n < 9971) {
			dctBlock[n + 25] = dc_r_info[n];
			n += 992;
		}
	}
	__syncthreads();
	float rho = dctBlock[0];
	if (x == 0) {
		dctBlock[1] = 11.0; // default gamma when no bracket matches
	}
	__syncthreads();
	// Overlapping 13-entry windows of the descending table; at most one
	// bracket r_vector[j+1] < rho <= r_vector[j] matches.
	for (int j = 10 * x; j <= 10 * x + 12; j++) {
		if (j < 9970) {
			int idx = 25 + j;
			if (rho>dctBlock[idx + 1] && rho <= dctBlock[idx]) {
				dctBlock[1] = g_vector[j];
			}
		}
	}
	__syncthreads();
	d_gama[y] = dctBlock[1];
}
// gama_dct6 returns the rho values to d_gama: rho = var / (mean^2 + eps)
// over the 24 AC coefficients of one 5x5 DCT block.  The gamma table
// lookup is done afterwards by gama_dct6_3 (g_vector / r_vector are
// unused here, kept for signature compatibility).
// Launch: 25 threads per block, one block per patch.
__global__ void gama_dct6(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	int const lane = threadIdx.x;
	int const blk = blockIdx.x;
	__shared__ float coef[25];
	coef[lane] = d_dctImg[blk * 25 + lane];
	__syncthreads();
	if (lane != 0)
		return;
	// Thread 0 reduces the 24 AC coefficients serially.
	float mu = 0;
	for (int i = 1; i < 25; i++) {
		mu += coef[i];
	}
	mu = mu / 24.0;
	float var = 0;
	float madev = 0;
	for (int i = 1; i < 25; i++) {
		float const dev = fabsf(coef[i] - mu);
		var += dev * dev;
		madev += dev;
	}
	var = var / 23.0;        // sample variance (N-1)
	madev = madev / 24.0;    // mean absolute deviation
	madev *= madev;
	d_gama[blk] = var / (madev + 0.0000001);
}
// gama_dct6 with higher warps: one DCT block per warp (16 warps per
// 512-thread CTA); emits rho = var / (mean^2 + eps) over the 24 AC
// coefficients.  The gamma lookup happens afterwards in gama_dct6_3
// (g_vector / r_vector are unused here, kept for signature compatibility).
// Fix: explicit __syncwarp() between shared-memory writes and the
// cross-lane reads -- implicit warp-synchronous execution is not
// guaranteed on Volta+ (independent thread scheduling).
__global__ void gama_dct62(float const * d_dctImg, float const * g_vector, float const * r_vector, float * d_gama) {
	int const x = threadIdx.x % 32;          // lane within the warp
	int const y = blockIdx.x * 16;           // first DCT block of this CTA
	int const sblkIdx = threadIdx.x / 32;    // warp index inside the CTA
	int const gblkIdx = (y + sblkIdx) * 32;  // base offset of this warp's block
	__shared__ float dctBlock[32*16];
	dctBlock[threadIdx.x] = d_dctImg[gblkIdx + x];
	__syncwarp();
	float mean_gauss = 0;
#pragma unroll
	for (int i = 1; i < 25; i++) {
		mean_gauss += dctBlock[sblkIdx * 32 + i];
	}
	mean_gauss = mean_gauss / 24.0f;
	float var_gauss = 0;
	float mean_abs = 0;
	// Every lane replaces its own slot with |coef - mean|.
	dctBlock[sblkIdx * 32 + x] = fabsf(dctBlock[sblkIdx * 32 + x] - mean_gauss);
	__syncwarp();
#pragma unroll
	for (int i = 1; i < 25; i++) {
		var_gauss += dctBlock[sblkIdx * 32 + i] * dctBlock[sblkIdx * 32 + i];
		mean_abs += dctBlock[sblkIdx * 32 + i];
	}
	var_gauss = var_gauss / 23.0f;
	mean_abs = mean_abs / 24.0f;
	mean_abs *= mean_abs;
	const float rho = var_gauss / (mean_abs + 0.0000001f);
	// All 32 lanes store the same value; redundant but benign.
	d_gama[gblkIdx / 32] = rho;
}
// Maps each rho in d_rho to its gamma via binary search over the
// descending r_vector table: finds j with r_vector[j+1] < rho <= r_vector[j]
// and emits g_vector[j], defaulting to 11 when no bracket matches.
// Launch: 1-D grid of ceil(max / blockDim.x) blocks; d_rho and d_gama may
// alias (each thread only touches its own element).
// Fix: the `pos < max` tail guard was commented out, so the excess
// threads of the last block read and wrote past the end of d_rho/d_gama.
__global__ void gama_dct6_3(float * d_rho, float const * g_vector, float const * r_vector, float * d_gama, int max) {
	int const pos = threadIdx.x + blockIdx.x * blockDim.x;
	if (pos >= max)
		return;   // ceil-div launch: trailing threads have no element
	float const rho = d_rho[pos];
	int left = 0, right = 9970;
	float gamma_gauss = 11;
	while (right > left) {
		int const mid = (left + right) / 2;
		float r_vec_mid_1 = r_vector[mid + 1];
		if (rho > r_vec_mid_1 && rho <= r_vector[mid]) {
			gamma_gauss = g_vector[mid];
			break;
		}
		else if (rho <= r_vec_mid_1) {
			// Table is descending: smaller rho lives at larger indices.
			left = mid + 1;
		}
		else {
			right = mid;
		}
	}
	d_gama[pos] = gamma_gauss;
}
// Orientation-selective frequency variation (reference version): thread 0
// of each block gathers the 8 |DCT| coefficients belonging to one of three
// hard-coded orientation bands of the 5x5 block (orient = 1, 2 or 3), then
// emits std / (mean + eps) over them.  Only thread 0 does any work.
// Launch: one block per DCT patch.
__global__ void oriented_dct_rho(float const * d_dctImg, float * ori_rho, int orient) {
	//plan grids = (512/3 + 1)^2, threads = 8
	__shared__ float dctBlock[8];
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	// The gathered indices are positions inside the row-major 5x5 block;
	// each orientation band selects a fixed set of 8 coefficients.
	if (orient == 1) {
		if (x == 0) {
			dctBlock[0] = fabs(d_dctImg[blockIdx.x * 25 + 1]);
			dctBlock[1] = fabs(d_dctImg[blockIdx.x * 25 + 2]);
			dctBlock[2] = fabs(d_dctImg[blockIdx.x * 25 + 7]);
			dctBlock[3] = fabs(d_dctImg[blockIdx.x * 25 + 3]);
			dctBlock[4] = fabs(d_dctImg[blockIdx.x * 25 + 8]);
			dctBlock[5] = fabs(d_dctImg[blockIdx.x * 25 + 4]);
			dctBlock[6] = fabs(d_dctImg[blockIdx.x * 25 + 9]);
			dctBlock[7] = fabs(d_dctImg[blockIdx.x * 25 + 14]);
		}
	}
	else if (orient == 2) {
		if (x == 0) {
			dctBlock[0] = fabsf(d_dctImg[blockIdx.x * 25 + 6]);
			dctBlock[1] = fabsf(d_dctImg[blockIdx.x * 25 + 12]);
			dctBlock[2] = fabsf(d_dctImg[blockIdx.x * 25 + 17]);
			dctBlock[3] = fabsf(d_dctImg[blockIdx.x * 25 + 13]);
			dctBlock[4] = fabsf(d_dctImg[blockIdx.x * 25 + 18]);
			dctBlock[5] = fabsf(d_dctImg[blockIdx.x * 25 + 23]);
			dctBlock[6] = fabsf(d_dctImg[blockIdx.x * 25 + 19]);
			dctBlock[7] = fabsf(d_dctImg[blockIdx.x * 25 + 24]);
		}
	}
	else if (orient == 3) {
		if (x == 0) {
			dctBlock[0] = fabsf(d_dctImg[blockIdx.x * 25 + 5]);
			dctBlock[1] = fabsf(d_dctImg[blockIdx.x * 25 + 10]);
			dctBlock[2] = fabsf(d_dctImg[blockIdx.x * 25 + 15]);
			dctBlock[3] = fabsf(d_dctImg[blockIdx.x * 25 + 20]);
			dctBlock[4] = fabsf(d_dctImg[blockIdx.x * 25 + 11]);
			dctBlock[5] = fabsf(d_dctImg[blockIdx.x * 25 + 16]);
			dctBlock[6] = fabsf(d_dctImg[blockIdx.x * 25 + 21]);
			dctBlock[7] = fabsf(d_dctImg[blockIdx.x * 25 + 22]);
		}
	}
	/*for (int i = 0; i < 8; i++) {
	if (dctBlock[i] < 0.0001)
	dctBlock[i] = 0;
	}*/
	double mean = 0.0, std_gauss = 0.0;
	if (x == 0) {
		for (int i = 0; i < 8; i++) {
			mean += dctBlock[i];
			/*if (y == 1) {
			printf("%f\n", dctBlock[i]);
			}*/
		}
		/*if (y == 1) {
		printf("\n");
		}*/
		mean /= 8.0;
		/*if (fabsf(mean) < 0.0001) {
		ori_rho[y] = 0;
		return;
		}*/
		for (int i = 0; i < 8; i++) {
			double temp = dctBlock[i] - mean;
			std_gauss += temp * temp;
		}
		// Sample standard deviation over 8 values (divide by N-1 = 7).
		std_gauss = sqrt(std_gauss / 7.0);
		ori_rho[y] = std_gauss / (mean + 0.0000001);
		/*if (y == 7155) {
		printf("mean = %0.20f, std_gauss = %0.20f\nori[i] = %0.20f\n", mean, std_gauss, std_gauss / (mean + 0.00000001));
		}*/
	}
}
// Increase the number of warps to 4, threads = 128: each warp handles one
// DCT block (32-float stride layout) using its first 8 lanes; each lane
// derives the index of one |coefficient| of the requested orientation
// band via integer-division tricks, then the 8 lanes jointly compute
// std / (mean + eps) over the band.
// Fixes versus the original: removed the dead, unused `ori[3]` bitmask
// array, and added __syncwarp() barriers (mask = lanes 0..7) between the
// shared-memory writes and the cross-lane reads -- implicit warp
// synchrony is not guaranteed on Volta+.
__global__ void oriented_dct_rho2(float const * d_dctImg, float * ori_rho, int orient) {
	__shared__ float dctBlock[32*8];
	int const x = threadIdx.x % 32;                  // lane within the warp
	int const y = blockIdx.x * 8;                    // first DCT block of this CTA
	int const sblkIdx = (threadIdx.x / 32) * 32;     // this warp's shared-mem base
	int const gblkIdx = (y + threadIdx.x / 32) * 32; // this warp's global base
	if (x < 8) {
		if (orient == 1) {
			int inter_idx = (x + 1) / 5 + (x + 1) / 8;
			dctBlock[sblkIdx + x] = fabs(d_dctImg[gblkIdx + x + 1 + 5 * inter_idx - (x + 1) / 5 * 3 - (x + 1) / 8]);
		}
		else if (orient == 2) {
			int row = (x + 1) - x / 2 - x / 5 + x / 6 - x / 7;
			dctBlock[sblkIdx + x] = fabsf(d_dctImg[gblkIdx + row * 5 + x + 1 - x / 3 * 2]);
		}
		else if (orient == 3) {
			int const col = (x + 1) / 5 + (x + 1) / 8;
			dctBlock[sblkIdx + x] = fabsf(d_dctImg[gblkIdx + (x + 1) * 5 - 14 * col + (x + 1) / 8 * 10]);
		}
		__syncwarp(0x000000ff);   // lanes 0..7 of this warp participate
		float mean = dctBlock[sblkIdx + 0] + dctBlock[sblkIdx + 1] + dctBlock[sblkIdx + 2] + dctBlock[sblkIdx + 3] + \
			dctBlock[sblkIdx + 4] + dctBlock[sblkIdx + 5] + dctBlock[sblkIdx + 6] + dctBlock[sblkIdx + 7];
		mean /= 8;
		dctBlock[sblkIdx + x] -= mean;   // every lane subtracts the same mean
		__syncwarp(0x000000ff);
		float std_gauss = dctBlock[sblkIdx + 0] * dctBlock[sblkIdx + 0] + dctBlock[sblkIdx + 1] * dctBlock[sblkIdx + 1] + dctBlock[sblkIdx + 2] * dctBlock[sblkIdx + 2] + \
			dctBlock[sblkIdx + 3] * dctBlock[sblkIdx + 3] + dctBlock[sblkIdx + 4] * dctBlock[sblkIdx + 4] + dctBlock[sblkIdx + 5] * dctBlock[sblkIdx + 5] + \
			dctBlock[sblkIdx + 6] * dctBlock[sblkIdx + 6] + dctBlock[sblkIdx + 7] * dctBlock[sblkIdx + 7];
		std_gauss = sqrtf(std_gauss / 7);
		// All 8 lanes store the same result; redundant but benign.
		ori_rho[gblkIdx / 32] = std_gauss / (mean + 0.0000001f);
	}
}
// Increase the number of warps to 4, threads = 128: mask-based variant of
// oriented_dct_rho2 -- all 32 lanes of a warp load one 25-stride DCT
// block, zero the coefficients outside the requested orientation band via
// a lookup table, then reduce std / (mean + eps) over the 8 surviving
// entries.  8 blocks per 256-thread CTA.
// Fixes versus the original: lanes 25..31 no longer read past their
// 25-coefficient block (an out-of-bounds read for the final block of the
// array); __syncwarp() barriers added between shared writes and
// cross-lane reads (no implicit warp synchrony on Volta+); dead
// commented-out code removed.
__global__ void oriented_dct_rho3(float const * d_dctImg, float * ori_rho, int orient) {
	__shared__ float dctBlock[32 * 8];
	int const x = threadIdx.x % 32;                  // lane within the warp
	int const y = blockIdx.x * 8;                    // first DCT block of this CTA
	int const sblkIdx = (threadIdx.x / 32);          // warp index inside the CTA
	int const gblkIdx = (y + threadIdx.x / 32) * 25; // base of this warp's block (25-stride)
	int const ori = (orient - 1) * 32;               // row offset into the mask table
	// Band-membership masks for the three orientations of a 5x5 block,
	// padded to 32 entries per orientation; each band has 8 members.
	bool const orient_mat[96] = {
		//orient 1
		0, 1, 1, 1, 1,
		0, 0, 1, 1, 1,
		0, 0, 0, 0, 1,
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, // extras for alignment to 32
		0, 0,
		//orient 2
		0, 0, 0, 0, 0,
		0, 1, 0, 0, 0,
		0, 0, 1, 1, 0,
		0, 0, 1, 1, 1,
		0, 0, 0, 1, 1,
		0, 0, 0, 0, 0, // extras for alignment to 32
		0, 0,
		//orient 3
		0, 0, 0, 0, 0,
		1, 0, 0, 0, 0,
		1, 1, 0, 0, 0,
		1, 1, 0, 0, 0,
		1, 1, 1, 0, 0,
		0, 0, 0, 0, 0, // extras for alignment to 32
		0, 0,
	};
	dctBlock[threadIdx.x] = (x < 25) ? fabsf(d_dctImg[gblkIdx + x]) : 0.0f;
	if (orient_mat[ori + x] == 0) {
		dctBlock[sblkIdx * 32 + x] = 0;   // zero everything outside the band
	}
	__syncwarp();
	float mean = 0, std_gauss = 0;
	for (int i = 1; i < 25; i++) {
		mean += dctBlock[sblkIdx * 32 + i];
	}
	mean /= 8.0f;   // only the 8 band members are non-zero
	dctBlock[threadIdx.x] -= mean;        // every lane subtracts the same mean
	__syncwarp();
	for (int i = 1; i < 25; i++) {
		if (orient_mat[ori + i]) {
			std_gauss += dctBlock[sblkIdx * 32 + i] * dctBlock[sblkIdx * 32 + i];
		}
	}
	std_gauss = sqrtf(std_gauss / 7.0f);  // sample std over 8 values (N-1 = 7)
	// All 32 lanes store the same result; redundant but benign.
	ori_rho[gblkIdx / 25] = std_gauss / (mean + 0.0000001f);
}
// Variance across the three orientation-energy measures of one DCT block
// (the BLIINDS "orientation" feature).  Accumulates in double, matching
// the original numerics.  Launch: one thread per block, one block per
// DCT patch.
// Fixes versus the original: removed the unused local `x` (threadIdx.x)
// and dead commented-out debug code; the num[] ordering (ori3, ori1,
// ori2) is preserved so the summation order -- and thus the rounding --
// is unchanged.
__global__ void oriented_dct_final(const float * ori1_rho, const float * ori2_rho, const float * ori3_rho, float * ori_rho) {
	int const y = blockIdx.x;
	float num[3];
	num[1] = ori1_rho[y];
	num[2] = ori2_rho[y];
	num[0] = ori3_rho[y];
	double mean = 0, variance = 0;
	for (int i = 0; i < 3; i++) {
		mean += num[i];
	}
	mean /= 3.0;
	for (int i = 0; i < 3; i++) {
		double temp = num[i] - mean;
		variance += temp * temp;
	}
	variance /= 2.0;   // sample variance over 3 values (N-1 = 2)
	ori_rho[y] = variance;
}
// For more warps
// Fused variant of oriented_dct_final: 512 threads per CTA, one DCT block
// per thread; stores the sample variance of the three orientation
// measures (float arithmetic, unlike the double path in
// oriented_dct_final).
// NOTE(review): there is no tail guard -- the host launches
// ceil(square/512) CTAs, so the excess threads of the last CTA index past
// `square` elements of all four buffers unless they are over-allocated;
// confirm the allocation sizes or add a length parameter.
__global__ void oriented_dct_final2(const float * ori1_rho, const float * ori2_rho, const float * ori3_rho, float * ori_rho) {
	//plan: threads = 512 per CTA, grids = ceil(numBlocks/512)
	int const x = threadIdx.x;
	int const y = blockIdx.x * 512;
	float num[3];
	num[0] = ori1_rho[y + x];
	num[1] = ori2_rho[y + x];
	num[2] = ori3_rho[y + x];
	float mean = (num[0] + num[1] + num[2])/3.0;
	// Sample variance over 3 values (N-1 = 2).
	float variance = ((num[0] - mean)*(num[0] - mean) + (num[1] - mean)*(num[1] - mean) + (num[2] - mean)*(num[2] - mean)) / 2.0;
	ori_rho[y + x] = variance;
}
// Frequency sub-band energy feature for one 5x5 DCT block: threads 0, 1
// and 2 each compute the variance of one fixed set of coefficients (low /
// mid / high frequency bands along the anti-diagonals), then thread 0
// combines them into the mean of two energy ratios r1, r2.
// Launch: 25 threads per block (only 3 do the band reductions), one block
// per patch.
__global__ void subband_energy(const float * d_dctImg, float * freq_bands) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x;
	int const y = blockIdx.x;
	__shared__ float dctBlock[25];
	__shared__ double inter[3];   // per-band variances
	dctBlock[x] = d_dctImg[y * 25 + x];
	__syncthreads();
	if (x == 0) {
		// Band 1 (5 low-frequency coefficients): variance with N-1 = 4.
		//const float num1 = dctBlock[1], num2 = dctBlock[2], num3 = dctBlock[5],
		//	num4 = dctBlock[6], num5 = dctBlock[10];
		const double mean = ((double)dctBlock[1] + dctBlock[2] + dctBlock[5] + dctBlock[6] + dctBlock[10]) / 5.0;
		inter[0] = ((dctBlock[1] - mean) * (dctBlock[1] - mean) + (dctBlock[2] - mean) * (dctBlock[2] - mean) +
			(dctBlock[5] - mean) * (dctBlock[5] - mean) + (dctBlock[6] - mean) * (dctBlock[6] - mean) + (dctBlock[10] - mean) * (dctBlock[10] - mean)) / 4.0;
	}
	if (x == 1) {
		// Band 2 (13 mid-frequency coefficients): variance with N-1 = 12.
		const float num1 = dctBlock[15], num2 = dctBlock[20], num3 = dctBlock[11],
			num4 = dctBlock[16], num5 = dctBlock[21], num6 = dctBlock[7], num7 = dctBlock[12], num8 = dctBlock[17], num9 = dctBlock[3],
			num10 = dctBlock[8], num11 = dctBlock[13], num12 = dctBlock[4], num13 = dctBlock[9];
		const double mean = ((double)num1 + num2 + num3 + num4 + num5 + num6 + num7 + num8 + num9 + num10 + num11 + num12 + num13) / 13.0;
		inter[1] = ((num1 - mean) * (num1 - mean) + (num2 - mean) * (num2 - mean) +
			(num3 - mean) * (num3 - mean) + (num4 - mean) * (num4 - mean) + (num5 - mean) * (num5 - mean) +
			(num6 - mean) * (num6 - mean) + (num7 - mean) * (num7 - mean) +
			(num8 - mean) * (num8 - mean) + (num9 - mean) * (num9 - mean) + (num10 - mean) * (num10 - mean) +
			(num11 - mean) * (num11 - mean) + (num12 - mean) * (num12 - mean) + (num13 - mean) * (num13 - mean)) / 12.0;
	}
	if (x == 2) {
		// Band 3 (6 high-frequency coefficients): variance with N-1 = 5.
		//const float num1 = dctBlock[14], num2 = dctBlock[18], num3 = dctBlock[22],
		//	num4 = dctBlock[19], num5 = dctBlock[23], num6 = dctBlock[24];
		const double mean = ((double)dctBlock[14] + dctBlock[18] + dctBlock[22] + dctBlock[19] + dctBlock[23] + dctBlock[24]) / 6.0;
		inter[2] = ((dctBlock[14] - mean) * (dctBlock[14] - mean) + (dctBlock[18] - mean) * (dctBlock[18] - mean) +
			(dctBlock[22] - mean) * (dctBlock[22] - mean) + (dctBlock[19] - mean) * (dctBlock[19] - mean) +
			(dctBlock[23] - mean) * (dctBlock[23] - mean) + (dctBlock[24] - mean) * (dctBlock[24] - mean)) / 5.0;
	}
	__syncthreads();
	if (x == 0) {
		// r1: high band vs. average of the lower two; r2: mid vs. low.
		//const double var_band1 = dctBlock[25], var_band2 = dctBlock[26], var_band3 = dctBlock[27];
		const double r1 = fabsf(inter[2] - (inter[0] + inter[1]) / 2.0) / (inter[2] + (inter[0] + inter[1]) / 2.0 + 0.00000001);
		const double r2 = fabsf(inter[1] - inter[0]) / (inter[2] + inter[0] + 0.00000001);
		//const float r1 = fabsf(var_band3 - (var_band1 + var_band2) / 2.0) / (var_band3 + (var_band1 + var_band2) / 2.0 + 0.00000001);
		//const float r2 = fabsf(var_band2 - var_band1) / (var_band3 + var_band1 + 0.00000001);
		/*if (var_band3 + var_band1 < 0.0001) {
		freq_bands[y] = 0;
		return;
		}*/
		freq_bands[y] = (r1 + r2) / 2.0;
	}
}
// Higher number of warps
// Frequency-band variation feature for 5x5 DCT blocks, one DCT block per warp
// (4 warps = 4 DCT blocks per 128-thread CTA). d_dctImg stores each 5x5 DCT
// block padded to a 32-float stride; freq_bands receives one float per block.
// NOTE(review): call sites launch <<<square/4 + 1, 128>>>, so the last CTA can
// index past the end of d_dctImg/freq_bands; the kernel receives no block
// count to guard with - confirm the buffers are over-allocated accordingly.
__global__ void subband_energy2(const float * d_dctImg, float * freq_bands) {
	int const x = threadIdx.x % 32;         // lane id within the warp
	int const y = blockIdx.x * 4;           // first DCT block of this CTA
	int const sblkIdx = threadIdx.x / 32;   // warp id within the CTA
	int const gblkIdx = (y + sblkIdx) * 32; // global float offset of this warp's block
	__shared__ float dctBlock[32 * 4];
	float * const blk = dctBlock + sblkIdx * 32;
	blk[x] = d_dctImg[gblkIdx + x];
	// The loops below read coefficients written by OTHER lanes of this warp.
	// Implicit warp synchrony is not guaranteed under independent thread
	// scheduling (Volta+), so an explicit warp barrier is required here.
	__syncwarp();
	// Zig-zag index sets of the three radial sub-bands of the 5x5 DCT
	// (the DC coefficient at index 0 is excluded).
	const int band1[5] = { 1, 2, 5, 6, 10 };
	const int band2[13] = { 3, 4, 7, 8, 9, 11, 12, 13, 15, 16, 17, 20, 21 };
	const int band3[6] = { 14, 18, 19, 22, 23, 24 };
	float s1 = 0.0f, s2 = 0.0f, s3 = 0.0f;
	for (int i = 0; i < 5; i++) s1 += blk[band1[i]];
	for (int i = 0; i < 13; i++) s2 += blk[band2[i]];
	for (int i = 0; i < 6; i++) s3 += blk[band3[i]];
	const float mean1 = s1 / 5.0f;
	const float mean2 = s2 / 13.0f;
	const float mean3 = s3 / 6.0f;
	// Unbiased (n - 1) sample variance of each sub-band. Every lane computes
	// the same values redundantly, which is cheaper than a divergent branch.
	float var1 = 0.0f, var2 = 0.0f, var3 = 0.0f;
	for (int i = 0; i < 5; i++) { const float d = blk[band1[i]] - mean1; var1 += d * d; }
	for (int i = 0; i < 13; i++) { const float d = blk[band2[i]] - mean2; var2 += d * d; }
	for (int i = 0; i < 6; i++) { const float d = blk[band3[i]] - mean3; var3 += d * d; }
	var1 /= 4.0f;
	var2 /= 12.0f;
	var3 /= 5.0f;
	// Relative variation across the bands; the epsilon guards the 0/0 case.
	// fabsf keeps the computation in single precision (the original fabs
	// silently promoted to double).
	const float r1 = fabsf(var3 - (var1 + var2) / 2.0f) / (var3 + (var1 + var2) / 2.0f + 0.00000001f);
	const float r2 = fabsf(var2 - var1) / (var3 + var1 + 0.00000001f);
	// All lanes hold the same result; one store per DCT block suffices.
	if (x == 0) {
		freq_bands[gblkIdx / 32] = (r1 + r2) / 2.0f;
	}
}
// Specialized warps for each subband does not do well :(
// Per-sub-band variance feature with one warp specialised per sub-band
// (3 warps per DCT block). The author's note above marks it as
// underperforming; no launch of this kernel is visible in this file.
// NOTE(review): sblkIdx divides threadIdx.x by 96 (3 warps x 32 lanes per
// DCT block), but gblkIdx uses a 25-float stride while 32 floats are loaded
// per block - unlike subband_energy2's 32-float stride. Confirm the intended
// d_dctImg layout before reusing this kernel.
__global__ void subband_energy3(const float * d_dctImg, float * freq_bands) {
	//plan grids = (512/3 + 1)^2, threads = 25
	int const x = threadIdx.x % 32;
	int const y = blockIdx.x * 4;
	int const sblkIdx = threadIdx.x / 96;
	int const gblkIdx = (y + sblkIdx) * 25;
	__shared__ float dctBlock[32 * 4];
	// inter[b*3 + k] = variance of sub-band k for the CTA's b-th DCT block.
	__shared__ float inter[3 * 4];
	// All three warps of a sub-block store into the same shared slots; the
	// redundant writes carry identical data read from the same global offsets.
	dctBlock[sblkIdx * 32 + x] = d_dctImg[gblkIdx + x];
	//__syncthreads();
	float mean;
	// Warp 0 (mod 3) of each block: low-frequency band {1,2,5,6,10}.
	if (threadIdx.x / 32 % 3 == 0) {
		mean = (dctBlock[sblkIdx * 32 + 1] + dctBlock[sblkIdx * 32 + 2] + dctBlock[sblkIdx * 32 + 5] + \
			dctBlock[sblkIdx * 32 + 6] + dctBlock[sblkIdx * 32 + 10]) / 5.0f;
		inter[sblkIdx * 3 + 0] = ((dctBlock[sblkIdx * 32 + 1] - mean) * (dctBlock[sblkIdx * 32 + 1] - mean) + (dctBlock[sblkIdx * 32 + 2] - mean) * (dctBlock[sblkIdx * 32 + 2] - mean) +
			(dctBlock[sblkIdx * 32 + 5] - mean) * (dctBlock[sblkIdx * 32 + 5] - mean) + (dctBlock[sblkIdx * 32 + 6] - mean) * (dctBlock[sblkIdx * 32 + 6] - mean) + \
			(dctBlock[sblkIdx * 32 + 10] - mean) * (dctBlock[sblkIdx * 32 + 10] - mean)) / 4.0f;
	}
	// Warp 1 (mod 3): mid-frequency band of 13 coefficients.
	if (threadIdx.x / 32 % 3 == 1) {
		/*const double*/ mean = (dctBlock[sblkIdx * 32 + 15] + dctBlock[sblkIdx * 32 + 20] + dctBlock[sblkIdx * 32 + 11] + \
			dctBlock[sblkIdx * 32 + 16] + dctBlock[sblkIdx * 32 + 21] + dctBlock[sblkIdx * 32 + 7] + dctBlock[sblkIdx * 32 + 12] + \
			dctBlock[sblkIdx * 32 + 17] + dctBlock[sblkIdx * 32 + 3] + dctBlock[sblkIdx * 32 + 8] + dctBlock[sblkIdx * 32 + 13] + \
			dctBlock[sblkIdx * 32 + 4] + dctBlock[sblkIdx * 32 + 9]) / 13.0f;
		inter[sblkIdx * 3 + 1] = ((dctBlock[sblkIdx * 32 + 15] - mean) * (dctBlock[sblkIdx * 32 + 15] - mean) + (dctBlock[sblkIdx * 32 + 20] - mean) * (dctBlock[sblkIdx * 32 + 20] - mean) +
			(dctBlock[sblkIdx * 32 + 11] - mean) * (dctBlock[sblkIdx * 32 + 11] - mean) + (dctBlock[sblkIdx * 32 + 16] - mean) * (dctBlock[sblkIdx * 32 + 16] - mean) + (dctBlock[sblkIdx * 32 + 21] - mean) * (dctBlock[sblkIdx * 32 + 21] - mean) +
			(dctBlock[sblkIdx * 32 + 7] - mean) * (dctBlock[sblkIdx * 32 + 7] - mean) + (dctBlock[sblkIdx * 32 + 12] - mean) * (dctBlock[sblkIdx * 32 + 12] - mean) +
			(dctBlock[sblkIdx * 32 + 17] - mean) * (dctBlock[sblkIdx * 32 + 17] - mean) + (dctBlock[sblkIdx * 32 + 3] - mean) * (dctBlock[sblkIdx * 32 + 3] - mean) + \
			(dctBlock[sblkIdx * 32 + 8] - mean) * (dctBlock[sblkIdx * 32 + 8] - mean) +
			(dctBlock[sblkIdx * 32 + 13] - mean) * (dctBlock[sblkIdx * 32 + 13] - mean) + (dctBlock[sblkIdx * 32 + 4] - mean) * (dctBlock[sblkIdx * 32 + 4] - mean) +
			(dctBlock[sblkIdx * 32 + 9] - mean) * (dctBlock[sblkIdx * 32 + 9] - mean)) / 12.0f;
	}
	// Warp 2 (mod 3): high-frequency band {14,18,19,22,23,24}.
	if (threadIdx.x / 32 % 3 == 2) {
		mean = (dctBlock[sblkIdx * 32 + 14] + dctBlock[sblkIdx * 32 + 18] + dctBlock[sblkIdx * 32 + 22] + dctBlock[sblkIdx * 32 + 19] + \
			dctBlock[sblkIdx * 32 + 23] + dctBlock[sblkIdx * 32 + 24]) / 6.0f;
		inter[sblkIdx * 3 + 2] = ((dctBlock[sblkIdx * 32 + 14] - mean) * (dctBlock[sblkIdx * 32 + 14] - mean) + (dctBlock[sblkIdx * 32 + 18] - mean) * (dctBlock[sblkIdx * 32 + 18] - mean) +
			(dctBlock[sblkIdx * 32 + 22] - mean) * (dctBlock[sblkIdx * 32 + 22] - mean) + (dctBlock[sblkIdx * 32 + 19] - mean) * (dctBlock[sblkIdx * 32 + 19] - mean) +
			(dctBlock[sblkIdx * 32 + 23] - mean) * (dctBlock[sblkIdx * 32 + 23] - mean) + (dctBlock[sblkIdx * 32 + 24] - mean) * (dctBlock[sblkIdx * 32 + 24] - mean)) / 5.0f;
	}
	// Barrier so warp 1 sees the variances written by warps 0 and 2.
	__syncthreads();
	// One warp combines the three variances into the final per-block feature.
	if (threadIdx.x / 32 % 3 == 1) {
		const float r1 = fabsf(inter[sblkIdx * 3 + 2] - (inter[sblkIdx * 3 + 0] + inter[sblkIdx * 3 + 1]) / 2.0f) / \
			(inter[sblkIdx * 3 + 2] + (inter[sblkIdx * 3 + 0] + inter[sblkIdx * 3 + 1]) / 2.0f + 0.00000001f);
		const float r2 = fabsf(inter[sblkIdx * 3 + 1] - inter[sblkIdx * 3 + 0]) / (inter[sblkIdx * 3 + 2] + inter[sblkIdx * 3 + 0] + 0.00000001f);
		freq_bands[gblkIdx / 25] = (r1 + r2) / 2.0f;
	}
}
// Per-block arithmetic mean: block b sums d_input[b*blockDim.x .. +blockDim.x)
// and writes sum / num_elements into d_mean_array[b]. The shared buffer is
// sized for at most 171 threads per block (the 512/3 + 1 launch width).
__global__ void mean_100(float * d_input, float * d_mean_array, int num_elements) {
	__shared__ float partial[171];
	const int tid = threadIdx.x;
	const int base = blockIdx.x * blockDim.x;
	partial[tid] = d_input[base + tid];
	__syncthreads();
	// Tree reduction. The extra bound check on (tid + stride) keeps the
	// reduction correct when blockDim.x is not a power of two (e.g. 171).
	for (int stride = 128; stride > 0; stride >>= 1) {
		if (tid < stride && tid + stride < blockDim.x) {
			partial[tid] += partial[tid + stride];
		}
		__syncthreads();
	}
	// Lane 0 publishes the block mean.
	if (tid == 0) {
		d_mean_array[blockIdx.x] = partial[0] / num_elements;
	}
}
// 3-tap horizontal (row-wise) low-pass convolution with zero padding.
// Launch layout: one block per row (blockIdx.x = row), one thread per column
// (threadIdx.x = column); d_input and d_output are size x size images.
__global__ void convolveRow(const float * d_input, const int size, float * d_output) {
	const int x = threadIdx.x;	// column
	const int y = blockIdx.x;	// row
	const int offset = -1;		// taps are centred on the output pixel
	const int num_taps = 3;
	// Normalised low-pass taps; kept in double to preserve the original
	// accumulation precision.
	const double h[3] = { 0.106506978919200, 0.786986042161605, 0.106506978919200 };
	double val = 0.0;
	for (int tap_idx = 0; tap_idx < num_taps; tap_idx++) {
		const int x_comp = x + offset + tap_idx;
		// Zero padding outside the left/right image borders.
		val += h[tap_idx] * ((x_comp < 0 || x_comp >= size) ? 0 : d_input[y * size + x_comp]);
	}
	d_output[y * size + x] = val;
	// The original trailing __syncthreads() was removed: the kernel uses no
	// shared memory, so the barrier had no effect beyond wasted cycles.
}
// 3-tap vertical (column-wise) low-pass convolution with zero padding.
// Launch layout: one block per row index (blockIdx.x = y), one thread per
// column (threadIdx.x = x); d_input and d_output are size x size images.
__global__ void convolveCol(const float * d_input, const int size, float * d_output) {
	const int x = threadIdx.x;	// column
	const int y = blockIdx.x;	// row
	const int offset = -1;		// taps are centred on the output pixel
	const int num_taps = 3;
	// Same normalised taps as convolveRow; kept in double to preserve the
	// original accumulation precision.
	const double h[3] = { 0.106506978919200, 0.786986042161605, 0.106506978919200 };
	double val = 0.0;
	for (int tap_idx = 0; tap_idx < num_taps; tap_idx++) {
		const int y_comp = y + offset + tap_idx;
		// Zero padding above/below the image.
		val += h[tap_idx] * ((y_comp < 0 || y_comp >= size) ? 0 : d_input[y_comp * size + x]);
	}
	// The original __syncthreads() before this store was removed: the kernel
	// uses no shared memory, so the barrier had no effect beyond wasted cycles.
	d_output[y * size + x] = val;
}
// 2x decimation of a size x size image: block = output row, thread = output
// column. For even `size` the source index resolves to pixel
// (2*row + 1, 2*col + 1), i.e. every other sample offset by one in each axis
// (call sites use size = 512 and 256).
__global__ void downsample_by2(const float * d_image, int const size, float * d_image_by2) {
	const int dst = blockIdx.x * size / 2 + threadIdx.x;
	const int src = size + 1 + 2 * dst + blockIdx.x * size;
	d_image_by2[dst] = d_image[src];
}
// Linearize an 8UC1 image: copy the top-left rows x cols region of Mat_in
// into float_array in row-major order, casting each pixel to float.
// rows/cols default to 512 so the original fixed 512x512 behaviour - and all
// existing two-argument call sites - are preserved.
void linearize_and_cast_from_Mat_to_float(const cv::Mat &Mat_in, float *float_array,
	int rows = 512, int cols = 512)
{
	for (int row = 0; row < rows; row++)
		for (int col = 0; col < cols; col++)
			float_array[row * cols + col] = static_cast<float>(Mat_in.at<unsigned char>(row, col));
}
void device_rst()
{
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
}
}
void kernel_wrapper(const cv::Mat &Mat_in)
{
/*
// cuFFT settings for DCT
cufftHandle p;
int rank = 1;
int lengthOfDFT = 10;
int howmany = 5 * (512 / 3 + 1) * (512 / 3 + 1);
int odist;
int idist = odist = lengthOfDFT;
int ostride;
int istride = ostride = 1; // array is contiguous in memory
cufftPlanMany(&p, rank, &lengthOfDFT, NULL, istride, idist, NULL, ostride,
odist, CUFFT_C2C, howmany);
*/
// The section of code below checks for zero copy feature. This way a kernel can directly use host memory.
/*cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
if (!prop.canMapHostMemory)
exit(0);
else
std::cout << "canMapHostMemory\n";
*/
// Allocate HOST memory
float* h_in; cudaError_t status = cudaMallocHost(&h_in, 512 * 512 * sizeof(float));
if (status != cudaSuccess) {
std::cout << "Error allocating pinned host memory for imput image";
}
float* features; status = cudaMallocHost(&features, 25 * sizeof(float));
if (status != cudaSuccess) {
std::cout << "Error allocating pinned host memory for features";
}
LARGE_INTEGER frequency; // ticks per second
LARGE_INTEGER t1, t2; // ticks
double elapsedTime;
// get ticks per second
QueryPerformanceFrequency(&frequency);
// start timer
QueryPerformanceCounter(&t1);
// Linearize image and cast from 8UC1 Mat to float array
linearize_and_cast_from_Mat_to_float(Mat_in, h_in);
//const float* h_in = Mat_in.ptr<float>(0);
//Device memory allocations
float *d_in; cudaMalloc((void **)&d_in, 512 * 512 * sizeof(float));
cudaProfilerStart();
// Copy data from HOST -> DEVICE
cudaMemcpy(d_in, h_in, 512 * 512 * sizeof(float), cudaMemcpyHostToDevice);
float * d_g_info; cudaMalloc((void **)&d_g_info, 9971 * sizeof(float));
float * d_r_info; cudaMalloc((void **)&d_r_info, 9971 * sizeof(float));
float *d_in_pad; cudaMalloc((void **)&d_in_pad, 517 * 517 * sizeof(float));
/*cufftComplex *d_rearr_in; cudaMalloc((void **)&d_rearr_in, 50 * (512 / 3 + 1) * (512 / 3 + 1) * sizeof(cufftComplex));
cufftComplex *d_dct_inter; cudaMalloc((void **)&d_dct_inter, 50 * (512 / 3 + 1) * (512 / 3 + 1) * sizeof(cufftComplex));
cufftComplex *d_dct_in; cudaMalloc((void **)&d_dct_in, 50 * (512 / 3 + 1) * (512 / 3 + 1) * sizeof(cufftComplex));
*/
float * d_dctImg; cudaMalloc((void **)&d_dctImg, (512 / 3 + 1) * (512 / 3 + 1) * 32 * sizeof(float));
float * d_coeff_freq_var_L1; cudaMalloc((void **)&d_coeff_freq_var_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_ori1_rho_L1; cudaMalloc((void **)&d_ori1_rho_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_ori2_rho_L1; cudaMalloc((void **)&d_ori2_rho_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_ori3_rho_L1; cudaMalloc((void **)&d_ori3_rho_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_ori_rho_L1; cudaMalloc((void **)&d_ori_rho_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_freq_bands; cudaMalloc((void **)&d_freq_bands, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_gama_L1; cudaMalloc((void **)&d_gama_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
float * d_mean_array; cudaMalloc((void **)&d_mean_array, (512 / 3 + 1) * sizeof(float));
cudaStream_t stream1;
cudaStream_t stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
//cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking);
//cudaStreamCreateWithFlags(&stream2, cudaStreamNonBlocking);
// copy gama vector
cudaMemcpyAsync(d_g_info, bliinds_info::g_vector, 9971 * sizeof(float), cudaMemcpyHostToDevice, stream1);
// copy rho vector
cudaMemcpyAsync(d_r_info, bliinds_info::r_vector, 9971 * sizeof(float), cudaMemcpyHostToDevice, stream2);
cudaMemcpyToSymbol(dc_r_info, bliinds_info::r_vector, 9971 * sizeof(float));
// pad input image for DCT in blocks
pad << <517, 517 >> >(d_in, 512, d_in_pad);
// Total number of DCT blocks at current scale
int square = (512 / 3 + 1) * (512 / 3 + 1);
//CuFFT at 512x512
/*rearrangeForCuFFT << <square, 25, 0, 0 >> >(d_in_pad, 512, d_rearr_in);
cufftExecC2C(p, d_rearr_in, d_dct_in, CUFFT_FORWARD);
transposeForCuFFT << <square, 25, 0, 0 >> >(d_dct_in, d_dct_in);
cufftExecC2C(p, d_dct_in, d_dct_in, CUFFT_FORWARD);
transposeForCuFFT << <square, 25, 0, 0 >> >(d_dct_in, d_dct_in);
copyDCT << <square, 25 >> >(d_dct_in, d_dctImg);
cudaDeviceSynchronize();
*/
//float * h_dctImg = (float*)malloc((512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
//cudaMemcpy(h_dctImg, d_dctImg, (512 / 3 + 1) * (512 / 3 + 1) * 25 * sizeof(float), cudaMemcpyDeviceToHost);
float* d_rearr_man; cudaMalloc((void **)&d_rearr_man, 32 * (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float));
cudaMemset(d_rearr_man, 0, 32 * square * sizeof(float));
cudaMemset(d_dctImg, 0, 32 * square * sizeof(float));
double* d_dctmtx; cudaMalloc((void **)&d_dctmtx, 32 * sizeof(double));
cudaMemset(d_dctmtx, 0, 32 * sizeof(double));
cudaMemcpy(d_dctmtx, dct2_55::dctmtx_5, 25 * sizeof(double), cudaMemcpyHostToDevice);
//float* h_dctImg = (float *)malloc(32 * square*sizeof(float));
/*for (int i = 517; i < 517+32; i++) {
std::cout << h_dctImg[i] << "\t";
if ((i + 1) % 5 == 0)
std::cout << std::endl;
}*/
//rearrangeForDCTv2 << <square / 4 + 1, 128 >> >(d_in_pad, 512, d_rearr_man);
/*cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "ERROR1: %s\n", cudaGetErrorString(error));
exit(-1);
}*/
rearrangeAndDCT55 << <square / 8 + 1, 256 >> >(d_in_pad, 512, d_dctmtx, d_dctImg);
/*cudaDeviceSynchronize();
error = cudaGetLastError();
//cudaMemcpy(h_dctImg, d_dctImg, 32 * square * sizeof(float), cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
fprintf(stderr, "ERROR2: %s\n", cudaGetErrorString(error));
exit(-1);
}*/
//#ifdef EBM
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
//#endif
rho_dct2 << <square / 16 + 1, 512 >> >(d_dctImg, d_coeff_freq_var_L1);
//thrust::device_ptr<float> keys(d_coeff_freq_var_L1);
thrust::sort(thrust::device, d_coeff_freq_var_L1, d_coeff_freq_var_L1 + square);
//thrust::host_vector<float> h_coeff_freq_L1(d_coeff_freq_var_L1, d_coeff_freq_var_L1 + square);
int mean10_size = ceil((square) / 10.0);
features[0] = thrust::reduce(thrust::device, d_coeff_freq_var_L1, d_coeff_freq_var_L1 + square) / square;
features[1] = thrust::reduce(thrust::device, d_coeff_freq_var_L1 + square - mean10_size, d_coeff_freq_var_L1 + square) / mean10_size;
gama_dct62 << <square / 16 + 1, 512 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L1);
thrust::sort(thrust::device, d_gama_L1, d_gama_L1 + square);
gama_dct6_3 << <square / 128 + 1, 128 >> >(d_gama_L1, d_g_info, d_r_info, d_gama_L1, square);
features[2] = thrust::reduce(thrust::device, d_gama_L1, d_gama_L1 + square) / square;
features[3] = thrust::reduce(thrust::device, d_gama_L1 + square - mean10_size, d_gama_L1 + square) / mean10_size;
/*cudaMemcpy(h_dctImg, d_coeff_freq_var_L1, square * sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream outfile3("harbourJPGcoeff_freq_varL1GPU.txt");
for (int j = 0; j < square; j++) {
//for (int i = 0; i < 5; i++) {
outfile3 << h_dctImg[j];
//if ((i + 1) % 5 == 0){
//}
//}
outfile3 << std::endl;
}
outfile3.close();
*/
/*std::cout << "square1 = " << square << std::endl;
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori1_rho_L1, 1);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori2_rho_L1, 2);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori3_rho_L1, 3);*/
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori1_rho_L1, 1);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori2_rho_L1, 2);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori3_rho_L1, 3);
oriented_dct_final2 << <square / 512 + 1, 512, 0 >> >(d_ori1_rho_L1, d_ori2_rho_L1, d_ori3_rho_L1, d_ori_rho_L1);
thrust::sort(thrust::device, d_ori_rho_L1, d_ori_rho_L1 + square);
features[6] = thrust::reduce(thrust::device, d_ori_rho_L1, d_ori_rho_L1 + square) / square;
features[7] = thrust::reduce(thrust::device, d_ori_rho_L1 + square - mean10_size, d_ori_rho_L1 + square) / mean10_size;
//std::cout << "or_rho_dct done\n";
subband_energy2 << <square / 4 + 1, 128 >> >(d_dctImg, d_freq_bands);
thrust::sort(thrust::device, d_freq_bands, d_freq_bands + square);
features[4] = thrust::reduce(thrust::device, d_freq_bands, d_freq_bands + square) / square;
features[5] = thrust::reduce(thrust::device, d_freq_bands + square - mean10_size, d_freq_bands + square) / mean10_size;
//std::cout << "subband done\n";
//cudaMemcpy(h_dctImg, d_gama_L1, (512 / 3 + 1) * (512 / 3 + 1) * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_ori1_rho_L1);
cudaFree(d_ori2_rho_L1);
cudaFree(d_ori3_rho_L1);
cudaFree(d_gama_L1);
cudaFree(d_coeff_freq_var_L1);
cudaFree(d_freq_bands);
cudaFree(d_ori_rho_L1);
//----------------------Start Phase 2----------------------------------------------------------
cudaDeviceSynchronize();
square = (256 / 3 + 1) * (256 / 3 + 1);
float *d_in_conv_inter_L2; cudaMalloc((void **)&d_in_conv_inter_L2, 512 * 512 * sizeof(float));
float *d_in_convolve_L2; cudaMalloc((void **)&d_in_convolve_L2, 512 * 512 * sizeof(float));
float *d_in_L2; cudaMalloc((void **)&d_in_L2, 256 * 256 * sizeof(float));
float *d_in_pad_L2; cudaMalloc((void **)&d_in_pad_L2, 261 * 261 * sizeof(float));
float *d_coeff_freq_var_L2; cudaMalloc((void **)&d_coeff_freq_var_L2, square * sizeof(float));
cufftComplex *d_rearr_in_L2; cudaMalloc((void **)&d_rearr_in_L2, 50 * square * sizeof(cufftComplex));
float * d_ori1_rho_L2; cudaMalloc((void **)&d_ori1_rho_L2, square * sizeof(float));
float * d_ori2_rho_L2; cudaMalloc((void **)&d_ori2_rho_L2, square * sizeof(float));
float * d_ori3_rho_L2; cudaMalloc((void **)&d_ori3_rho_L2, square * sizeof(float));
float * d_ori_rho_L2; cudaMalloc((void **)&d_ori_rho_L2, square * sizeof(float));
float * d_freq_bands_L2; cudaMalloc((void **)&d_freq_bands_L2, square * sizeof(float));
float * d_gama_L2; cudaMalloc((void **)&d_gama_L2, square * sizeof(float));
convolveRow << <512, 512 >> >(d_in, 512, d_in_conv_inter_L2);
convolveCol << <512, 512 >> >(d_in_conv_inter_L2, 512, d_in_convolve_L2);
cudaDeviceSynchronize();
downsample_by2 << <256, 256 >> >(d_in_convolve_L2, 512, d_in_L2);
pad << <261, 261 >> >(d_in_L2, 256, d_in_pad_L2);
/*float * h_dctImg = (float*)malloc(square * sizeof(float));
cudaMemcpy(h_dctImg, d_in_convolve_L2, square * sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream outfile3("convolve_L2GPU.txt");
for (int j = 0; j < square; j++) {
//for (int i = 0; i < 5; i++) {
outfile3 << h_dctImg[j];
//if ((i + 1) % 5 == 0){
//}
//}
outfile3 << std::endl;
}
outfile3.close();
cudaMemcpy(h_dctImg, d_in_L2, 256 * 256 * sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream outfile2("d_in_L2GPU.txt");
for (int j = 0; j < 256 * 256; j++) {
//for (int i = 0; i < 5; i++) {
outfile2 << h_dctImg[j];
//if ((i + 1) % 5 == 0){
//}
//}
outfile2 << std::endl;
}
outfile2.close();
*/
/*howmany = 5 * square;
cufftPlanMany(&p, rank, &lengthOfDFT, NULL, istride, idist, NULL, ostride,
odist, CUFFT_C2C, howmany);
cudaDeviceSynchronize();
rearrangeForCuFFT << <square, 25 >> >(d_in_pad_L2, 256, d_rearr_in_L2);
cufftExecC2C(p, d_rearr_in_L2, d_dct_in, CUFFT_FORWARD);
transposeForCuFFT << <square, 25 >> >(d_dct_in, d_dct_in);
cufftExecC2C(p, d_dct_in, d_dct_in, CUFFT_FORWARD);
transposeForCuFFT << <square, 25 >> >(d_dct_in, d_dct_in);
copyDCT << <square, 25 >> >(d_dct_in, d_dctImg);
cudaDeviceSynchronize();
*/
//std::cout << "phase 1 done \n";
//rearrangeForDCTv2 << <square / 4 + 1, 128 >> >(d_in_pad_L2, 256, d_rearr_man);
rearrangeAndDCT55 << <square / 8 + 1, 256 >> >(d_in_pad_L2, 256, d_dctmtx, d_dctImg);
//h_dctImg = (float*)malloc(25 * square * sizeof(float));
//std::cout << "second dct\n";
rho_dct2 << <square / 16 + 1, 512 >> >(d_dctImg, d_coeff_freq_var_L2);
thrust::sort(thrust::device, d_coeff_freq_var_L2, d_coeff_freq_var_L2 + square);
mean10_size = ceil((square) / 10.0);
features[9] = thrust::reduce(thrust::device, d_coeff_freq_var_L2 + square - mean10_size, d_coeff_freq_var_L2 + square) / mean10_size;
features[8] = thrust::reduce(thrust::device, d_coeff_freq_var_L2, d_coeff_freq_var_L2 + square) / square;
gama_dct62 << <square / 16 + 1, 512 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L2);
//gama_dct5 << <square, 1024 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L2);
thrust::sort(thrust::device, d_gama_L2, d_gama_L2 + square);
gama_dct6_3 << <square / 128 + 1, 128 >> >(d_gama_L2, d_g_info, d_r_info, d_gama_L2, square);
features[11] = thrust::reduce(thrust::device, d_gama_L2 + square - mean10_size, d_gama_L2 + square) / mean10_size;
features[10] = thrust::reduce(thrust::device, d_gama_L2, d_gama_L2 + square) / square;
subband_energy2 << <square / 4 + 1, 128 >> >(d_dctImg, d_freq_bands_L2);
thrust::sort(thrust::device, d_freq_bands_L2, d_freq_bands_L2 + square);
features[13] = thrust::reduce(thrust::device, d_freq_bands_L2 + square - mean10_size, d_freq_bands_L2 + square) / mean10_size;
features[12] = thrust::reduce(thrust::device, d_freq_bands_L2, d_freq_bands_L2 + square) / square;
/*std::cout << "square2 = " << square << std::endl;
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori1_rho_L2, 1);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori2_rho_L2, 2);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori3_rho_L2, 3);*/
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori1_rho_L2, 1);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori2_rho_L2, 2);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori3_rho_L2, 3);
oriented_dct_final2 << <square / 512 + 1, 512 >> >(d_ori1_rho_L2, d_ori2_rho_L2, d_ori3_rho_L2, d_ori_rho_L2);
thrust::sort(thrust::device, d_ori_rho_L2, d_ori_rho_L2 + square);
features[15] = thrust::reduce(thrust::device, d_ori_rho_L2 + square - mean10_size, d_ori_rho_L2 + square) / mean10_size;
features[14] = thrust::reduce(thrust::device, d_ori_rho_L2, d_ori_rho_L2 + square) / square;
/*float * h_dctImg = (float*)malloc(square * 25 * sizeof(float));
cudaMemcpy(h_dctImg, d_dctImg, square * 25 * sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream outfile3("d_dctImg_L2_babyJPG.txt");
for (int j = 0; j < square; j++) {
for (int i = 0; i < 25; i++) {
outfile3 << h_dctImg[j * 25 + i] << ",";
if ((i + 1) % 5 == 0)
outfile3 << std::endl;
}
outfile3 << std::endl;
}
outfile3.close();*/
/*cudaMemcpy(h_dctImg, d_ori2_rho_L2, square * sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream outfile4("d_ori2_L2_babyJPG.txt");
for (int j = 0; j < square; j++) {
outfile4 << h_dctImg[j] << std::endl;
}
outfile4.close();
cudaMemcpy(h_dctImg, d_ori3_rho_L2, square * sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream outfile5("d_ori3_L2_babyJPG.txt");
for (int j = 0; j < square; j++) {
outfile5 << h_dctImg[j] << std::endl;
}
outfile5.close();*/
cudaFree(d_ori1_rho_L2);
cudaFree(d_ori2_rho_L2);
cudaFree(d_ori3_rho_L2);
cudaFree(d_gama_L2);
cudaFree(d_coeff_freq_var_L2);
cudaFree(d_freq_bands_L2);
cudaFree(d_ori_rho_L2);
/*
cudaFree(d_in_conv_inter_L2);
cudaFree(d_in_pad_L2);
cudaFree(d_in_conv_inter_L2);
cudaFree(d_rearr_in_L2);
*/
//----------------------Start Phase 3----------------------------------------------------------
cudaDeviceSynchronize();
square = (128 / 3 + 1) * (128 / 3 + 1);
float *d_in_conv_inter_L3; cudaMalloc((void **)&d_in_conv_inter_L3, 256 * 256 * sizeof(float));
float *d_in_convolve_L3; cudaMalloc((void **)&d_in_convolve_L3, 256 * 256 * sizeof(float));
float *d_in_L3; cudaMalloc((void **)&d_in_L3, 128 * 128 * sizeof(float));
float *d_in_pad_L3; cudaMalloc((void **)&d_in_pad_L3, 133 * 133 * sizeof(float));
float *d_coeff_freq_var_L3; cudaMalloc((void **)&d_coeff_freq_var_L3, square * sizeof(float));
cufftComplex *d_rearr_in_L3; cudaMalloc((void **)&d_rearr_in_L3, 50 * square * sizeof(cufftComplex));
float * d_ori1_rho_L3; cudaMalloc((void **)&d_ori1_rho_L3, square * sizeof(float));
float * d_ori2_rho_L3; cudaMalloc((void **)&d_ori2_rho_L3, square * sizeof(float));
float * d_ori3_rho_L3; cudaMalloc((void **)&d_ori3_rho_L3, square * sizeof(float));
float * d_ori_rho_L3; cudaMalloc((void **)&d_ori_rho_L3, square * sizeof(float));
float * d_freq_bands_L3; cudaMalloc((void **)&d_freq_bands_L3, square * sizeof(float));
float * d_gama_L3; cudaMalloc((void **)&d_gama_L3, square * sizeof(float));
convolveRow << <256, 256 >> >(d_in_L2, 256, d_in_conv_inter_L3);
convolveCol << <256, 256 >> >(d_in_conv_inter_L3, 256, d_in_convolve_L3);
cudaDeviceSynchronize();
downsample_by2 << <128, 128 >> >(d_in_convolve_L3, 256, d_in_L3);
pad << <133, 133 >> >(d_in_L3, 128, d_in_pad_L3);
/*howmany = 5 * square;
cufftPlanMany(&p, rank, &lengthOfDFT, NULL, istride, idist, NULL, ostride,
odist, CUFFT_C2C, howmany);
cudaDeviceSynchronize();
rearrangeForCuFFT << <square, 25 >> >(d_in_pad_L3, 128, d_rearr_in_L3);
cufftExecC2C(p, d_rearr_in_L3, d_dct_in, CUFFT_FORWARD);
transposeForCuFFT << <square, 25 >> >(d_dct_in, d_dct_in);
cufftExecC2C(p, d_dct_in, d_dct_in, CUFFT_FORWARD);
transposeForCuFFT << <square, 25 >> >(d_dct_in, d_dct_in);
copyDCT << <square, 25 >> >(d_dct_in, d_dctImg);
cudaDeviceSynchronize();
*/
//rearrangeForDCTv2 << <square / 4 + 1, 128 >> >(d_in_pad_L3, 128, d_rearr_man);
rearrangeAndDCT55 << <square / 8 + 1, 256 >> >(d_in_pad_L3, 128, d_dctmtx, d_dctImg);
cudaFree(d_dctmtx);
rho_dct2 << <square / 16 + 1, 512 >> >(d_dctImg, d_coeff_freq_var_L3);
thrust::sort(thrust::device, d_coeff_freq_var_L3, d_coeff_freq_var_L3 + square);
mean10_size = ceil((square) / 10.0);
features[17] = thrust::reduce(thrust::device, d_coeff_freq_var_L3 + square - mean10_size, d_coeff_freq_var_L3 + square) / mean10_size;
features[16] = thrust::reduce(thrust::device, d_coeff_freq_var_L3, d_coeff_freq_var_L3 + square) / square;
gama_dct62 << <square / 16 + 1, 512 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L3);
//gama_dct5 << <square, 1024 >> >(d_dctImg, d_g_info, d_r_info, d_gama_L3);
thrust::sort(thrust::device, d_gama_L3, d_gama_L3 + square);
gama_dct6_3 << <square / 128 + 1, 128 >> >(d_gama_L3, d_g_info, d_r_info, d_gama_L3, square);
features[19] = thrust::reduce(thrust::device, d_gama_L3 + square - mean10_size, d_gama_L3 + square) / mean10_size;
features[18] = thrust::reduce(thrust::device, d_gama_L3, d_gama_L3 + square) / square;
// square = 1849
subband_energy2 << <square / 4 + 1, 128 >> >(d_dctImg, d_freq_bands_L3);
thrust::sort(thrust::device, d_freq_bands_L3, d_freq_bands_L3 + square);
features[21] = thrust::reduce(thrust::device, d_freq_bands_L3 + square - mean10_size, d_freq_bands_L3 + square) / mean10_size;
features[20] = thrust::reduce(thrust::device, d_freq_bands_L3, d_freq_bands_L3 + square) / square;
/*std::cout << "square3 = " << square << std::endl;
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori1_rho_L3, 1);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori2_rho_L3, 2);
oriented_dct_rho << <square, 1 >> >(d_dctImg, d_ori3_rho_L3, 3);*/
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori1_rho_L3, 1);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori2_rho_L3, 2);
oriented_dct_rho2 << <square / 8 + 1, 256 >> >(d_dctImg, d_ori3_rho_L3, 3);
oriented_dct_final2 << <square / 512 + 1, 512 >> >(d_ori1_rho_L3, d_ori2_rho_L3, d_ori3_rho_L3, d_ori_rho_L3);
thrust::sort(thrust::device, d_ori_rho_L3, d_ori_rho_L3 + square);
features[23] = thrust::reduce(thrust::device, d_ori_rho_L3 + square - mean10_size, d_ori_rho_L3 + square) / mean10_size;
features[22] = thrust::reduce(thrust::device, d_ori_rho_L3, d_ori_rho_L3 + square) / square;
//Print features
/*
std::cout << "coeff_freq_var_l1:" << features[0] << ", " << features[1] << std::endl;
std::cout << "gama_dct_l1:" << features[2] << ", " << features[3] << std::endl;
std::cout << "freq_bands:" << features[4] << ", " << features[5] << std::endl;
std::cout << "ori_rho_l1:" << features[6] << ", " << features[7] << std::endl;
std::cout << "coeff_freq_var_l2: " << features[8] << ", " << features[9] << std::endl;
std::cout << "gama_l2: " << features[10] << ", " << features[11] << std::endl;
std::cout << "freq_bands_l2: " << features[12] << ", " << features[13] << std::endl;
std::cout << "ori_rho_l2: " << features[14] << ", " << features[15] << std::endl;
std::cout << "coeff_freq_var_l3: " << features[16] << ", " << features[17] << std::endl;
std::cout << "gama_l3: " << features[18] << ", " << features[19] << std::endl;
std::cout << "freq_bands_l3: " << features[20] << ", " << features[21] << std::endl;
std::cout << "ori_rho_l3: " << features[22] << ", " << features[23] << std::endl;
printf("coeff_freq_var_l1: %0.15f, %0.15f\n", features[0], features[1]);
printf("gama_dct_l1: %0.15f, %0.15f\n", features[2], features[3]);
printf("freq_bands: %0.15f, %0.15f\n", features[4], features[5]);
printf("ori_rho_l1: %0.15f, %0.15f\n", features[6], features[7]);
printf("coeff_freq_var_l2: %0.15f, %0.15f\n", features[8], features[9]);
printf("gama_l2: %0.15f, %0.15f\n", features[10], features[11]);
printf("freq_bands_l2: %0.15f, %0.15f\n", features[12], features[13]);
printf("ori_rho_l2: %0.15f, %0.15f\n", features[14], features[15]);
printf("coeff_freq_var_l3: %0.15f, %0.15f\n", features[16], features[17]);
printf("gama_l3: %0.15f, %0.15f\n", features[18], features[19]);
printf("freq_bands_l3: %0.15f, %0.15f\n", features[20], features[21]);
printf("ori_rho_l3: %0.15f, %0.15f\n", features[22], features[23]);
*/
cudaFree(d_ori1_rho_L3);
cudaFree(d_ori2_rho_L3);
cudaFree(d_ori3_rho_L3);
cudaFree(d_gama_L3);
cudaFree(d_coeff_freq_var_L3);
cudaFree(d_freq_bands_L3);
cudaFree(d_ori_rho_L3);
cudaFree(d_in_conv_inter_L3);
cudaFree(d_in_convolve_L3);
cudaFree(d_in_L3);
cudaFree(d_in_pad_L3);
cudaFree(d_rearr_in_L3);
cudaFree(d_in);
/*// stop timer
QueryPerformanceCounter(&t2);
// compute and print the elapsed time in millisec
elapsedTime = (t2.QuadPart - t1.QuadPart) * 1000.0 / frequency.QuadPart;
std::cout << elapsedTime << " ms.\n";
*/
float * sub_temp = (float *)malloc(25 * sizeof(float));
for (int i = 0; i < 24; i++){
sub_temp[i] = features[i] - bliinds_info::mu_vector[i];
}
float * mult_temp = (float *)malloc(25 * sizeof(float));
float product, max = 0, max_k, b = 1.0168, gama = 0.4200;
for (int k = 2; k <= 200; k++) {
sub_temp[24] = k / 2.0 - bliinds_info::mu_vector[24];
for (int i = 0; i < 25; i++) {
mult_temp[i] = 0;
for (int j = 0; j < 25; j++) {
mult_temp[i] += sub_temp[j] * bliinds_info::sigma_inv_vector[i + 25 * j];
}
}
product = 0;
for (int i = 0; i < 25; i++){
product += mult_temp[i] * sub_temp[i];
}
product = exp(-pow(b*product, gama));
if (product > max) {
max = product;
max_k = k / 2.0 - 1;
}
}
std::cout << "BLIINDS score: " << max_k << std::endl;
// stop timer
QueryPerformanceCounter(&t2);
// compute and print the elapsed time in millisec
elapsedTime = (t2.QuadPart - t1.QuadPart) * 1000.0 / frequency.QuadPart;
std::cout << elapsedTime << " ms.\n\n";
cudaDeviceSynchronize();
cudaProfilerStop();
} |
f3102d7f5b90d70b3db0263170ee455fd0a4290f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <sys/time.h>
#include "init/cParameters.h"
#include "init/cInit.h"
#include "init/cInitSizes.h"
#include "common/detectorData.h"
#include "common/cAdquisition.h"
#include "common/cROIfilter.h"
#include "common/refinementWrapper.h"
//#include "common/Camera.h"
//#include "common/IDSCamera.h"
#include "utils/cudaUtils.cuh"
#include "utils/cudaDataHandler.h"
#include "utils/nvtxHandler.h"
//#include "hip/hip_fp16.h"
#include "device/ImageProcessing/colorTransformation.cuh"
//////////////////////////////////////
// Type definition for the algorithms
//////////////////////////////////////
typedef uchar input_t;
typedef int desc_t;
typedef float roifeat_t;
//////////////////////////////////////
// Application entry point: runs the GPU object-detection pipeline
// (preprocessing -> image pyramid -> per-layer feature extraction and SVM
// classification -> ROI filtering -> non-maximum suppression) over a fixed
// number of frames and reports the achieved frame rate.
int main()
{
    // Read application parameters
    cParameters paramsHandle;
    paramsHandle.readParameters();
    parameters *params = paramsHandle.getParams();

    // Initialize acquisition handler and grab the first frame
    cAcquisition acquisition(params);
    cv::Mat *rawImg = acquisition.acquireFrameRGB();

    // Initialize dataSizes structure from the first frame's dimensions
    cInitSizes sizesHandler(params, rawImg->rows, rawImg->cols);
    dataSizes *dSizes = sizesHandler.getDsizes();

    // Initialize algorithm handler and the detector function pointers
    cInit init(params);
    detectorFunctions<input_t, desc_t, roifeat_t> detectorF;
    detectorF = init.algorithmHandler<input_t, desc_t, roifeat_t>();

    // Initialize algorithm data structures
    detectorData<input_t, desc_t, roifeat_t> detectData;
    detectorF.initPyramid(&detectData, dSizes, dSizes->pyr.pyramidLayers);
    detectorF.initFeatures(&detectData, dSizes, dSizes->pyr.pyramidLayers);
    detectorF.initClassifi(&detectData, dSizes, dSizes->pyr.pyramidLayers, params->pathToSVMmodel);

    // Initialize ROI filtering object
    cROIfilter<roifeat_t> ROIfilter(params, dSizes);

    // Initialize refinement (non-maximum suppression) object
    refinementWrapper refinement;

    // Set up CUDA configuration and device to be used
    cInitCuda cudaConf(params);
    cudaBlockConfig blkconfig = cudaConf.getBlockConfig();
    cudaConf.printDeviceInfo();

    // Create device data manager
    DeviceDataHandler devDataHandler;

    // Allocate RGB and GRAYSCALE raw images
    init.allocateRawImage<input_t>(&(detectData.rawImg), dSizes->rawSize); //TODO: add allocation on host
    init.allocateRawImage<input_t>(&(detectData.rawImgBW), dSizes->rawSizeBW);

    // Start the wall-clock timer. gettimeofday() gives microsecond
    // resolution; the original time()/difftime() pair only resolves whole
    // seconds, so a short benchmark run (5 frames) could report 0 s and the
    // FPS division then yielded inf.
    timeval startVal, endVal;
    gettimeofday(&startVal, 0);

    // Counter of processed frames
    int count = 0;
    const int iterations = 5;

    // Image processing loop
    while (!rawImg->empty() && count < iterations)
    {
        NVTXhandler lat(COLOR_GREEN, "latency");
        lat.nvtxStartEvent();
        count++;

        // Copy the current frame to the device
        copyHtoD<input_t>(detectData.rawImg, rawImg->data, dSizes->rawSize);

        // Input image preprocessing
        detectorF.preprocess(&detectData, dSizes, &blkconfig);

        // Compute the pyramid
        NVTXhandler pyramide(COLOR_ORANGE, "Pyramid");
        pyramide.nvtxStartEvent();
        detectorF.pyramid(&detectData, dSizes, &blkconfig);
        pyramide.nvtxStopEvent();

        // Detection algorithm for each pyramid layer
        for (uint i = 0; i < dSizes->pyr.pyramidLayers; i++) {
            detectorF.featureExtraction(&detectData, dSizes, i, &blkconfig);
            detectorF.classification(&detectData, dSizes, i, &blkconfig);
            // Bring the per-layer SVM scores back to the host ...
            copyDtoH<roifeat_t>(getOffset<roifeat_t>(ROIfilter.getHostScoresVector(), dSizes->svm.scoresElems, i),
                                getOffset<roifeat_t>(detectData.svm.ROIscores, dSizes->svm.scoresElems, i),
                                dSizes->svm.scoresElems[i]);
            // ... and keep the ROIs whose score passes the filter
            ROIfilter.roisDecision(i, dSizes->pyr.scalesResizeFactor[i], dSizes->pyr.xBorder, dSizes->pyr.yBorder, params->minRoiMargin);
        }

        // Non-maximum suppression refinement
        NVTXhandler nms(COLOR_BLUE, "Non maximum suppression");
        nms.nvtxStartEvent();
        refinement.AccRefinement(ROIfilter.getHitROIs());
        refinement.drawRois(*(acquisition.getCurrentFrame()));
        nms.nvtxStopEvent();

        // Clear the ROI vectors for the next iteration
        NVTXhandler clearVecs(COLOR_YELLOW, "Reset nms Vectors");
        clearVecs.nvtxStartEvent();
        ROIfilter.clearVector();
        refinement.clearVector();
        clearVecs.nvtxStopEvent();

        // Show the frame (display disabled while benchmarking)
        NVTXhandler showF(COLOR_RED, "Show frame");
        showF.nvtxStartEvent();
        //acquisition.showFrame();
        showF.nvtxStopEvent();

        // Acquisition of the next frame
        NVTXhandler frameTime(COLOR_GREEN, "Frame adquisition");
        frameTime.nvtxStartEvent();
        rawImg = acquisition.acquireFrameRGB();
        frameTime.nvtxStopEvent();

        // Reset device data structures
        NVTXhandler resetFeat(COLOR_ORANGE, "Reset device Features");
        resetFeat.nvtxStartEvent();
        detectorF.resetFeatures(&detectData, dSizes);
        resetFeat.nvtxStopEvent();

        lat.nvtxStopEvent();
    }

    // Stop the timer and report throughput over the frames actually
    // processed (the loop may exit early if acquisition runs out of frames).
    gettimeofday(&endVal, 0);
    double seconds = (endVal.tv_sec - startVal.tv_sec) +
                     (endVal.tv_usec - startVal.tv_usec) / 1000000.0;
    if (seconds > 0.0)
        cout << "FPS : " << count / seconds << endl;
    cout << "elapsed secs: " << seconds << endl;

    cudaErrorCheck();
    return 0;
}
| f3102d7f5b90d70b3db0263170ee455fd0a4290f.cu | #include <stdio.h>
#include <sys/time.h>
#include "init/cParameters.h"
#include "init/cInit.h"
#include "init/cInitSizes.h"
#include "common/detectorData.h"
#include "common/cAdquisition.h"
#include "common/cROIfilter.h"
#include "common/refinementWrapper.h"
//#include "common/Camera.h"
//#include "common/IDSCamera.h"
#include "utils/cudaUtils.cuh"
#include "utils/cudaDataHandler.h"
#include "utils/nvtxHandler.h"
//#include "cuda_fp16.h"
#include "device/ImageProcessing/colorTransformation.cuh"
//////////////////////////////////////
// Type definition for the algorithms
//////////////////////////////////////
typedef uchar input_t;
typedef int desc_t;
typedef float roifeat_t;
//////////////////////////////////////
// Application entry point: runs the GPU object-detection pipeline
// (preprocessing -> image pyramid -> per-layer feature extraction and SVM
// classification -> ROI filtering -> non-maximum suppression) over a fixed
// number of frames and reports the achieved frame rate.
int main()
{
    // Read application parameters
    cParameters paramsHandle;
    paramsHandle.readParameters();
    parameters *params = paramsHandle.getParams();

    // Initialize acquisition handler and grab the first frame
    cAcquisition acquisition(params);
    cv::Mat *rawImg = acquisition.acquireFrameRGB();

    // Initialize dataSizes structure from the first frame's dimensions
    cInitSizes sizesHandler(params, rawImg->rows, rawImg->cols);
    dataSizes *dSizes = sizesHandler.getDsizes();

    // Initialize algorithm handler and the detector function pointers
    cInit init(params);
    detectorFunctions<input_t, desc_t, roifeat_t> detectorF;
    detectorF = init.algorithmHandler<input_t, desc_t, roifeat_t>();

    // Initialize algorithm data structures
    detectorData<input_t, desc_t, roifeat_t> detectData;
    detectorF.initPyramid(&detectData, dSizes, dSizes->pyr.pyramidLayers);
    detectorF.initFeatures(&detectData, dSizes, dSizes->pyr.pyramidLayers);
    detectorF.initClassifi(&detectData, dSizes, dSizes->pyr.pyramidLayers, params->pathToSVMmodel);

    // Initialize ROI filtering object
    cROIfilter<roifeat_t> ROIfilter(params, dSizes);

    // Initialize refinement (non-maximum suppression) object
    refinementWrapper refinement;

    // Set up CUDA configuration and device to be used
    cInitCuda cudaConf(params);
    cudaBlockConfig blkconfig = cudaConf.getBlockConfig();
    cudaConf.printDeviceInfo();

    // Create device data manager
    DeviceDataHandler devDataHandler;

    // Allocate RGB and GRAYSCALE raw images
    init.allocateRawImage<input_t>(&(detectData.rawImg), dSizes->rawSize); //TODO: add allocation on host
    init.allocateRawImage<input_t>(&(detectData.rawImgBW), dSizes->rawSizeBW);

    // Start the wall-clock timer. gettimeofday() gives microsecond
    // resolution; the original time()/difftime() pair only resolves whole
    // seconds, so a short benchmark run (5 frames) could report 0 s and the
    // FPS division then yielded inf.
    timeval startVal, endVal;
    gettimeofday(&startVal, 0);

    // Counter of processed frames
    int count = 0;
    const int iterations = 5;

    // Image processing loop
    while (!rawImg->empty() && count < iterations)
    {
        NVTXhandler lat(COLOR_GREEN, "latency");
        lat.nvtxStartEvent();
        count++;

        // Copy the current frame to the device
        copyHtoD<input_t>(detectData.rawImg, rawImg->data, dSizes->rawSize);

        // Input image preprocessing
        detectorF.preprocess(&detectData, dSizes, &blkconfig);

        // Compute the pyramid
        NVTXhandler pyramide(COLOR_ORANGE, "Pyramid");
        pyramide.nvtxStartEvent();
        detectorF.pyramid(&detectData, dSizes, &blkconfig);
        pyramide.nvtxStopEvent();

        // Detection algorithm for each pyramid layer
        for (uint i = 0; i < dSizes->pyr.pyramidLayers; i++) {
            detectorF.featureExtraction(&detectData, dSizes, i, &blkconfig);
            detectorF.classification(&detectData, dSizes, i, &blkconfig);
            // Bring the per-layer SVM scores back to the host ...
            copyDtoH<roifeat_t>(getOffset<roifeat_t>(ROIfilter.getHostScoresVector(), dSizes->svm.scoresElems, i),
                                getOffset<roifeat_t>(detectData.svm.ROIscores, dSizes->svm.scoresElems, i),
                                dSizes->svm.scoresElems[i]);
            // ... and keep the ROIs whose score passes the filter
            ROIfilter.roisDecision(i, dSizes->pyr.scalesResizeFactor[i], dSizes->pyr.xBorder, dSizes->pyr.yBorder, params->minRoiMargin);
        }

        // Non-maximum suppression refinement
        NVTXhandler nms(COLOR_BLUE, "Non maximum suppression");
        nms.nvtxStartEvent();
        refinement.AccRefinement(ROIfilter.getHitROIs());
        refinement.drawRois(*(acquisition.getCurrentFrame()));
        nms.nvtxStopEvent();

        // Clear the ROI vectors for the next iteration
        NVTXhandler clearVecs(COLOR_YELLOW, "Reset nms Vectors");
        clearVecs.nvtxStartEvent();
        ROIfilter.clearVector();
        refinement.clearVector();
        clearVecs.nvtxStopEvent();

        // Show the frame (display disabled while benchmarking)
        NVTXhandler showF(COLOR_RED, "Show frame");
        showF.nvtxStartEvent();
        //acquisition.showFrame();
        showF.nvtxStopEvent();

        // Acquisition of the next frame
        NVTXhandler frameTime(COLOR_GREEN, "Frame adquisition");
        frameTime.nvtxStartEvent();
        rawImg = acquisition.acquireFrameRGB();
        frameTime.nvtxStopEvent();

        // Reset device data structures
        NVTXhandler resetFeat(COLOR_ORANGE, "Reset device Features");
        resetFeat.nvtxStartEvent();
        detectorF.resetFeatures(&detectData, dSizes);
        resetFeat.nvtxStopEvent();

        lat.nvtxStopEvent();
    }

    // Stop the timer and report throughput over the frames actually
    // processed (the loop may exit early if acquisition runs out of frames).
    gettimeofday(&endVal, 0);
    double seconds = (endVal.tv_sec - startVal.tv_sec) +
                     (endVal.tv_usec - startVal.tv_usec) / 1000000.0;
    if (seconds > 0.0)
        cout << "FPS : " << count / seconds << endl;
    cout << "elapsed secs: " << seconds << endl;

    cudaErrorCheck();
    return 0;
}
|
4e4deba208766ea6336a2cf283ab6d5310a7a111.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <iostream>
// includes, project
//#include <cutil.h>
// includes, kernels
#include "vector_reduction_kernel.hip"
#include "vector_reduction_gold.cpp"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int ReadFile(float*, char* file_name);
double computeOnDevice(double* h_data, long long array_mem_size);
extern "C" void computeGold( double* reference, double* idata, const long long len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: validates the argument count and hands control to runTest().
// Expects at least two arguments (element count and value bound).
int
main( int argc, char* argv[])
{
    // Guard clause: fail fast when too few arguments were supplied.
    if (argc <= 2) {
        printf("Not enough arguments \n");
        return 1;
    }
    runTest(argc, argv);
    return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run test
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char* argv[])
{
long long num_elements;
int max;
hipError_t error;
num_elements = strtoll(argv[1],NULL,10);
if(num_elements < 0) num_elements = (0 - num_elements);
max = atoi(argv[2]);
if(max < 0) max = (0 - max);
const long long array_mem_size = sizeof(double) * num_elements;
// allocate host memory to store the input data
double* h_data;
error = hipHostMalloc(&h_data, array_mem_size);
if(error != hipSuccess)
{
printf("hipHostMalloc returned error code %d, line(%d) \n", error, __LINE__);
printf("Array must be too large \n");
exit(EXIT_FAILURE);
}
// initialize the input data on the host to be float values
// between -M and M
for( long i = 0; i < num_elements; ++i)
{
h_data[i] = 2.0*max*(rand()/(double)RAND_MAX) - max;
}
//Start cpu timing here
hipEvent_t startCPU, stopCPU;
hipEventCreate(&startCPU);
hipEventCreate(&stopCPU);
hipEventRecord(startCPU, 0);
// compute reference solution
double reference = 0.0;
computeGold(&reference , h_data, num_elements);
//Stop cpu timing here
hipEventRecord(stopCPU, 0);
hipEventSynchronize(stopCPU);
float cpuTime;
hipEventElapsedTime(&cpuTime, startCPU, stopCPU);
hipEventDestroy(startCPU);
hipEventDestroy(stopCPU);
printf("CPU time: %f ms. \n", cpuTime);
// **===-------- Modify the body of this function -----------===**
double result = computeOnDevice(h_data, num_elements);
// **===-----------------------------------------------------------===**
// Run accuracy test
//float epsilon = 0.0001f;
//unsigned int result_regtest = (abs(result - reference) <= epsilon);
//printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
printf( "device: %f host: %f\n", result, reference);
// cleanup memory
hipFree( h_data);
}
// **===----------------- Modify this function ---------------------===**
// Take h_data from host, copies it to device, setup grid and thread
// dimensions, excutes kernel function, and copy result of scan back
// to h_data.
// Note: float* h_data is both the input and the output of this function.
double computeOnDevice(double* h_data, long long num_elements)
{
//Allocate memory on the device
double *d_data;
hipError_t errord;
errord = hipMalloc((void**) &d_data, sizeof(double)*num_elements);
if(errord != hipSuccess)
{
printf("hipMalloc returned error code %d, line(%d) \n", errord, __LINE__);
printf("Array must be too large \n");
exit(EXIT_FAILURE);
}
//Start inclusive timing here
hipEvent_t startIn, stopIn;
hipEventCreate(&startIn);
hipEventCreate(&stopIn);
hipEventRecord(startIn, 0);
//Copy onto the device
hipMemcpy(d_data, h_data, sizeof(double)*num_elements, hipMemcpyHostToDevice);
//Start exclusive timing here
hipEvent_t startEx, stopEx;
hipEventCreate(&startEx);
hipEventCreate(&stopEx);
hipEventRecord(startEx, 0);
//Use kernel to compute the reduction
int blocksx, blocksy, blocks;
int threads = 512;
int nestElements = num_elements;
blocksx = (nestElements+511)/threads;
blocks = blocksx;
blocksy = 1;
if (blocksx > 32768) {
blocksy = (blocksx+32767)/32768;
blocksx = 32768;
}
dim3 dimGrid(blocksx,blocksy);
while(nestElements > 1)
{
/* Recursive implementation to compute the reduction
*/
hipLaunchKernelGGL(( reduction) , dim3(dimGrid),dim3(threads), 0, 0, d_data, nestElements);
nestElements = blocks;
blocksx = (nestElements+511)/threads;
blocks = blocksx;
blocksy = 1;
if (blocksx > 32768) {
blocksy = (blocksx+32767)/32768;
blocksx = 32768;
}
dim3 dimGrid(blocksx, blocksy);
}
//Stop exclusive timing here
hipEventRecord(stopEx, 0);
hipEventSynchronize(stopEx);
float exTime;
hipEventElapsedTime(&exTime, startEx, stopEx);
hipEventDestroy(startEx);
hipEventDestroy(stopEx);
//Copy back to the device
hipMemcpy(h_data, d_data, sizeof(double)*num_elements, hipMemcpyDeviceToHost);
//Stop inclusive timing here
hipEventRecord(stopIn, 0);
hipEventSynchronize(stopIn);
float inTime;
hipEventElapsedTime(&inTime, startIn, stopIn);
hipEventDestroy(startIn);
hipEventDestroy(stopIn);
//Output timing
printf("Inclusive time: %f ms. \n", inTime);
printf("Exclusive time: %f ms. \n", exTime);
// Return final result
return h_data[0];
}
| 4e4deba208766ea6336a2cf283ab6d5310a7a111.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <iostream>
// includes, project
//#include <cutil.h>
// includes, kernels
#include "vector_reduction_kernel.cu"
#include "vector_reduction_gold.cpp"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int ReadFile(float*, char* file_name);
double computeOnDevice(double* h_data, long long array_mem_size);
extern "C" void computeGold( double* reference, double* idata, const long long len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: validates the argument count and hands control to runTest().
// Expects at least two arguments (element count and value bound).
int
main( int argc, char* argv[])
{
    // Guard clause: fail fast when too few arguments were supplied.
    if (argc <= 2) {
        printf("Not enough arguments \n");
        return 1;
    }
    runTest(argc, argv);
    return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run test
////////////////////////////////////////////////////////////////////////////////
// Builds a random input array of `argv[1]` doubles in [-argv[2], argv[2]],
// times a host (computeGold) and a device (computeOnDevice) reduction, and
// prints both results for comparison.
void
runTest( int argc, char* argv[])
{
    long long num_elements;
    int max;
    cudaError_t error;

    // Parse the element count and value bound; negative inputs are folded
    // to their absolute value.
    num_elements = strtoll(argv[1], NULL, 10);
    if (num_elements < 0) num_elements = (0 - num_elements);
    max = atoi(argv[2]);
    if (max < 0) max = (0 - max);

    const long long array_mem_size = sizeof(double) * num_elements;

    // Allocate pinned host memory to store the input data
    double* h_data;
    error = cudaMallocHost(&h_data, array_mem_size);
    if (error != cudaSuccess)
    {
        printf("cudaMallocHost returned error code %d, line(%d) \n", error, __LINE__);
        printf("Array must be too large \n");
        exit(EXIT_FAILURE);
    }

    // Initialize the input data on the host to double values
    // uniformly distributed in [-max, max]
    for (long i = 0; i < num_elements; ++i)
    {
        h_data[i] = 2.0*max*(rand()/(double)RAND_MAX) - max;
    }

    // Start CPU timing here (CUDA events used purely as a portable timer)
    cudaEvent_t startCPU, stopCPU;
    cudaEventCreate(&startCPU);
    cudaEventCreate(&stopCPU);
    cudaEventRecord(startCPU, 0);

    // Compute the reference solution on the host
    double reference = 0.0;
    computeGold(&reference, h_data, num_elements);

    // Stop CPU timing here
    cudaEventRecord(stopCPU, 0);
    cudaEventSynchronize(stopCPU);
    float cpuTime;
    cudaEventElapsedTime(&cpuTime, startCPU, stopCPU);
    cudaEventDestroy(startCPU);
    cudaEventDestroy(stopCPU);
    printf("CPU time: %f ms. \n", cpuTime);

    // Compute the reduction on the device
    double result = computeOnDevice(h_data, num_elements);

    printf( "device: %f host: %f\n", result, reference);

    // Cleanup: memory obtained from cudaMallocHost must be released with
    // cudaFreeHost. The original code called cudaFree, which is only valid
    // for device allocations made with cudaMalloc.
    cudaFreeHost(h_data);
}
// **===----------------- Modify this function ---------------------===**
// Take h_data from host, copies it to device, setup grid and thread
// dimensions, excutes kernel function, and copy result of scan back
// to h_data.
// Note: float* h_data is both the input and the output of this function.
// Copies h_data to the device, iteratively launches the `reduction` kernel
// until a single value remains, copies the buffer back, and returns the
// total found in h_data[0]. Prints inclusive (transfers + kernels) and
// exclusive (kernels only) timings.
// Note: h_data is both the input and the output of this function.
double computeOnDevice(double* h_data, long long num_elements)
{
    // Allocate memory on the device
    double *d_data;
    cudaError_t errord;
    errord = cudaMalloc((void**) &d_data, sizeof(double)*num_elements);
    if (errord != cudaSuccess)
    {
        printf("cudaMalloc returned error code %d, line(%d) \n", errord, __LINE__);
        printf("Array must be too large \n");
        exit(EXIT_FAILURE);
    }

    // Start inclusive timing (covers host<->device transfers + kernels)
    cudaEvent_t startIn, stopIn;
    cudaEventCreate(&startIn);
    cudaEventCreate(&stopIn);
    cudaEventRecord(startIn, 0);

    // Copy the input onto the device
    cudaMemcpy(d_data, h_data, sizeof(double)*num_elements, cudaMemcpyHostToDevice);

    // Start exclusive timing (kernel work only)
    cudaEvent_t startEx, stopEx;
    cudaEventCreate(&startEx);
    cudaEventCreate(&stopEx);
    cudaEventRecord(startEx, 0);

    // Iteratively reduce: each pass folds `nestElements` values into one
    // partial result per block (512 threads each) until one value remains.
    // NOTE(review): nestElements is an int, so inputs beyond INT_MAX
    // elements would overflow -- confirm against expected problem sizes.
    int blocksx, blocksy, blocks;
    int threads = 512;
    int nestElements = num_elements;
    blocksx = (nestElements+511)/threads;
    blocks = blocksx;
    blocksy = 1;
    if (blocksx > 32768) {
        blocksy = (blocksx+32767)/32768;
        blocksx = 32768;
    }
    dim3 dimGrid(blocksx,blocksy);
    while (nestElements > 1)
    {
        reduction <<<dimGrid,threads>>> (d_data, nestElements);
        nestElements = blocks;
        blocksx = (nestElements+511)/threads;
        blocks = blocksx;
        blocksy = 1;
        if (blocksx > 32768) {
            blocksy = (blocksx+32767)/32768;
            blocksx = 32768;
        }
        // Update the existing launch grid for the next pass. The original
        // code declared a new local `dim3 dimGrid(...)` here, which shadowed
        // the outer variable and was immediately discarded, so every launch
        // after the first reused the stale, oversized first-pass grid.
        dimGrid = dim3(blocksx, blocksy);
    }

    // Stop exclusive timing
    cudaEventRecord(stopEx, 0);
    cudaEventSynchronize(stopEx);
    float exTime;
    cudaEventElapsedTime(&exTime, startEx, stopEx);
    cudaEventDestroy(startEx);
    cudaEventDestroy(stopEx);

    // Copy the result back to the host
    cudaMemcpy(h_data, d_data, sizeof(double)*num_elements, cudaMemcpyDeviceToHost);

    // Stop inclusive timing
    cudaEventRecord(stopIn, 0);
    cudaEventSynchronize(stopIn);
    float inTime;
    cudaEventElapsedTime(&inTime, startIn, stopIn);
    cudaEventDestroy(startIn);
    cudaEventDestroy(stopIn);

    // Release the device buffer (leaked in the original code)
    cudaFree(d_data);

    // Output timing
    printf("Inclusive time: %f ms. \n", inTime);
    printf("Exclusive time: %f ms. \n", exTime);

    // The reduction leaves the total in element 0
    return h_data[0];
}
|
979b66fd20e858d32f94f3634787fc2143cad68b.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/NumericUtils.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/NumericUtils.h>
#include <c10/util/complex.h>
namespace at {
namespace native {
// Elementwise bitwise/logical NOT.
// Bool tensors get logical negation (!a); integral dtypes get one's
// complement (~a). Any other dtype is rejected by the dispatch macro.
void bitwise_not_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
// Elementwise natural exponential. Dispatches over floating, complex,
// Half and BFloat16 dtypes; ::exp resolves to the device overload.
void exp_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exp_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp(a);
});
});
}
// Elementwise base-2 exponential (2^x) for floating types and Half.
void exp2_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "exp2_cuda", [&]() {
    gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { return ::exp2(x); });
  });
}
// Elementwise expm1(x) = e^x - 1 (accurate for small x) for floating types and Half.
void expm1_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "expm1_cuda", [&]() {
    gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { return ::expm1(x); });
  });
}
// Elementwise zeroth-order modified Bessel function of the first kind.
// calc_i0 comes from the Math.cuh header included above.
void i0_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "i0_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0(a);
});
});
}
// We manually overload rsqrt because std::rsqrt does not work with complex types.
// Real/scalar overload: forwards to the ::rsqrt device overload.
template<typename scalar_t>
__host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) {
return ::rsqrt(v);
}
// Complex overload: computed as 1 / sqrt(v), since no complex rsqrt exists.
template<typename T>
__host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) {
const c10::complex<T> one = c10::complex<T>(1.0, 0);
// std::sqrt for c10::complex is overloaded in c10/util/complex_math.h
return one / ::sqrt(v);
}
// Elementwise reciprocal square root; rsqrt_wrapper above selects the real
// or complex overload. Uses common_dtype so mixed inputs promote first.
void rsqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
return rsqrt_wrapper(a);
});
});
}
// Elementwise square root for floating, complex, Half and BFloat16 dtypes.
void sqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sqrt(a);
});
});
}
// Elementwise logistic sigmoid: 1 / (1 + e^-x).
void sigmoid_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
scalar_t one = scalar_t(1);
return one / (one + ::exp(- a));
});
});
}
// Elementwise logit: log(x / (1 - x)), computed in the accumulate type.
// eps < 0 disables clamping (out-of-domain inputs yield NaN/Inf);
// otherwise x is clamped to [eps, 1 - eps] before the transform.
void logit_kernel_cuda(TensorIterator& iter, Scalar eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
if (eps < T_ACC(0)) {
// Unclamped path.
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
return c10::hip::compat::log(x_acc / (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
// Clamp into [lo, hi] before taking the log-odds.
T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc);
return c10::hip::compat::log(z / (T_ACC(1) - z));
});
}
});
}
// Elementwise Gauss error function erf(x).
void erf_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
}
// Elementwise complementary error function, erfc(x) = 1 - erf(x).
void erfc_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfc_cuda", [&]() {
    gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { return ::erfc(x); });
  });
}
// Elementwise inverse error function.
void erfinv_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() {
    gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { return ::erfinv(x); });
  });
}
// Elementwise clamp into [min_value, max_value]; NaN inputs pass through.
void clamp_kernel_cuda(TensorIterator& iter, Scalar min_value, Scalar max_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
// Elementwise lower clamp: max(v, min_value); NaN inputs pass through.
void clamp_min_kernel_cuda(TensorIterator& iter, Scalar min_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
// Elementwise upper clamp: min(v, max_value); NaN inputs pass through.
void clamp_max_kernel_cuda(TensorIterator& iter, Scalar max_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() {
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
// Replaces NaN with `nan` (default 0), +Inf with `pos_inf` (default dtype
// max) and -Inf with `neg_inf` (default dtype lowest), elementwise.
void nan_to_num_kernel_cuda(
TensorIterator& iter,
c10::optional<double> nan,
c10::optional<double> pos_inf,
c10::optional<double> neg_inf) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "nan_to_num_cuda", [&]() {
scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.));
scalar_t pos_inf_replacement = pos_inf.has_value()
? static_cast<scalar_t>(pos_inf.value())
: std::numeric_limits<scalar_t>::max();
scalar_t neg_inf_replacement = neg_inf.has_value()
? static_cast<scalar_t>(neg_inf.value())
: std::numeric_limits<scalar_t>::lowest();
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t {
// Nested conditional: NaN first, then +Inf, then -Inf, else identity.
return (
at::_isnan(a)
? nan_replacement
: (a == std::numeric_limits<scalar_t>::infinity()
? pos_inf_replacement
: (a == -std::numeric_limits<scalar_t>::infinity()
? neg_inf_replacement
: a)));
});
});
}
// Fills the output with Kaiser window values:
//   w[a] = I0(beta * sqrt(1 - (a * 2/(L-1) - 1)^2)) / I0(beta)
// where I0 is the zeroth-order modified Bessel function (calc_i0).
// NOTE(review): window_length == 1 makes inv_alpha a division by zero here;
// presumably that case is handled before dispatch -- confirm at the caller.
void kaiser_window_kernel_cuda(TensorIterator& iter, int64_t window_length, double beta_){
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using T_ACC = acc_type<scalar_t, true>;
const T_ACC inv_alpha = static_cast<T_ACC>(2.0 / (window_length - 1));
const T_ACC beta = static_cast<T_ACC>(beta_);
const T_ACC inv_i0_beta = 1.0 / calc_i0(beta);
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t {
T_ACC x = static_cast<T_ACC>(a) * inv_alpha - 1;
// Clamp the radicand at 0 to guard against rounding just outside [-1, 1].
T_ACC y = std::max<T_ACC>(0, 1 - x * x);
return calc_i0(beta * ::sqrt(y)) * inv_i0_beta;
});
});
}
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda);
REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda);
REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda);
REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda);
REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda);
REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda);
} // namespace native
} // namespace at
| 979b66fd20e858d32f94f3634787fc2143cad68b.cu | #include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/NumericUtils.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/NumericUtils.h>
#include <c10/util/complex.h>
namespace at {
namespace native {
void bitwise_not_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
void exp_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exp_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp(a);
});
});
}
void exp2_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "exp2_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp2(a);
});
});
}
void expm1_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "expm1_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::expm1(a);
});
});
}
void i0_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "i0_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0(a);
});
});
}
// We manually overload rsqrt because std::rsqrt does not work with complex types.
template<typename scalar_t>
__host__ __device__ static inline scalar_t rsqrt_wrapper(scalar_t v) {
return ::rsqrt(v);
}
template<typename T>
__host__ __device__ static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) {
const c10::complex<T> one = c10::complex<T>(1.0, 0);
// std::sqrt for c10::complex is overloaded in c10/util/complex_math.h
return one / ::sqrt(v);
}
void rsqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.common_dtype(), "rsqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
return rsqrt_wrapper(a);
});
});
}
void sqrt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "sqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sqrt(a);
});
});
}
// Elementwise logistic sigmoid: 1 / (1 + e^-x).
void sigmoid_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
scalar_t one = scalar_t(1);
return one / (one + std::exp(- a));
});
});
}
void logit_kernel_cuda(TensorIterator& iter, Scalar eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
if (eps < T_ACC(0)) {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
return c10::cuda::compat::log(x_acc / (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc);
return c10::cuda::compat::log(z / (T_ACC(1) - z));
});
}
});
}
void erf_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
}
void erfc_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfc(a);
});
});
}
void erfinv_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
}
void clamp_kernel_cuda(TensorIterator& iter, Scalar min_value, Scalar max_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_min_kernel_cuda(TensorIterator& iter, Scalar min_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_max_kernel_cuda(TensorIterator& iter, Scalar max_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() {
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
void nan_to_num_kernel_cuda(
TensorIterator& iter,
c10::optional<double> nan,
c10::optional<double> pos_inf,
c10::optional<double> neg_inf) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "nan_to_num_cuda", [&]() {
scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.));
scalar_t pos_inf_replacement = pos_inf.has_value()
? static_cast<scalar_t>(pos_inf.value())
: std::numeric_limits<scalar_t>::max();
scalar_t neg_inf_replacement = neg_inf.has_value()
? static_cast<scalar_t>(neg_inf.value())
: std::numeric_limits<scalar_t>::lowest();
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t {
return (
at::_isnan(a)
? nan_replacement
: (a == std::numeric_limits<scalar_t>::infinity()
? pos_inf_replacement
: (a == -std::numeric_limits<scalar_t>::infinity()
? neg_inf_replacement
: a)));
});
});
}
// Fills the output with Kaiser window values:
//   w[a] = I0(beta * sqrt(1 - (a * 2/(L-1) - 1)^2)) / I0(beta)
// where I0 is the zeroth-order modified Bessel function (calc_i0).
// NOTE(review): window_length == 1 makes inv_alpha a division by zero here;
// presumably that case is handled before dispatch -- confirm at the caller.
void kaiser_window_kernel_cuda(TensorIterator& iter, int64_t window_length, double beta_){
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using T_ACC = acc_type<scalar_t, true>;
const T_ACC inv_alpha = static_cast<T_ACC>(2.0 / (window_length - 1));
const T_ACC beta = static_cast<T_ACC>(beta_);
const T_ACC inv_i0_beta = 1.0 / calc_i0(beta);
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t {
T_ACC x = static_cast<T_ACC>(a) * inv_alpha - 1;
// Clamp the radicand at 0 to guard against rounding just outside [-1, 1].
T_ACC y = std::max<T_ACC>(0, 1 - x * x);
return calc_i0(beta * ::sqrt(y)) * inv_i0_beta;
});
});
}
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda);
REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_cuda);
REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_cuda);
REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_cuda);
REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda);
REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda);
} // namespace native
} // namespace at
|
4287b90add0243d07b5121103ea21cc9bbfb766a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/concatenate.cuh>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/dictionary/detail/concatenate.hpp>
#include <cudf/lists/detail/concatenate.hpp>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <thrust/binary_search.h>
#include <thrust/transform_scan.h>
#include <algorithm>
#include <numeric>
#include <utility>
namespace cudf {
namespace detail {
// Benchmark-derived heuristic: the fused concatenate kernel (which also
// builds the null mask in the same pass) pays off whenever a null mask is
// required, or once the column count is more than trivial (> 4).
constexpr bool use_fused_kernel_heuristic(bool const has_nulls, size_t const num_columns)
{
  return num_columns > 4 || has_nulls;
}
// Builds a column_device_view for every input view, uploads them to a
// contiguous device array, and computes an inclusive prefix sum of the
// column sizes (the partition offsets used by the concatenate kernels).
// Returns (view owners, d_views, d_offsets, total output row count); the
// owners element must stay alive for as long as d_views is in use.
auto create_device_views(std::vector<column_view> const& views, hipStream_t stream)
{
// Create device views for each input view
using CDViewPtr =
decltype(column_device_view::create(std::declval<column_view>(), std::declval<hipStream_t>()));
auto device_view_owners = std::vector<CDViewPtr>(views.size());
std::transform(
views.cbegin(), views.cend(), device_view_owners.begin(), [stream](auto const& col) {
// TODO creating this device view can invoke null count computation
// even though it isn't used. See this issue:
// https://github.com/rapidsai/cudf/issues/4368
return column_device_view::create(col, stream);
});
// Assemble contiguous array of device views
auto device_views = thrust::host_vector<column_device_view>();
device_views.reserve(views.size());
std::transform(device_view_owners.cbegin(),
device_view_owners.cend(),
std::back_inserter(device_views),
[](auto const& col) { return *col; });
// TODO each of these device vector copies invoke stream synchronization
// which appears to add unnecessary overhead. See this issue:
// https://github.com/rapidsai/rmm/issues/120
auto d_views = rmm::device_vector<column_device_view>{device_views};
// Compute the partition offsets (offsets[0] stays 0; inclusive scan fills the rest)
auto offsets = thrust::host_vector<size_t>(views.size() + 1);
thrust::transform_inclusive_scan(
thrust::host,
device_views.cbegin(),
device_views.cend(),
std::next(offsets.begin()),
[](auto const& col) { return col.size(); },
thrust::plus<size_t>{});
auto const d_offsets = rmm::device_vector<size_t>{offsets};
auto const output_size = offsets.back();
return std::make_tuple(
std::move(device_view_owners), std::move(d_views), std::move(d_offsets), output_size);
}
/**
 * @brief Concatenates the null mask bits of all the column device views in the
 * `views` array to the destination bitmask.
 *
 * Grid-stride loop over all output mask bits; each warp ballots its 32
 * validity bits into a single bitmask word, written by lane 0.
 *
 * @param views Array of column_device_view
 * @param output_offsets Prefix sum of sizes of elements of `views`
 * @param number_of_views Size of `views` array
 * @param dest_mask The output buffer to copy null masks into
 * @param number_of_mask_bits The total number of null masks bits that are being
 * copied
 **/
__global__ void concatenate_masks_kernel(column_device_view const* views,
size_t const* output_offsets,
size_type number_of_views,
bitmask_type* dest_mask,
size_type number_of_mask_bits)
{
size_type mask_index = threadIdx.x + blockIdx.x * blockDim.x;
auto active_mask = __ballot_sync(0xFFFF'FFFF, mask_index < number_of_mask_bits);
while (mask_index < number_of_mask_bits) {
// Binary-search the offsets to find which input view owns this bit.
size_type const source_view_index =
thrust::upper_bound(
thrust::seq, output_offsets, output_offsets + number_of_views, mask_index) -
output_offsets - 1;
bool bit_is_set = 1;
if (source_view_index < number_of_views) {
size_type const column_element_index = mask_index - output_offsets[source_view_index];
bit_is_set = views[source_view_index].is_valid(column_element_index);
}
// Warp-collective: gather 32 validity bits into one word; lane 0 stores it.
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
if (threadIdx.x % detail::warp_size == 0) { dest_mask[word_index(mask_index)] = new_word; }
mask_index += blockDim.x * gridDim.x;
active_mask = __ballot_sync(active_mask, mask_index < number_of_mask_bits);
}
}
// Launches concatenate_masks_kernel over `output_size` mask bits using
// already-uploaded device views and their partition offsets.
void concatenate_masks(rmm::device_vector<column_device_view> const& d_views,
rmm::device_vector<size_t> const& d_offsets,
bitmask_type* dest_mask,
size_type output_size,
hipStream_t stream)
{
constexpr size_type block_size{256};
cudf::detail::grid_1d config(output_size, block_size);
hipLaunchKernelGGL(( concatenate_masks_kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream,
d_views.data().get(),
d_offsets.data().get(),
static_cast<size_type>(d_views.size()),
dest_mask,
output_size);
}
// Convenience overload: uploads views and offsets to the device, then
// delegates to the device-vector overload above.
void concatenate_masks(std::vector<column_view> const& views,
bitmask_type* dest_mask,
hipStream_t stream)
{
// Preprocess and upload inputs to device memory
auto const device_views = create_device_views(views, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_offsets = std::get<2>(device_views);
auto const output_size = std::get<3>(device_views);
concatenate_masks(d_views, d_offsets, dest_mask, output_size, stream);
}
// Fused copy + null-mask concatenation kernel: each thread grid-strides over
// the output, binary-searches the partition offsets for its source view,
// copies the element, and (when Nullable) ballots the warp's validity bits
// into the output null mask while tallying a per-warp valid count.
template <typename T, size_type block_size, bool Nullable>
__global__ void fused_concatenate_kernel(column_device_view const* input_views,
size_t const* input_offsets,
size_type num_input_views,
mutable_column_device_view output_view,
size_type* out_valid_count)
{
auto const output_size = output_view.size();
auto* output_data = output_view.data<T>();
size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
size_type warp_valid_count = 0;
unsigned active_mask;
if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFF, output_index < output_size); }
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
// thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
auto const offset_it =
-1 + thrust::upper_bound(
thrust::seq, input_offsets, input_offsets + num_input_views, output_index);
size_type const partition_index = offset_it - input_offsets;
// Copy input data to output
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
auto const* input_data = input_view.data<T>();
output_data[output_index] = input_data[offset_index];
if (Nullable) {
bool const bit_is_set = input_view.is_valid(offset_index);
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
// First thread writes bitmask word
if (threadIdx.x % detail::warp_size == 0) {
output_view.null_mask()[word_index(output_index)] = new_word;
}
warp_valid_count += __popc(new_word);
}
output_index += blockDim.x * gridDim.x;
if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
}
if (Nullable) {
// Block-level reduction of per-warp valid counts; one atomicAdd per block.
using detail::single_lane_block_sum_reduce;
auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
}
// Single-kernel concatenate for fixed-width types: copies data and (when
// has_nulls) builds the output null mask in the same pass. Chosen by the
// dispatch functor when the fusion heuristic says it beats per-column copy.
template <typename T>
std::unique_ptr<column> fused_concatenate(std::vector<column_view> const& views,
bool const has_nulls,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
using mask_policy = cudf::mask_allocation_policy;
// Preprocess and upload inputs to device memory
auto const device_views = create_device_views(views, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_offsets = std::get<2>(device_views);
auto const output_size = std::get<3>(device_views);
CUDF_EXPECTS(output_size < std::numeric_limits<size_type>::max(),
"Total number of concatenated rows exceeds size_type range");
// Allocate output
auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
auto out_col = detail::allocate_like(views.front(), output_size, policy, mr, stream);
out_col->set_null_count(0); // prevent null count from being materialized
auto out_view = out_col->mutable_view();
auto d_out_view = mutable_column_device_view::create(out_view, stream);
rmm::device_scalar<size_type> d_valid_count(0);
// Launch kernel
constexpr size_type block_size{256};
cudf::detail::grid_1d config(output_size, block_size);
auto const kernel = has_nulls ? fused_concatenate_kernel<T, block_size, true>
: fused_concatenate_kernel<T, block_size, false>;
hipLaunchKernelGGL(( kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream,
d_views.data().get(),
d_offsets.data().get(),
static_cast<size_type>(d_views.size()),
*d_out_view,
d_valid_count.data());
// null count = rows - valids; value(stream) synchronizes on the stream.
if (has_nulls) { out_col->set_null_count(output_size - d_valid_count.value(stream)); }
return out_col;
}
// Fallback concatenate for fixed-width types: one thrust::copy per input
// view, then a separate null-mask concatenation pass when needed.
template <typename T>
std::unique_ptr<column> for_each_concatenate(std::vector<column_view> const& views,
bool const has_nulls,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
size_type const total_element_count =
std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
return accumulator + v.size();
});
using mask_policy = cudf::mask_allocation_policy;
auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
auto col = cudf::allocate_like(views.front(), total_element_count, policy, mr);
col->set_null_count(0); // prevent null count from being materialized...
auto m_view = col->mutable_view(); // ...when we take a mutable view
auto count = 0;
// Append each input view's data at its running offset in the output.
for (auto& v : views) {
thrust::copy(
rmm::exec_policy()->on(stream), v.begin<T>(), v.end<T>(), m_view.begin<T>() + count);
count += v.size();
}
// If concatenated column is nullable, proceed to calculate it
if (has_nulls) {
cudf::detail::concatenate_masks(views, (col->mutable_view()).null_mask(), stream);
}
return col;
}
// Type-dispatch functor for concatenate. The generic operator() handles
// fixed-width types, choosing between the fused kernel and per-column copy;
// strings, dictionaries and lists use the explicit specializations below.
struct concatenate_dispatch {
std::vector<column_view> const& views;
rmm::mr::device_memory_resource* mr;
hipStream_t stream;
// fixed width
template <typename T>
std::unique_ptr<column> operator()()
{
bool const has_nulls =
std::any_of(views.cbegin(), views.cend(), [](auto const& col) { return col.has_nulls(); });
// Use a heuristic to guess when the fused kernel will be faster
if (use_fused_kernel_heuristic(has_nulls, views.size())) {
return fused_concatenate<T>(views, has_nulls, mr, stream);
} else {
return for_each_concatenate<T>(views, has_nulls, mr, stream);
}
}
};
// Variable-width and nested types delegate to their type-specific
// concatenate implementations (note differing stream/mr argument orders).
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::dictionary32>()
{
return cudf::dictionary::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::string_view>()
{
return cudf::strings::detail::concatenate(views, mr, stream);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::list_view>()
{
return cudf::lists::detail::concatenate(views, stream, mr);
}
// Concatenates the elements from a vector of column_views.
// Preconditions: at least one input, and all inputs share the same dtype.
// All-empty inputs short-circuit to an empty column of that dtype.
std::unique_ptr<column> concatenate(std::vector<column_view> const& columns_to_concat,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(not columns_to_concat.empty(), "Unexpected empty list of columns to concatenate.");
data_type const type = columns_to_concat.front().type();
CUDF_EXPECTS(std::all_of(columns_to_concat.begin(),
columns_to_concat.end(),
[&type](auto const& c) { return c.type() == type; }),
"Type mismatch in columns to concatenate.");
if (std::all_of(columns_to_concat.begin(), columns_to_concat.end(), [](column_view const& c) {
return c.is_empty();
})) {
return empty_like(columns_to_concat.front());
}
return type_dispatcher(type, concatenate_dispatch{columns_to_concat, mr, stream});
}
// Row-wise concatenation of tables: every input must have the same column
// count and column types; column i of the result is the concatenation of
// column i of every input table.
std::unique_ptr<table> concatenate(std::vector<table_view> const& tables_to_concat,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
if (tables_to_concat.empty()) { return std::make_unique<table>(); }
table_view const first_table = tables_to_concat.front();
CUDF_EXPECTS(std::all_of(tables_to_concat.cbegin(),
tables_to_concat.cend(),
[&first_table](auto const& t) {
return t.num_columns() == first_table.num_columns() &&
have_same_types(first_table, t);
}),
"Mismatch in table columns to concatenate.");
std::vector<std::unique_ptr<column>> concat_columns;
for (size_type i = 0; i < first_table.num_columns(); ++i) {
std::vector<column_view> cols;
std::transform(tables_to_concat.cbegin(),
tables_to_concat.cend(),
std::back_inserter(cols),
[i](auto const& t) { return t.column(i); });
concat_columns.emplace_back(detail::concatenate(cols, mr, stream));
}
return std::make_unique<table>(std::move(concat_columns));
}
} // namespace detail
// Public API: builds a concatenated null mask for `views` on the default
// stream, or returns an empty buffer when no input has nulls.
rmm::device_buffer concatenate_masks(std::vector<column_view> const& views,
rmm::mr::device_memory_resource* mr)
{
bool const has_nulls =
std::any_of(views.begin(), views.end(), [](const column_view col) { return col.has_nulls(); });
if (has_nulls) {
size_type const total_element_count =
std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
return accumulator + v.size();
});
rmm::device_buffer null_mask =
create_null_mask(total_element_count, mask_state::UNINITIALIZED, 0, mr);
// Public stream-less API: runs on the default stream (0).
detail::concatenate_masks(views, static_cast<bitmask_type*>(null_mask.data()), 0);
return null_mask;
}
// no nulls, so return an empty device buffer
return rmm::device_buffer{0, (hipStream_t)0, mr};
}
// Concatenates the elements from a vector of column_views
// Public API wrapper: NVTX range + default stream, delegates to detail::.
std::unique_ptr<column> concatenate(std::vector<column_view> const& columns_to_concat,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(columns_to_concat, mr, 0);
}
// Public API wrapper for table concatenation: NVTX range + default stream.
std::unique_ptr<table> concatenate(std::vector<table_view> const& tables_to_concat,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(tables_to_concat, mr, 0);
}
} // namespace cudf
| 4287b90add0243d07b5121103ea21cc9bbfb766a.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/concatenate.cuh>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/dictionary/detail/concatenate.hpp>
#include <cudf/lists/detail/concatenate.hpp>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <thrust/binary_search.h>
#include <thrust/transform_scan.h>
#include <algorithm>
#include <numeric>
#include <utility>
namespace cudf {
namespace detail {
// Benchmark-derived heuristic: the fused concatenate kernel (which also
// builds the null mask in the same pass) pays off whenever a null mask is
// required, or once the column count is more than trivial (> 4).
constexpr bool use_fused_kernel_heuristic(bool const has_nulls, size_t const num_columns)
{
  return num_columns > 4 || has_nulls;
}
// Builds the device-side inputs shared by the concatenate kernels: owning
// column_device_view pointers, a contiguous device array of those views, a
// device array of partition offsets (prefix sum of view sizes with a leading
// zero), and the total output row count.
auto create_device_views(std::vector<column_view> const& views, cudaStream_t stream)
{
  // Create device views for each input view
  using CDViewPtr =
    decltype(column_device_view::create(std::declval<column_view>(), std::declval<cudaStream_t>()));
  auto device_view_owners = std::vector<CDViewPtr>(views.size());
  std::transform(
    views.cbegin(), views.cend(), device_view_owners.begin(), [stream](auto const& col) {
      // TODO creating this device view can invoke null count computation
      // even though it isn't used. See this issue:
      // https://github.com/rapidsai/cudf/issues/4368
      return column_device_view::create(col, stream);
    });
  // Assemble contiguous array of device views (dereference the owners)
  auto device_views = thrust::host_vector<column_device_view>();
  device_views.reserve(views.size());
  std::transform(device_view_owners.cbegin(),
                 device_view_owners.cend(),
                 std::back_inserter(device_views),
                 [](auto const& col) { return *col; });
  // TODO each of these device vector copies invoke stream synchronization
  // which appears to add unnecessary overhead. See this issue:
  // https://github.com/rapidsai/rmm/issues/120
  auto d_views = rmm::device_vector<column_device_view>{device_views};
  // Compute the partition offsets: inclusive scan of view sizes written after
  // the zero-initialized first slot, so offsets[i] is the first output row of
  // view i and offsets.back() is the total output size.
  auto offsets = thrust::host_vector<size_t>(views.size() + 1);
  thrust::transform_inclusive_scan(
    thrust::host,
    device_views.cbegin(),
    device_views.cend(),
    std::next(offsets.begin()),
    [](auto const& col) { return col.size(); },
    thrust::plus<size_t>{});
  auto const d_offsets = rmm::device_vector<size_t>{offsets};
  auto const output_size = offsets.back();
  // Keep the owners alive alongside the device arrays they back.
  return std::make_tuple(
    std::move(device_view_owners), std::move(d_views), std::move(d_offsets), output_size);
}
/**
 * @brief Concatenates the null mask bits of all the column device views in the
 * `views` array to the destination bitmask.
 *
 * Launched as a 1D grid-stride loop over output bit positions; each warp
 * produces one 32-bit bitmask word via `__ballot_sync`.
 *
 * @param views Array of column_device_view
 * @param output_offsets Prefix sum of sizes of elements of `views`
 * @param number_of_views Size of `views` array
 * @param dest_mask The output buffer to copy null masks into
 * @param number_of_mask_bits The total number of null masks bits that are being
 * copied
 **/
__global__ void concatenate_masks_kernel(column_device_view const* views,
                                         size_t const* output_offsets,
                                         size_type number_of_views,
                                         bitmask_type* dest_mask,
                                         size_type number_of_mask_bits)
{
  size_type mask_index = threadIdx.x + blockIdx.x * blockDim.x;
  // Mask of lanes that still have work; re-ballotted every iteration so the
  // *_sync intrinsics only name threads that actually participate.
  auto active_mask = __ballot_sync(0xFFFF'FFFF, mask_index < number_of_mask_bits);
  while (mask_index < number_of_mask_bits) {
    // Locate the source view: index of the last offset <= mask_index.
    size_type const source_view_index =
      thrust::upper_bound(
        thrust::seq, output_offsets, output_offsets + number_of_views, mask_index) -
      output_offsets - 1;
    bool bit_is_set = 1;
    if (source_view_index < number_of_views) {
      size_type const column_element_index = mask_index - output_offsets[source_view_index];
      bit_is_set = views[source_view_index].is_valid(column_element_index);
    }
    // One word per warp; lane 0 writes it.
    bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
    if (threadIdx.x % detail::warp_size == 0) { dest_mask[word_index(mask_index)] = new_word; }
    mask_index += blockDim.x * gridDim.x;
    active_mask = __ballot_sync(active_mask, mask_index < number_of_mask_bits);
  }
}
// Launches concatenate_masks_kernel to stitch the null masks of all input
// views into dest_mask (output_size bits total) on the given stream.
void concatenate_masks(rmm::device_vector<column_device_view> const& d_views,
                       rmm::device_vector<size_t> const& d_offsets,
                       bitmask_type* dest_mask,
                       size_type output_size,
                       cudaStream_t stream)
{
  constexpr size_type threads_per_block{256};
  cudf::detail::grid_1d grid(output_size, threads_per_block);
  auto const num_views = static_cast<size_type>(d_views.size());
  concatenate_masks_kernel<<<grid.num_blocks, grid.num_threads_per_block, 0, stream>>>(
    d_views.data().get(), d_offsets.data().get(), num_views, dest_mask, output_size);
}
// Host-view overload: materializes device-side views/offsets for the inputs,
// then delegates to the device-vector overload above.
void concatenate_masks(std::vector<column_view> const& views,
                       bitmask_type* dest_mask,
                       cudaStream_t stream)
{
  auto const preprocessed = create_device_views(views, stream);
  concatenate_masks(std::get<1>(preprocessed),
                    std::get<2>(preprocessed),
                    dest_mask,
                    std::get<3>(preprocessed),
                    stream);
}
// Fused concatenation kernel: for every output element, binary-search the
// partition offsets to find its source view, copy the value, and (when
// Nullable) build the output bitmask by warp ballot while tallying valid
// elements. One atomicAdd per block publishes the grid-wide valid count.
template <typename T, size_type block_size, bool Nullable>
__global__ void fused_concatenate_kernel(column_device_view const* input_views,
                                         size_t const* input_offsets,
                                         size_type num_input_views,
                                         mutable_column_device_view output_view,
                                         size_type* out_valid_count)
{
  auto const output_size = output_view.size();
  auto* output_data = output_view.data<T>();
  size_type output_index = threadIdx.x + blockIdx.x * blockDim.x;
  size_type warp_valid_count = 0;
  unsigned active_mask;
  // Participation mask for the ballot below, recomputed per iteration.
  if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFF, output_index < output_size); }
  while (output_index < output_size) {
    // Lookup input index by searching for output index in offsets
    // thrust::prev isn't in CUDA 10.0, so subtracting 1 here instead
    auto const offset_it =
      -1 + thrust::upper_bound(
             thrust::seq, input_offsets, input_offsets + num_input_views, output_index);
    size_type const partition_index = offset_it - input_offsets;
    // Copy input data to output
    auto const offset_index = output_index - *offset_it;
    auto const& input_view = input_views[partition_index];
    auto const* input_data = input_view.data<T>();
    output_data[output_index] = input_data[offset_index];
    if (Nullable) {
      bool const bit_is_set = input_view.is_valid(offset_index);
      bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
      // First thread writes bitmask word
      if (threadIdx.x % detail::warp_size == 0) {
        output_view.null_mask()[word_index(output_index)] = new_word;
      }
      warp_valid_count += __popc(new_word);
    }
    output_index += blockDim.x * gridDim.x;
    if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
  }
  if (Nullable) {
    using detail::single_lane_block_sum_reduce;
    auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
    // One atomic per block to accumulate the block's valid-bit total.
    if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
  }
}
// Host driver for the fused path: uploads views/offsets, allocates the output
// (with a null mask only when needed), launches the Nullable/non-Nullable
// kernel variant, and derives the output null count from the device-side
// valid counter.
template <typename T>
std::unique_ptr<column> fused_concatenate(std::vector<column_view> const& views,
                                          bool const has_nulls,
                                          rmm::mr::device_memory_resource* mr,
                                          cudaStream_t stream)
{
  using mask_policy = cudf::mask_allocation_policy;
  // Preprocess and upload inputs to device memory
  auto const device_views = create_device_views(views, stream);
  auto const& d_views = std::get<1>(device_views);
  auto const& d_offsets = std::get<2>(device_views);
  auto const output_size = std::get<3>(device_views);
  CUDF_EXPECTS(output_size < std::numeric_limits<size_type>::max(),
               "Total number of concatenated rows exceeds size_type range");
  // Allocate output
  auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
  auto out_col = detail::allocate_like(views.front(), output_size, policy, mr, stream);
  out_col->set_null_count(0);  // prevent null count from being materialized
  auto out_view = out_col->mutable_view();
  auto d_out_view = mutable_column_device_view::create(out_view, stream);
  rmm::device_scalar<size_type> d_valid_count(0);
  // Launch kernel: Nullable variant selected at compile time via template arg
  constexpr size_type block_size{256};
  cudf::detail::grid_1d config(output_size, block_size);
  auto const kernel = has_nulls ? fused_concatenate_kernel<T, block_size, true>
                                : fused_concatenate_kernel<T, block_size, false>;
  kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream>>>(
    d_views.data().get(),
    d_offsets.data().get(),
    static_cast<size_type>(d_views.size()),
    *d_out_view,
    d_valid_count.data());
  // null count = total rows - valid bits counted by the kernel
  if (has_nulls) { out_col->set_null_count(output_size - d_valid_count.value(stream)); }
  return out_col;
}
// Simple concatenation path: allocate the output once, then copy each input
// column's data into place at its running offset; the null mask (if any) is
// concatenated separately afterwards.
template <typename T>
std::unique_ptr<column> for_each_concatenate(std::vector<column_view> const& views,
                                             bool const has_nulls,
                                             rmm::mr::device_memory_resource* mr,
                                             cudaStream_t stream)
{
  // Total output row count is the sum of all input row counts.
  size_type total_element_count = 0;
  for (auto const& v : views) { total_element_count += v.size(); }

  using mask_policy = cudf::mask_allocation_policy;
  auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
  auto col = cudf::allocate_like(views.front(), total_element_count, policy, mr);

  col->set_null_count(0);           // prevent null count from being materialized...
  auto m_view = col->mutable_view();  // ...when we take a mutable view

  // Copy each input's values into the output at its running offset.
  size_type write_offset = 0;
  for (auto& v : views) {
    thrust::copy(
      rmm::exec_policy()->on(stream), v.begin<T>(), v.end<T>(), m_view.begin<T>() + write_offset);
    write_offset += v.size();
  }

  // If concatenated column is nullable, proceed to calculate it
  if (has_nulls) {
    cudf::detail::concatenate_masks(views, (col->mutable_view()).null_mask(), stream);
  }

  return col;
}
// Type-dispatched functor for concatenation. Holds the inputs by reference;
// operator() (fixed-width types) chooses between the fused kernel and the
// per-column copy path via a benchmark-derived heuristic.
struct concatenate_dispatch {
  std::vector<column_view> const& views;
  rmm::mr::device_memory_resource* mr;
  cudaStream_t stream;

  // fixed width
  template <typename T>
  std::unique_ptr<column> operator()()
  {
    auto const any_null_input =
      std::any_of(views.cbegin(), views.cend(), [](auto const& col) { return col.has_nulls(); });
    return use_fused_kernel_heuristic(any_null_input, views.size())
             ? fused_concatenate<T>(views, any_null_input, mr, stream)
             : for_each_concatenate<T>(views, any_null_input, mr, stream);
  }
};
// Non-fixed-width types delegate to their type-specific detail
// implementations.
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::dictionary32>()
{
  return cudf::dictionary::detail::concatenate(views, stream, mr);
}

template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::string_view>()
{
  // NOTE: the strings detail API takes (views, mr, stream) — the argument
  // order differs from the dictionary/lists overloads.
  return cudf::strings::detail::concatenate(views, mr, stream);
}

template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::list_view>()
{
  return cudf::lists::detail::concatenate(views, stream, mr);
}
// Concatenates the elements from a vector of column_views.
// Validates that the inputs are non-empty and share a single dtype, then
// dispatches on that dtype; when every input is empty, returns an empty
// column mirroring the first input's metadata.
std::unique_ptr<column> concatenate(std::vector<column_view> const& columns_to_concat,
                                    rmm::mr::device_memory_resource* mr,
                                    cudaStream_t stream)
{
  CUDF_EXPECTS(!columns_to_concat.empty(), "Unexpected empty list of columns to concatenate.");

  data_type const common_type = columns_to_concat.front().type();
  auto const matches_type = [&common_type](auto const& c) { return c.type() == common_type; };
  CUDF_EXPECTS(std::all_of(columns_to_concat.begin(), columns_to_concat.end(), matches_type),
               "Type mismatch in columns to concatenate.");

  auto const view_is_empty = [](column_view const& c) { return c.is_empty(); };
  if (std::all_of(columns_to_concat.begin(), columns_to_concat.end(), view_is_empty)) {
    return empty_like(columns_to_concat.front());
  }

  return type_dispatcher(common_type, concatenate_dispatch{columns_to_concat, mr, stream});
}
// Concatenates tables row-wise: output column i is the concatenation of
// column i from every input table. An empty input list yields an empty table.
std::unique_ptr<table> concatenate(std::vector<table_view> const& tables_to_concat,
                                   rmm::mr::device_memory_resource* mr,
                                   cudaStream_t stream)
{
  if (tables_to_concat.empty()) { return std::make_unique<table>(); }
  // Every table must match the first in column count and column types.
  table_view const first_table = tables_to_concat.front();
  CUDF_EXPECTS(std::all_of(tables_to_concat.cbegin(),
                           tables_to_concat.cend(),
                           [&first_table](auto const& t) {
                             return t.num_columns() == first_table.num_columns() &&
                                    have_same_types(first_table, t);
                           }),
               "Mismatch in table columns to concatenate.");
  // For each column position, gather that column from every table and
  // concatenate the group.
  std::vector<std::unique_ptr<column>> concat_columns;
  for (size_type i = 0; i < first_table.num_columns(); ++i) {
    std::vector<column_view> cols;
    std::transform(tables_to_concat.cbegin(),
                   tables_to_concat.cend(),
                   std::back_inserter(cols),
                   [i](auto const& t) { return t.column(i); });
    concat_columns.emplace_back(detail::concatenate(cols, mr, stream));
  }
  return std::make_unique<table>(std::move(concat_columns));
}
} // namespace detail
// Builds the concatenated null mask for a group of column views. Returns an
// empty buffer when no input has nulls; otherwise allocates an uninitialized
// mask sized for the total row count and fills it on the default stream.
rmm::device_buffer concatenate_masks(std::vector<column_view> const& views,
                                     rmm::mr::device_memory_resource* mr)
{
  auto const has_nulls =
    std::any_of(views.begin(), views.end(), [](const column_view col) { return col.has_nulls(); });

  // No nulls anywhere: no mask is required.
  if (!has_nulls) { return rmm::device_buffer{0, (cudaStream_t)0, mr}; }

  // Total row count across all views sizes the output bitmask.
  size_type total_element_count = 0;
  for (auto const& v : views) { total_element_count += v.size(); }

  rmm::device_buffer null_mask =
    create_null_mask(total_element_count, mask_state::UNINITIALIZED, 0, mr);
  detail::concatenate_masks(views, static_cast<bitmask_type*>(null_mask.data()), 0);
  return null_mask;
}
// Concatenates the elements from a vector of column_views
// Public entry point: records an NVTX range and forwards to the detail
// implementation on the default stream (0).
std::unique_ptr<column> concatenate(std::vector<column_view> const& columns_to_concat,
                                    rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::concatenate(columns_to_concat, mr, 0);
}
// Public entry point for table concatenation: records an NVTX range and
// forwards to the detail implementation on the default stream (0).
std::unique_ptr<table> concatenate(std::vector<table_view> const& tables_to_concat,
                                   rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::concatenate(tables_to_concat, mr, 0);
}
} // namespace cudf
|
52730819560c4458f566f289d2c7c7bbca8896ae.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// tanh_shrink.cpp
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/tanh_shrink.hpp>
#include <nbla/cuda/function/utils/base_transform_unary.cuh>
#include <cmath>
namespace nbla {
// TanhShrink forward: y = x - tanh(x).
// Backward: dx = dy * tanh(x)^2, since d/dx[x - tanh(x)] = tanh(x)^2.
// NOTE(review): hipify rewrote the gradient's pow call as `::pow` (the CUDA
// original uses `std::pow`) — confirm a global pow overload is in scope for
// all instantiated types.
NBLA_DEFINE_TRANSFORM_UNARY_CUDA(TanhShrink, x - std::tanh(x),
                                 dy *::pow(std::tanh(x), (T)2), false);
}
| 52730819560c4458f566f289d2c7c7bbca8896ae.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// tanh_shrink.cpp
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/tanh_shrink.hpp>
#include <nbla/cuda/function/utils/base_transform_unary.cuh>
#include <cmath>
namespace nbla {
// TanhShrink forward: y = x - tanh(x).
// Backward: dx = dy * tanh(x)^2, since d/dx[x - tanh(x)] = 1 - sech^2(x)
// = tanh^2(x).
NBLA_DEFINE_TRANSFORM_UNARY_CUDA(TanhShrink, x - std::tanh(x),
                                 dy *std::pow(std::tanh(x), (T)2), false);
}
|
73ed7b3cb428c02d21c314cb73b2aad449ee0342.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv ([email protected])
//
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <system/op_boilerplate.h>
#include "execution/cuda/LaunchDims.h"
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// Fused Adam update step over the gradient/state tensors:
//   m = beta1*m + (1-beta1)*g
//   v = beta2*v + (1-beta2)*g^2
//   update = m * epsilonT / (sqrt(v) + epsilon)
// where epsilonT = lr*sqrt(1-beta2^t)/(1-beta1^t) folds the bias correction
// into the step size. Grid-stride loop; per-element offsets are recomputed
// from coordinates only when the buffers are not all unit-stride with
// matching ordering.
template <typename T>
SD_KERNEL void adamUpdaterCuda(const void* vx, const sd::LongType* xShapeInfo, const void* vinv,
                               const sd::LongType* invShapeInfo, const void* vinm, const sd::LongType* inmShapeInfo,
                               void* vz, const sd::LongType* zShapeInfo, void* vstV, const sd::LongType* stvShapeInfo,
                               void* vstM, const sd::LongType* stmShapeInfo, const T lr, const T beta1, const T beta2,
                               const T epsilon, const T iteration) {
  const auto grad = reinterpret_cast<const T*>(vx);    // gradient g
  const auto initU = reinterpret_cast<const T*>(vinv); // incoming second moment v
  const auto initM = reinterpret_cast<const T*>(vinm); // incoming first moment m
  auto up = reinterpret_cast<T*>(vz);   // parameter update (output)
  auto stU = reinterpret_cast<T*>(vstV); // updated second moment (output)
  auto stM = reinterpret_cast<T*>(vstM); // updated first moment (output)
  __shared__ sd::LongType xLen;
  __shared__ T epsilonT;  // bias-corrected step size, computed once per block
  __shared__ bool bEWS, bOrdering, bXZsame, bXInUSame, bXStUSame, bXInMSame, bXStMSame;
  // Thread 0 precomputes the step size and buffer-layout flags.
  if (threadIdx.x == 0) {
    xLen = shape::length(xShapeInfo);
    T beta1T = sd::math::sd_pow<T, T, T>(beta1, (iteration + 1));
    T beta2T = sd::math::sd_pow<T, T, T>(beta2, (iteration + 1));
    epsilonT = lr * sd::math::sd_sqrt<T, T>(1. - beta2T) / (1.0 - beta1T);
    // Fall back to plain epsilon on NaN/Inf/zero (degenerate bias correction).
    if (sd::math::sd_isnan(epsilonT) || 0 == epsilonT || sd::math::sd_isinf(epsilonT)) epsilonT = epsilon;
    // True when every buffer is elementwise-stride 1 ...
    bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
           1 == shape::elementWiseStride(stmShapeInfo) && 1 == shape::elementWiseStride(inmShapeInfo) &&
           1 == shape::elementWiseStride(stvShapeInfo) && 1 == shape::elementWiseStride(invShapeInfo);
    // ... and all share the same memory ordering: linear index == offset.
    bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) &&
                shape::order(zShapeInfo) == shape::order(stmShapeInfo) &&
                shape::order(stmShapeInfo) == shape::order(inmShapeInfo) &&
                shape::order(inmShapeInfo) == shape::order(stvShapeInfo) &&
                shape::order(stvShapeInfo) == shape::order(invShapeInfo);
    bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
    bXInUSame = shape::haveSameShapeAndStrides(xShapeInfo, invShapeInfo);
    bXStUSame = shape::haveSameShapeAndStrides(xShapeInfo, stvShapeInfo);
    bXInMSame = shape::haveSameShapeAndStrides(xShapeInfo, inmShapeInfo);
    bXStMSame = shape::haveSameShapeAndStrides(xShapeInfo, stmShapeInfo);
  }
  __syncthreads();
  sd::LongType coords[SD_MAX_RANK];
  for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
    sd::LongType xOffset = i, zOffset = i, initMOffset = i, initUOffset = i, stMOffset = i, stUOffset = i;
    // Non-contiguous/mismatched layouts: translate the linear index into
    // per-buffer offsets via coordinates.
    if (!bEWS || !bOrdering) {
      shape::index2coords(i, xShapeInfo, coords);
      xOffset = shape::getOffset(xShapeInfo, coords);
      zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
      initUOffset = bXInUSame ? xOffset : shape::getOffset(invShapeInfo, coords);
      stUOffset = bXStUSame ? xOffset : shape::getOffset(stvShapeInfo, coords);
      initMOffset = bXInMSame ? xOffset : shape::getOffset(inmShapeInfo, coords);
      stMOffset = bXStMSame ? xOffset : shape::getOffset(stmShapeInfo, coords);
    }
    stM[stMOffset] = beta1 * initM[initMOffset] + grad[xOffset] * (1 - beta1);
    stU[stUOffset] = beta2 * initU[initUOffset] + grad[xOffset] * grad[xOffset] * (1 - beta2);
    up[zOffset] = (stM[stMOffset] * epsilonT) / (sd::math::sd_sqrt<T, T>(stU[stUOffset]) + epsilon);
  }
}
///////////////////////////////////////////////////////////////////
// Casts the double-precision hyperparameters to the tensor type T and
// launches adamUpdaterCuda on the given stream with the given grid/block/
// shared-memory configuration.
template <typename T>
void adamUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMemory,
                             const hipStream_t* stream, const void* vx, const sd::LongType* xShapeInfo,
                             const void* vinv, const sd::LongType* invShapeInfo, const void* vinm,
                             const sd::LongType* inmShapeInfo, void* vz, const sd::LongType* zShapeInfo, void* vstV,
                             const sd::LongType* stvShapeInfo, void* vstM, const sd::LongType* stmShapeInfo,
                             const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon,
                             const int nIteration) {
  const T lr = static_cast<T>(dLr);
  const T beta1 = static_cast<T>(dBeta1);
  const T beta2 = static_cast<T>(dBeta2);
  T epsilon = static_cast<T>(dEpsilon);
  // A zero epsilon is replaced with a small default; guards against fp16
  // underflow in the division inside the kernel.
  if(epsilon == 0.0) {
    epsilon = static_cast<T>(1e-7);
  }
  const T iteration = static_cast<T>(nIteration);
  hipLaunchKernelGGL(( adamUpdaterCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemory, *stream,
      vx, xShapeInfo, vinv, invShapeInfo, vinm, inmShapeInfo, vz, zShapeInfo, vstV, stvShapeInfo, vstM, stmShapeInfo,
      lr, beta1, beta2, epsilon, iteration);
}
///////////////////////////////////////////////////////////////////
// Public helper: runs one Adam step for `gradient` with moment states
// (initStateU/initStateM -> stateU/stateM) and writes the step into `update`.
// Dispatches the typed launcher over all float types and synchronizes before
// returning.
void updaterAdam(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateU,
                 const NDArray& initStateM, NDArray& update, NDArray& stateU, NDArray& stateM, const double dLr,
                 const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) {
  PointersManager manager(context, "adamUpdater");
  dim3 launchDims = updaterDims(gradient.lengthOf());
  // Mark device buffers in use: outputs first list, inputs second list.
  NDArray::prepareSpecialUse({&update, &stateU, &stateM}, {&gradient, &initStateU, &initStateM});
  BUILD_SINGLE_SELECTOR(gradient.dataType(), adamUpdaterCudaLauncher,
                        (launchDims.y, launchDims.x,launchDims.z, context->getCudaStream(), gradient.specialBuffer(),
                         gradient.specialShapeInfo(), initStateU.specialBuffer(), initStateU.specialShapeInfo(),
                         initStateM.specialBuffer(), initStateM.specialShapeInfo(), update.specialBuffer(),
                         update.specialShapeInfo(), stateU.specialBuffer(), stateU.specialShapeInfo(),
                         stateM.specialBuffer(), stateM.specialShapeInfo(), dLr, dBeta1, dBeta2, dEpsilon, nIteration),
                        SD_FLOAT_TYPES);
  NDArray::registerSpecialUse({&update, &stateU, &stateM}, {&gradient, &initStateU, &initStateM});
  manager.synchronize();
}
} // namespace helpers
} // namespace ops
} // namespace sd
| 73ed7b3cb428c02d21c314cb73b2aad449ee0342.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv ([email protected])
//
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <system/op_boilerplate.h>
#include "execution/cuda/LaunchDims.h"
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// Fused Adam update step over the gradient/state tensors:
//   m = beta1*m + (1-beta1)*g
//   v = beta2*v + (1-beta2)*g^2
//   update = m * epsilonT / (sqrt(v) + epsilon)
// where epsilonT = lr*sqrt(1-beta2^t)/(1-beta1^t) folds the bias correction
// into the step size. Grid-stride loop; per-element offsets are recomputed
// from coordinates only when the buffers are not all unit-stride with
// matching ordering.
template <typename T>
SD_KERNEL void adamUpdaterCuda(const void* vx, const sd::LongType* xShapeInfo, const void* vinv,
                               const sd::LongType* invShapeInfo, const void* vinm, const sd::LongType* inmShapeInfo,
                               void* vz, const sd::LongType* zShapeInfo, void* vstV, const sd::LongType* stvShapeInfo,
                               void* vstM, const sd::LongType* stmShapeInfo, const T lr, const T beta1, const T beta2,
                               const T epsilon, const T iteration) {
  const auto grad = reinterpret_cast<const T*>(vx);    // gradient g
  const auto initU = reinterpret_cast<const T*>(vinv); // incoming second moment v
  const auto initM = reinterpret_cast<const T*>(vinm); // incoming first moment m
  auto up = reinterpret_cast<T*>(vz);   // parameter update (output)
  auto stU = reinterpret_cast<T*>(vstV); // updated second moment (output)
  auto stM = reinterpret_cast<T*>(vstM); // updated first moment (output)
  __shared__ sd::LongType xLen;
  __shared__ T epsilonT;  // bias-corrected step size, computed once per block
  __shared__ bool bEWS, bOrdering, bXZsame, bXInUSame, bXStUSame, bXInMSame, bXStMSame;
  // Thread 0 precomputes the step size and buffer-layout flags.
  if (threadIdx.x == 0) {
    xLen = shape::length(xShapeInfo);
    T beta1T = sd::math::sd_pow<T, T, T>(beta1, (iteration + 1));
    T beta2T = sd::math::sd_pow<T, T, T>(beta2, (iteration + 1));
    epsilonT = lr * sd::math::sd_sqrt<T, T>(1. - beta2T) / (1.0 - beta1T);
    // Fall back to plain epsilon on NaN/Inf/zero (degenerate bias correction).
    if (sd::math::sd_isnan(epsilonT) || 0 == epsilonT || sd::math::sd_isinf(epsilonT)) epsilonT = epsilon;
    // True when every buffer is elementwise-stride 1 ...
    bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
           1 == shape::elementWiseStride(stmShapeInfo) && 1 == shape::elementWiseStride(inmShapeInfo) &&
           1 == shape::elementWiseStride(stvShapeInfo) && 1 == shape::elementWiseStride(invShapeInfo);
    // ... and all share the same memory ordering: linear index == offset.
    bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) &&
                shape::order(zShapeInfo) == shape::order(stmShapeInfo) &&
                shape::order(stmShapeInfo) == shape::order(inmShapeInfo) &&
                shape::order(inmShapeInfo) == shape::order(stvShapeInfo) &&
                shape::order(stvShapeInfo) == shape::order(invShapeInfo);
    bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
    bXInUSame = shape::haveSameShapeAndStrides(xShapeInfo, invShapeInfo);
    bXStUSame = shape::haveSameShapeAndStrides(xShapeInfo, stvShapeInfo);
    bXInMSame = shape::haveSameShapeAndStrides(xShapeInfo, inmShapeInfo);
    bXStMSame = shape::haveSameShapeAndStrides(xShapeInfo, stmShapeInfo);
  }
  __syncthreads();
  sd::LongType coords[SD_MAX_RANK];
  for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
    sd::LongType xOffset = i, zOffset = i, initMOffset = i, initUOffset = i, stMOffset = i, stUOffset = i;
    // Non-contiguous/mismatched layouts: translate the linear index into
    // per-buffer offsets via coordinates.
    if (!bEWS || !bOrdering) {
      shape::index2coords(i, xShapeInfo, coords);
      xOffset = shape::getOffset(xShapeInfo, coords);
      zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
      initUOffset = bXInUSame ? xOffset : shape::getOffset(invShapeInfo, coords);
      stUOffset = bXStUSame ? xOffset : shape::getOffset(stvShapeInfo, coords);
      initMOffset = bXInMSame ? xOffset : shape::getOffset(inmShapeInfo, coords);
      stMOffset = bXStMSame ? xOffset : shape::getOffset(stmShapeInfo, coords);
    }
    stM[stMOffset] = beta1 * initM[initMOffset] + grad[xOffset] * (1 - beta1);
    stU[stUOffset] = beta2 * initU[initUOffset] + grad[xOffset] * grad[xOffset] * (1 - beta2);
    up[zOffset] = (stM[stMOffset] * epsilonT) / (sd::math::sd_sqrt<T, T>(stU[stUOffset]) + epsilon);
  }
}
///////////////////////////////////////////////////////////////////
// Casts the double-precision hyperparameters to the tensor type T and
// launches adamUpdaterCuda on the given stream with the given grid/block/
// shared-memory configuration.
template <typename T>
void adamUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMemory,
                             const cudaStream_t* stream, const void* vx, const sd::LongType* xShapeInfo,
                             const void* vinv, const sd::LongType* invShapeInfo, const void* vinm,
                             const sd::LongType* inmShapeInfo, void* vz, const sd::LongType* zShapeInfo, void* vstV,
                             const sd::LongType* stvShapeInfo, void* vstM, const sd::LongType* stmShapeInfo,
                             const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon,
                             const int nIteration) {
  const T lr = static_cast<T>(dLr);
  const T beta1 = static_cast<T>(dBeta1);
  const T beta2 = static_cast<T>(dBeta2);
  T epsilon = static_cast<T>(dEpsilon);
  // A zero epsilon is replaced with a small default; guards against fp16
  // underflow in the division inside the kernel.
  if(epsilon == 0.0) {
    epsilon = static_cast<T>(1e-7);
  }
  const T iteration = static_cast<T>(nIteration);
  adamUpdaterCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMemory, *stream>>>(
      vx, xShapeInfo, vinv, invShapeInfo, vinm, inmShapeInfo, vz, zShapeInfo, vstV, stvShapeInfo, vstM, stmShapeInfo,
      lr, beta1, beta2, epsilon, iteration);
}
///////////////////////////////////////////////////////////////////
// Public helper: runs one Adam step for `gradient` with moment states
// (initStateU/initStateM -> stateU/stateM) and writes the step into `update`.
// Dispatches the typed launcher over all float types and synchronizes before
// returning.
void updaterAdam(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateU,
                 const NDArray& initStateM, NDArray& update, NDArray& stateU, NDArray& stateM, const double dLr,
                 const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) {
  PointersManager manager(context, "adamUpdater");
  dim3 launchDims = updaterDims(gradient.lengthOf());
  // Mark device buffers in use: outputs first list, inputs second list.
  NDArray::prepareSpecialUse({&update, &stateU, &stateM}, {&gradient, &initStateU, &initStateM});
  BUILD_SINGLE_SELECTOR(gradient.dataType(), adamUpdaterCudaLauncher,
                        (launchDims.y, launchDims.x,launchDims.z, context->getCudaStream(), gradient.specialBuffer(),
                         gradient.specialShapeInfo(), initStateU.specialBuffer(), initStateU.specialShapeInfo(),
                         initStateM.specialBuffer(), initStateM.specialShapeInfo(), update.specialBuffer(),
                         update.specialShapeInfo(), stateU.specialBuffer(), stateU.specialShapeInfo(),
                         stateM.specialBuffer(), stateM.specialShapeInfo(), dLr, dBeta1, dBeta2, dEpsilon, nIteration),
                        SD_FLOAT_TYPES);
  NDArray::registerSpecialUse({&update, &stateU, &stateM}, {&gradient, &initStateU, &initStateM});
  manager.synchronize();
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
2c2a04e9da8e643db1f740cfb4f51e218e4af028.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_1x1_layer_tester_cuda.h"

#include <algorithm>
#include <hip/hip_runtime.h>

#include "util_cuda.h"
#include "neural_network_cublas_exception.h"
#include "../convolution_layer.h"
namespace nnforge
{
namespace cuda
{
// Fills the output tensor with the per-feature-map bias value, so the GEMM
// that follows (beta = 1) accumulates on top of it. Output indexing used here:
// entry * output_neuron_count + neuron * feature_map_count + feature_map.
// Grid: x = feature map, y = neuron within a feature map, z = entry / 4
// (each thread writes up to 4 consecutive entries).
__global__ void copy_bias_1x1_kernel(
  const float * __restrict biases,
  float * __restrict output,
  int output_neuron_count,
  int output_neuron_count_per_feature_map,
  int output_feature_map_count,
  int entry_count)
{
  int feature_map_id = blockIdx.x * blockDim.x + threadIdx.x;
  int output_neuron_id = blockIdx.y * blockDim.y + threadIdx.y;
  int entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * 4;  // first of 4 entries
  if ((feature_map_id < output_feature_map_count) && (output_neuron_id < output_neuron_count_per_feature_map) && (entry_id < entry_count))
  {
    float bias = biases[feature_map_id];
    float * current_output = output + (int)(entry_id * output_neuron_count + output_neuron_id * output_feature_map_count + feature_map_id);
    #pragma unroll
    for(int i = 0; i < 4; ++i)
    {
      // Per-entry guard: the last thread may cover fewer than 4 entries.
      if (entry_id < entry_count)
        *current_output = bias;
      current_output += output_neuron_count;
      entry_id++;
    }
  }
}
// Trivial constructor: performs no work of its own.
convolution_1x1_layer_tester_cuda::convolution_1x1_layer_tester_cuda()
{
}

// Trivial destructor: performs no work of its own.
convolution_1x1_layer_tester_cuda::~convolution_1x1_layer_tester_cuda()
{
}
// Forward pass of the 1x1 convolution expressed as a GEMM:
//   1) pre-fill additional_buffers[2] with the biases (data[1]),
//   2) transpose the input into feature-map-major layout in additional_buffers[1],
//   3) GEMM with the weights (data[0], op T) accumulating onto the biases
//      (beta = 1),
//   4) transpose the result back into additional_buffers[0], which
//      get_output_buffer() exposes as the layer output.
// All work is enqueued asynchronously on stream_id.
// NOTE(review): entry_count * input_elem_count_per_feature_map is used as the
// GEMM column count — relies on input/output spatial element counts being
// equal, which holds for a 1x1 convolution.
void convolution_1x1_layer_tester_cuda::enqueue_test(
  hipStream_t stream_id,
  const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
  const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
  const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
  cuda_linear_buffer_device_smart_ptr input_buffer,
  const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
  unsigned int entry_count)
{
  // z-dimension covers entries in groups of 4 (see copy_bias_1x1_kernel).
  std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
    *cuda_config,
    output_configuration_specific.feature_map_count,
    output_elem_count_per_feature_map,
    (entry_count + 4 - 1) / 4,
    1);
  hipLaunchKernelGGL(( copy_bias_1x1_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
    *data[1],
    *additional_buffers[2],
    output_elem_count_per_entry,
    output_elem_count_per_feature_map,
    output_configuration_specific.feature_map_count,
    entry_count);
  // Reorder the input so the feature-map dimension is contiguous for the GEMM.
  cuda_util::transpose(
    *cuda_config,
    *input_buffer,
    *additional_buffers[1],
    input_elem_count_per_feature_map,
    input_configuration_specific.feature_map_count,
    entry_count,
    stream_id);
  cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), stream_id));
  float alpha = 1.0F;
  float beta = 1.0F;  // accumulate onto the pre-filled biases
  cublas_safe_call(hipblasSgemm(
    cuda_config->get_cublas_handle(),
    HIPBLAS_OP_T,
    HIPBLAS_OP_N,
    output_configuration_specific.feature_map_count,
    entry_count * input_elem_count_per_feature_map,
    input_configuration_specific.feature_map_count,
    &alpha,
    *data[0],
    input_configuration_specific.feature_map_count,
    *additional_buffers[1],
    input_configuration_specific.feature_map_count,
    &beta,
    *additional_buffers[2],
    output_configuration_specific.feature_map_count));
  // Transpose back to the layer's entry-major output layout.
  cuda_util::transpose(
    *cuda_config,
    *additional_buffers[2],
    *additional_buffers[0],
    output_configuration_specific.feature_map_count,
    output_elem_count_per_feature_map,
    entry_count,
    stream_id);
}
// Per-entry scratch buffers used by enqueue_test:
//   [0] final (transposed-back) output, [1] transposed input, [2] GEMM result.
std::vector<size_t> convolution_1x1_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
{
  const size_t output_bytes = output_elem_count_per_entry * sizeof(float);
  const size_t input_bytes = input_elem_count_per_entry * sizeof(float);
  return { output_bytes, input_bytes, output_bytes };
}
// The layer output is staged in additional buffer 0 (filled by the final
// transpose in enqueue_test); the input buffer itself is not reused.
cuda_linear_buffer_device_smart_ptr convolution_1x1_layer_tester_cuda::get_output_buffer(
  cuda_linear_buffer_device_smart_ptr input_buffer,
  const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
{
  return additional_buffers.front();
}
// Heuristic block size for bias updates: sqrt(entry_count), clamped to
// the valid range [1, entry_count].
int convolution_1x1_layer_tester_cuda::get_bias_update_block_size(int entry_count)
{
	// Use std::min/std::max: the global-namespace ::min/::max are only
	// provided for device code, so host code must use the std versions.
	// This also matches the CUDA twin of this source file.
	int block_size = std::min(std::max(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
	return block_size;
}
}
}
| 2c2a04e9da8e643db1f740cfb4f51e218e4af028.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_1x1_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cublas_exception.h"
#include "../convolution_layer.h"
namespace nnforge
{
namespace cuda
{
/** Broadcasts per-feature-map biases into the interleaved output tensor.
 *
 * The output index is entry_id * output_neuron_count +
 * output_neuron_id * output_feature_map_count + feature_map_id, i.e.
 * entry-major with feature maps interleaved per neuron. Each thread fills
 * one (feature_map, neuron) cell for a strip of 4 consecutive entries, so
 * the launch's z extent must cover ceil(entry_count / 4) (see enqueue_test).
 */
__global__ void copy_bias_1x1_kernel(
const float * __restrict biases,
float * __restrict output,
int output_neuron_count,
int output_neuron_count_per_feature_map,
int output_feature_map_count,
int entry_count)
{
int feature_map_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_neuron_id = blockIdx.y * blockDim.y + threadIdx.y;
// Each z-thread owns a strip of 4 entries.
int entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * 4;
if ((feature_map_id < output_feature_map_count) && (output_neuron_id < output_neuron_count_per_feature_map) && (entry_id < entry_count))
{
float bias = biases[feature_map_id];
float * current_output = output + (int)(entry_id * output_neuron_count + output_neuron_id * output_feature_map_count + feature_map_id);
#pragma unroll
for(int i = 0; i < 4; ++i)
{
// Re-check per iteration: the last strip may run past entry_count.
if (entry_id < entry_count)
*current_output = bias;
current_output += output_neuron_count;
entry_id++;
}
}
}
// Trivial constructor - nothing to initialize here.
convolution_1x1_layer_tester_cuda::convolution_1x1_layer_tester_cuda()
{
}
// Trivial destructor - buffers are managed by smart pointers elsewhere.
convolution_1x1_layer_tester_cuda::~convolution_1x1_layer_tester_cuda()
{
}
/** Forward pass of a 1x1 convolution over a batch of entries.
 *
 * Implemented as one GEMM:
 *   1) broadcast biases (data[1]) into additional_buffers[2],
 *   2) transpose the input so feature maps form the leading dimension,
 *   3) additional_buffers[2] += W^T * input (beta = 1 keeps the biases),
 *   4) transpose the result into additional_buffers[0], which
 *      get_output_buffer() exposes as the layer output.
 * All work is enqueued on stream_id; nothing here blocks the host.
 */
void convolution_1x1_layer_tester_cuda::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
// z extent covers ceil(entry_count / 4): the bias kernel writes 4 entries
// per thread.
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
(entry_count + 4 - 1) / 4,
1);
copy_bias_1x1_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*data[1],
*additional_buffers[2],
output_elem_count_per_entry,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
cuda_util::transpose(
*cuda_config,
*input_buffer,
*additional_buffers[1],
input_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
entry_count,
stream_id);
// Keep cuBLAS on the same stream as the kernels above.
cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id));
float alpha = 1.0F;
// beta = 1 accumulates the GEMM result on top of the pre-filled biases.
float beta = 1.0F;
cublas_safe_call(cublasSgemm(
cuda_config->get_cublas_handle(),
CUBLAS_OP_T,
CUBLAS_OP_N,
output_configuration_specific.feature_map_count,
entry_count * input_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
&alpha,
*data[0],
input_configuration_specific.feature_map_count,
*additional_buffers[1],
input_configuration_specific.feature_map_count,
&beta,
*additional_buffers[2],
output_configuration_specific.feature_map_count));
cuda_util::transpose(
*cuda_config,
*additional_buffers[2],
*additional_buffers[0],
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
entry_count,
stream_id);
}
// Per-entry scratch buffer sizes (bytes):
//   [0] final output buffer, [1] transposed input, [2] GEMM accumulator.
std::vector<size_t> convolution_1x1_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
{
	const size_t output_bytes = output_elem_count_per_entry * sizeof(float);
	const size_t input_bytes = input_elem_count_per_entry * sizeof(float);
	std::vector<size_t> buffer_sizes;
	buffer_sizes.push_back(output_bytes);
	buffer_sizes.push_back(input_bytes);
	buffer_sizes.push_back(output_bytes);
	return buffer_sizes;
}
// The tester writes its final (re-transposed) result into additional
// buffer 0, so that buffer is exposed as the layer output.
cuda_linear_buffer_device_smart_ptr convolution_1x1_layer_tester_cuda::get_output_buffer(
	cuda_linear_buffer_device_smart_ptr input_buffer,
	const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
{
	cuda_linear_buffer_device_smart_ptr output_buffer = additional_buffers[0];
	return output_buffer;
}
// Heuristic block size for bias updates: sqrt(entry_count), clamped to
// the valid range [1, entry_count].
int convolution_1x1_layer_tester_cuda::get_bias_update_block_size(int entry_count)
{
	int block_size = static_cast<int>(sqrtf(static_cast<float>(entry_count)));
	if (block_size < 1)
		block_size = 1;
	if (block_size > entry_count)
		block_size = entry_count;
	return block_size;
}
}
}
|
a387a1115742ee434e82fa1ff2a9beec23c02329.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "projektcuda.h"
#include "project_comm.h"
/* Kernel to multiply a sparse matrix by a dense vector on the GPU */
/*
inputs: sparse matrix pSparseMatrix, dense vector pVector
output: vector pResultVector
C = A * B
description:
each row of A occupies one block; gridDim.x is expected to be at least the
row count of A (the kernel never advances gridIndex past 0)
*/
/** Sparse (CSR-style) matrix-vector multiply: pResultVector = pSparseMatrix * pVector.
 *
 * Block b reduces the nonzeros of row b (indices pRow[b] .. pRow[b+1]) in
 * chunks of VECTOR_BLOCK_SIZE through the shared buffer Cs; thread 0 of the
 * block accumulates each chunk into blocksum and writes the row result.
 * NOTE(review): gridDim.x must be >= pSparseMatrix.m - gridIndex stays 0,
 * so rows beyond gridDim.x are never processed; confirm the launch config.
 */
__global__ void sparseMatrixMul(t_FullMatrix pResultVector,t_SparseMatrix pSparseMatrix, t_FullMatrix pVector)
{
__shared__ float Cs[VECTOR_BLOCK_SIZE];//VECTOR_BLOCK_SIZE should equal blockDim.x (512)
//define gridIndex, if gridDim < mA, gridIndex > 0;
int gridIndex = 0;
//int idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x;
t_ve *pMatrixElements, *pVectorElements, *pResultElements;
unsigned int m, n;//, i, j;
unsigned int *pRow, *pCol;
//unsigned int colbegin, colend;
pMatrixElements = pSparseMatrix.pNZElement;
pVectorElements = pVector.pElement;
pResultElements = pResultVector.pElement;
m = pSparseMatrix.m;
n = pSparseMatrix.n;
int aBegin = 0;
int aEnd = pSparseMatrix.m;
int bBegin = 0;
//int aStep = gridDim.x;
int bStep = VECTOR_BLOCK_SIZE; // blockDim.x
//int aEnd = mA;
int bEnd;
//== sanity-check argument sizes (uniform per block, so the early returns
//== below are taken by all threads together) =============================
if(m != pResultVector.m*(pResultVector.n)){
//printf("Result Vector does not match the Matrix\n");
return;
}
if(n != pVector.m*(pVector.n)){
//printf("input Vector does not match the Matrix\n");
return;
}
//pRow and pCol may should in share memory or texture
pRow = pSparseMatrix.pRow;
pCol = pSparseMatrix.pCol;
//clear this block's output slot before accumulating
if(threadIdx.x==0){
pResultElements[blockIdx.x]=0;
//C[gridIndex*gridDim.x+blockIdx.x]=0;
}
//following is operations within one block
// initialize the dot product for each row in A and vector B
t_ve blocksum = 0;
//row is processed in chunks of bStep nonzeros
bBegin = pRow[blockIdx.x];
bEnd = pRow[blockIdx.x + 1];
// NOTE(review): the loop condition depends on threadIdx.x, so the
// __syncthreads() calls inside execute under divergent control flow when
// (bEnd - bBegin) is not a multiple of bStep - potential deadlock/UB;
// consider a uniform chunk count with per-thread guards instead.
for(int b = bBegin; (b < bEnd)&&((threadIdx.x+b) < bEnd); b += bStep ) {
Cs[threadIdx.x] = 0;
__syncthreads();
if (( (gridIndex*gridDim.x+blockIdx.x)<aEnd)&&((b+threadIdx.x) < bEnd)) {
Cs[threadIdx.x] = pMatrixElements[b + threadIdx.x] * pVectorElements[pCol[b + threadIdx.x ]];
}
__syncthreads();
if(threadIdx.x == 0){
int kEnd = bEnd-b;
if(kEnd > VECTOR_BLOCK_SIZE)kEnd = VECTOR_BLOCK_SIZE;
//kEnd is clamped because the last chunk may cover fewer than
//VECTOR_BLOCK_SIZE nonzeros (Cs beyond kEnd is stale).
for (int k = 0; k < kEnd; k++) blocksum += Cs[k];
//blocksum = 2;
}
__syncthreads();
//Cs[threadIdx.x] = 0;
//__syncthreads();
}//for b
__syncthreads();
if(threadIdx.x == 0) pResultElements[blockIdx.x] = blocksum;
__syncthreads();
} | a387a1115742ee434e82fa1ff2a9beec23c02329.cu | #include "cuda.h"
#include <stdio.h>
#include "projektcuda.h"
#include "project_comm.h"
/* Kernel to multiply a sparse matrix by a dense vector on the GPU */
/*
inputs: sparse matrix pSparseMatrix, dense vector pVector
output: vector pResultVector
C = A * B
description:
each row of A occupies one block; gridDim.x is expected to be at least the
row count of A (the kernel never advances gridIndex past 0)
*/
/** Sparse (CSR-style) matrix-vector multiply: pResultVector = pSparseMatrix * pVector.
 *
 * Block b reduces the nonzeros of row b (indices pRow[b] .. pRow[b+1]) in
 * chunks of VECTOR_BLOCK_SIZE through the shared buffer Cs; thread 0 of the
 * block accumulates each chunk into blocksum and writes the row result.
 * NOTE(review): gridDim.x must be >= pSparseMatrix.m - gridIndex stays 0,
 * so rows beyond gridDim.x are never processed; confirm the launch config.
 */
__global__ void sparseMatrixMul(t_FullMatrix pResultVector,t_SparseMatrix pSparseMatrix, t_FullMatrix pVector)
{
__shared__ float Cs[VECTOR_BLOCK_SIZE];//VECTOR_BLOCK_SIZE should equal blockDim.x (512)
//define gridIndex, if gridDim < mA, gridIndex > 0;
int gridIndex = 0;
//int idx = gridIndex*gridDim.x + blockIdx.x*blockDim.x+threadIdx.x;
t_ve *pMatrixElements, *pVectorElements, *pResultElements;
unsigned int m, n;//, i, j;
unsigned int *pRow, *pCol;
//unsigned int colbegin, colend;
pMatrixElements = pSparseMatrix.pNZElement;
pVectorElements = pVector.pElement;
pResultElements = pResultVector.pElement;
m = pSparseMatrix.m;
n = pSparseMatrix.n;
int aBegin = 0;
int aEnd = pSparseMatrix.m;
int bBegin = 0;
//int aStep = gridDim.x;
int bStep = VECTOR_BLOCK_SIZE; // blockDim.x
//int aEnd = mA;
int bEnd;
//== sanity-check argument sizes (uniform per block, so the early returns
//== below are taken by all threads together) =============================
if(m != pResultVector.m*(pResultVector.n)){
//printf("Result Vector does not match the Matrix\n");
return;
}
if(n != pVector.m*(pVector.n)){
//printf("input Vector does not match the Matrix\n");
return;
}
//pRow and pCol may should in share memory or texture
pRow = pSparseMatrix.pRow;
pCol = pSparseMatrix.pCol;
//clear this block's output slot before accumulating
if(threadIdx.x==0){
pResultElements[blockIdx.x]=0;
//C[gridIndex*gridDim.x+blockIdx.x]=0;
}
//following is operations within one block
// initialize the dot product for each row in A and vector B
t_ve blocksum = 0;
//row is processed in chunks of bStep nonzeros
bBegin = pRow[blockIdx.x];
bEnd = pRow[blockIdx.x + 1];
// NOTE(review): the loop condition depends on threadIdx.x, so the
// __syncthreads() calls inside execute under divergent control flow when
// (bEnd - bBegin) is not a multiple of bStep - potential deadlock/UB;
// consider a uniform chunk count with per-thread guards instead.
for(int b = bBegin; (b < bEnd)&&((threadIdx.x+b) < bEnd); b += bStep ) {
Cs[threadIdx.x] = 0;
__syncthreads();
if (( (gridIndex*gridDim.x+blockIdx.x)<aEnd)&&((b+threadIdx.x) < bEnd)) {
Cs[threadIdx.x] = pMatrixElements[b + threadIdx.x] * pVectorElements[pCol[b + threadIdx.x ]];
}
__syncthreads();
if(threadIdx.x == 0){
int kEnd = bEnd-b;
if(kEnd > VECTOR_BLOCK_SIZE)kEnd = VECTOR_BLOCK_SIZE;
//kEnd is clamped because the last chunk may cover fewer than
//VECTOR_BLOCK_SIZE nonzeros (Cs beyond kEnd is stale).
for (int k = 0; k < kEnd; k++) blocksum += Cs[k];
//blocksum = 2;
}
__syncthreads();
//Cs[threadIdx.x] = 0;
//__syncthreads();
}//for b
__syncthreads();
if(threadIdx.x == 0) pResultElements[blockIdx.x] = blocksum;
__syncthreads();
} |
bf5a03e938b1c7ba1c41b38555bd9cb44453a3b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "../Variable/Variable.cu"
#include "../TripleQueue/TripleQueue.cu"
#include "../ErrorChecking/ErrorChecking.cu"
#include "../MemoryManagement/MemoryManagement.cu"
///////////////////////////////////////////////////////////////////////
////////////////////////HOST SIDE//////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Host-side owner of the device allocations used by DeviceVariableCollection:
// allocates in the constructor, frees in the destructor.
struct HostVariableCollection{
int* dMem; //device buffer holding all variable domains (nQueen*nQueen ints)
DeviceVariable* deviceVariableMem; //device array of DeviceVariable structs, one per variable
int* dMemlastValues; //device array of last assigned values, one per variable
int nQueen; //number of variables, which is also each domain's size
HostQueue hostQueue; //host-side propagation queue
__host__ HostVariableCollection(int); //allocates the device buffers above
__host__ ~HostVariableCollection(); //frees the device buffers
};
///////////////////////////////////////////////////////////////////////
// Allocates the device-side buffers (variable structs, last-value array and
// the flat domain memory); all sizes are derived from nQueen = nq.
__host__ HostVariableCollection::HostVariableCollection(int nq):
nQueen(nq),hostQueue(nq){
ErrorChecking::hostMessage("Warn::HostVariableCollection::constructor::ALLOCATION");
ErrorChecking::hostErrorCheck(hipMalloc((void**)&deviceVariableMem,sizeof(DeviceVariable)*nQueen),"HostVariableCollection::HostVariableCollection::DEVICE VARIABLE ALLOCATION");
ErrorChecking::hostErrorCheck(hipMalloc((void**)&dMemlastValues,sizeof(int)*nQueen),"HostVariableCollection::HostVariableCollection::LAST VALUE ALLOCATION");
ErrorChecking::hostErrorCheck(hipMalloc((void**)&dMem,sizeof(int)*nQueen*nQueen),"HostVariableCollection::HostVariableCollection::VARIABLE MEM ALLOCATION");
}
///////////////////////////////////////////////////////////////////////
// Frees the device buffers allocated in the constructor.
__host__ HostVariableCollection::~HostVariableCollection(){
	ErrorChecking::hostMessage("Warn::HostVariableCollection::destructor::DELLOCATION");
	// Error labels now name the buffer actually being freed (previously all
	// three reported "DEVICE VARIABLE DEALLOCATION"); labels mirror the
	// constructor's allocation labels. Stray double semicolons removed.
	ErrorChecking::hostErrorCheck(hipFree(deviceVariableMem),"HostVariableCollection::~HostVariableCollection::DEVICE VARIABLE DEALLOCATION");
	ErrorChecking::hostErrorCheck(hipFree(dMemlastValues),"HostVariableCollection::~HostVariableCollection::LAST VALUE DEALLOCATION");
	ErrorChecking::hostErrorCheck(hipFree(dMem),"HostVariableCollection::~HostVariableCollection::VARIABLE MEM DEALLOCATION");
}
///////////////////////////////////////////////////////////////////////
////////////////////////DEVICE SIDE////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Device-side view over the buffers owned by HostVariableCollection:
// variable domains (dMem), per-variable descriptors and the propagation queue.
struct DeviceVariableCollection{
int fullParallel; //flag selecting the parallel code path
int nQueen; //number of variables, which is also each domain's size
int* lastValues; //last value tried for each variable
int* dMem; //flat storage for all variable domains (nQueen*nQueen ints)
DeviceVariable* deviceVariable; //per-variable descriptors (domain slice + flags)
DeviceQueue deviceQueue; //propagation queue of triples
__device__ DeviceVariableCollection(); //trivial; use a parameterized ctor or init*()
__device__ DeviceVariableCollection(DeviceVariable*,Triple*, int*,int*,int); //wires buffers, initializes domains sequentially
__device__ void init(DeviceVariable*,Triple*,int*,int*,int); //thread-parallel setup (one element per thread)
__device__ void init2(DeviceVariable*,Triple*,int*,int*,int); //sequential setup (calling thread does all work)
__device__ void init3(DeviceVariable*,Triple*,int*,int*,int); //pointer wiring only; contents filled elsewhere
__device__ ~DeviceVariableCollection(); //trivial; buffers are owned elsewhere
__device__ DeviceVariableCollection& operator=(DeviceVariableCollection&); //block-parallel deep copy
__device__ bool isGround(); //true when every variable is assigned (ground)
__device__ bool isFailed(); //true when at least one variable is marked failed
__device__ void print(); //debug dump of variables and queue
};
///////////////////////////////////////////////////////////////////////
// Trivial default constructor: members are wired later via init*().
__device__ DeviceVariableCollection::DeviceVariableCollection(){}
///////////////////////////////////////////////////////////////////////
// Wires the externally allocated buffers and sequentially initializes every
// domain cell to 1 (presumably "value still available" - confirm against
// DeviceVariable) and every lastValue to 0.
__device__ DeviceVariableCollection::DeviceVariableCollection(DeviceVariable* dv,Triple* q, int* vm, int* lv, int nq):
fullParallel(true),nQueen(nq),deviceVariable(dv),deviceQueue(q,nq),lastValues(lv),dMem(vm){
for(int i = 0; i < nQueen*nQueen; ++i){
vm[i] = 1;
}
for (int i = 0; i < nQueen; ++i){
deviceVariable[i].init2(&vm[nQueen*i],nQueen);
lastValues[i]=0;
}
}
///////////////////////////////////////////////////////////////////////
// Thread-parallel setup: thread t initializes domain cell t and, when
// t < nQueen, variable t and its lastValue.
// NOTE(review): assumes the launching block has >= nQueen*nQueen threads and
// that callers synchronize before reading the collection - confirm.
__device__ void DeviceVariableCollection::init(DeviceVariable* dv,Triple* q, int* vm, int* lv, int nq){
dMem = vm;
fullParallel = true;
nQueen = nq;
deviceVariable = dv;
lastValues = lv;
deviceQueue.init(q,nq);
if(threadIdx.x < nQueen*nQueen){
vm[threadIdx.x] = 1;
}
if(threadIdx.x < nQueen){
deviceVariable[threadIdx.x].init2(&vm[nQueen*threadIdx.x],nQueen);
lastValues[threadIdx.x]=0;
}
}
///////////////////////////////////////////////////////////////////////
// Sequential setup: the calling thread performs the whole initialization
// loop itself (no work distribution across the block).
__device__ void DeviceVariableCollection::init2(DeviceVariable* dv,Triple* q, int* vm, int* lv, int nq){
fullParallel = true;
dMem = vm;
nQueen = nq;
deviceVariable = dv;
lastValues = lv;
deviceQueue.init(q,nq);
for (int i = 0; i < nQueen; ++i){
deviceVariable[i].init2(&vm[nQueen*i],nQueen);
lastValues[i]=0;
}
}
///////////////////////////////////////////////////////////////////////
// Pointer wiring only: attaches the buffers without touching their contents;
// intended for collections that are subsequently filled by a copy.
__device__ void DeviceVariableCollection::init3(DeviceVariable* dv,Triple* q, int* vm, int* lv, int nq){
fullParallel = true;
dMem = vm;
nQueen = nq;
deviceVariable = dv;
lastValues = lv;
deviceQueue.init(q,nq);
}
///////////////////////////////////////////////////////////////////////
// Trivial destructor: the collection does not own its buffers.
__device__ DeviceVariableCollection::~DeviceVariableCollection(){}
///////////////////////////////////////////////////////////////////////
// True when every variable has been assigned (ground != -1 for all).
__device__ bool DeviceVariableCollection::isGround(){
	int idx = 0;
	while(idx < nQueen){
		if(deviceVariable[idx].ground == -1)
			return false;
		++idx;
	}
	return true;
}
///////////////////////////////////////////////////////////////////////
// True when at least one variable is marked failed.
__device__ bool DeviceVariableCollection::isFailed(){
	int idx = 0;
	while(idx < nQueen){
		if(deviceVariable[idx].failed == 1)
			return true;
		++idx;
	}
	return false;
}
///////////////////////////////////////////////////////////////////////
// Block-parallel deep copy between two collections: the four copy segments
// (queue triples, domain memory, last values, variable flags) are packed
// into disjoint threadIdx.x ranges of one block; next1/next2/next3 are the
// 32-aligned start offsets of segments 2-4.
// NOTE(review): requires blockDim.x >= next3 + nQueen, and the count copy
// is pinned to threadIdx.x == 1023, implying a 1024-thread block - confirm.
__device__ void externCopy(DeviceVariableCollection& to,DeviceVariableCollection& other){
__shared__ int nQueen;
__shared__ int next1;
__shared__ int next2;
__shared__ int next3;
nQueen = to.nQueen;
next1 = ((((int(3*nQueen*nQueen/32)+1)*32)-3*nQueen*nQueen)+3*nQueen*nQueen);
next2 = ((((int((next1+nQueen*nQueen)/32)+1)*32)-(next1+nQueen*nQueen))+(next1+nQueen*nQueen));
next3 = ((((int((next2+nQueen)/32)+1)*32)-(next2+nQueen))+(next2+nQueen));
if(threadIdx.x < 3*nQueen*nQueen)
to.deviceQueue.q[threadIdx.x] = other.deviceQueue.q[threadIdx.x];
if(threadIdx.x >= next1 && threadIdx.x < next1 + nQueen*nQueen)
to.dMem[threadIdx.x - next1] = other.dMem[threadIdx.x - next1];
if(threadIdx.x >= next2 && threadIdx.x < next2 + nQueen)
to.lastValues[threadIdx.x - next2] = other.lastValues[threadIdx.x- next2];
if(threadIdx.x >= next3 && threadIdx.x < next3 + nQueen){
to.deviceVariable[threadIdx.x - next3].ground = other.deviceVariable[threadIdx.x - next3].ground;
to.deviceVariable[threadIdx.x - next3].failed = other.deviceVariable[threadIdx.x - next3].failed;
to.deviceVariable[threadIdx.x - next3].changed = other.deviceVariable[threadIdx.x - next3].changed;
}
if(threadIdx.x == 1023)
to.deviceQueue.count = other.deviceQueue.count;
}
// Block-parallel deep copy. Unlike externCopy, each segment is indexed
// directly from threadIdx.x, so one thread may copy an element of several
// segments; blockDim.x must cover max(3*nQueen*nQueen, nQueen*nQueen).
// NOTE(review): the threadIdx.x == 1023 guard implies blockDim.x == 1024
// is assumed - confirm at call sites.
__device__ DeviceVariableCollection& DeviceVariableCollection::operator=(DeviceVariableCollection& other){
/* __shared__ int next1;
__shared__ int next2;
__shared__ int next3;
next1 = ((((int(3*nQueen*nQueen/32)+1)*32)-3*nQueen*nQueen)+3*nQueen*nQueen);
next2 = ((((int((next1+nQueen*nQueen)/32)+1)*32)-(next1+nQueen*nQueen))+(next1+nQueen*nQueen));
next3 = ((((int((next2+nQueen)/32)+1)*32)-(next2+nQueen))+(next2+nQueen));
if(threadIdx.x < 3*nQueen*nQueen)
this->deviceQueue.q[threadIdx.x] = other.deviceQueue.q[threadIdx.x];
if(threadIdx.x >= next1 && threadIdx.x < next1 + nQueen*nQueen)
this->dMem[threadIdx.x - next1] = other.dMem[threadIdx.x - next1];
if(threadIdx.x >= next2 && threadIdx.x < next2 + nQueen)
this->lastValues[threadIdx.x - next2] = other.lastValues[threadIdx.x- next2];
if(threadIdx.x >= next3 && threadIdx.x < next3 + nQueen){
this->deviceVariable[threadIdx.x - next3].ground = other.deviceVariable[threadIdx.x - next3].ground;
this->deviceVariable[threadIdx.x - next3].failed = other.deviceVariable[threadIdx.x - next3].failed;
this->deviceVariable[threadIdx.x - next3].changed = other.deviceVariable[threadIdx.x - next3].changed;
}
if(threadIdx.x == 1023)
this->deviceQueue.count = other.deviceQueue.count;*/
// Active implementation: direct segment-parallel copy (no offset packing).
if(threadIdx.x < 3*nQueen*nQueen)
this->deviceQueue.q[threadIdx.x] = other.deviceQueue.q[threadIdx.x];
if(threadIdx.x < nQueen*nQueen)
this->dMem[threadIdx.x] = other.dMem[threadIdx.x];
if(threadIdx.x < nQueen)
this->lastValues[threadIdx.x] = other.lastValues[threadIdx.x];
if(threadIdx.x < nQueen){
this->deviceVariable[threadIdx.x].ground = other.deviceVariable[threadIdx.x].ground;
this->deviceVariable[threadIdx.x].failed = other.deviceVariable[threadIdx.x].failed;
this->deviceVariable[threadIdx.x].changed = other.deviceVariable[threadIdx.x].changed;
}
if(threadIdx.x == 1023)
this->deviceQueue.count = other.deviceQueue.count;
return *this;
}
///////////////////////////////////////////////////////////////////////
// Debug dump: each variable prefixed by its last tried value, then the queue.
__device__ void DeviceVariableCollection::print(){
	int row = 0;
	while(row < nQueen){
		printf("[%d] ::: ",lastValues[row]);
		deviceVariable[row].print();
		++row;
	}
	deviceQueue.print();
	printf("\n");
}
///////////////////////////////////////////////////////////////////////
| bf5a03e938b1c7ba1c41b38555bd9cb44453a3b3.cu | #pragma once
#include "../Variable/Variable.cu"
#include "../TripleQueue/TripleQueue.cu"
#include "../ErrorChecking/ErrorChecking.cu"
#include "../MemoryManagement/MemoryManagement.cu"
///////////////////////////////////////////////////////////////////////
////////////////////////HOST SIDE//////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Host-side owner of the device allocations used by DeviceVariableCollection:
// allocates in the constructor, frees in the destructor.
struct HostVariableCollection{
int* dMem; //device buffer holding all variable domains (nQueen*nQueen ints)
DeviceVariable* deviceVariableMem; //device array of DeviceVariable structs, one per variable
int* dMemlastValues; //device array of last assigned values, one per variable
int nQueen; //number of variables, which is also each domain's size
HostQueue hostQueue; //host-side propagation queue
__host__ HostVariableCollection(int); //allocates the device buffers above
__host__ ~HostVariableCollection(); //frees the device buffers
};
///////////////////////////////////////////////////////////////////////
// Allocates the device-side buffers (variable structs, last-value array and
// the flat domain memory); all sizes are derived from nQueen = nq.
__host__ HostVariableCollection::HostVariableCollection(int nq):
nQueen(nq),hostQueue(nq){
ErrorChecking::hostMessage("Warn::HostVariableCollection::constructor::ALLOCATION");
ErrorChecking::hostErrorCheck(cudaMalloc((void**)&deviceVariableMem,sizeof(DeviceVariable)*nQueen),"HostVariableCollection::HostVariableCollection::DEVICE VARIABLE ALLOCATION");
ErrorChecking::hostErrorCheck(cudaMalloc((void**)&dMemlastValues,sizeof(int)*nQueen),"HostVariableCollection::HostVariableCollection::LAST VALUE ALLOCATION");
ErrorChecking::hostErrorCheck(cudaMalloc((void**)&dMem,sizeof(int)*nQueen*nQueen),"HostVariableCollection::HostVariableCollection::VARIABLE MEM ALLOCATION");
}
///////////////////////////////////////////////////////////////////////
// Frees the device buffers allocated in the constructor.
__host__ HostVariableCollection::~HostVariableCollection(){
	ErrorChecking::hostMessage("Warn::HostVariableCollection::destructor::DELLOCATION");
	// Error labels now name the buffer actually being freed (previously all
	// three reported "DEVICE VARIABLE DEALLOCATION"); labels mirror the
	// constructor's allocation labels. Stray double semicolons removed.
	ErrorChecking::hostErrorCheck(cudaFree(deviceVariableMem),"HostVariableCollection::~HostVariableCollection::DEVICE VARIABLE DEALLOCATION");
	ErrorChecking::hostErrorCheck(cudaFree(dMemlastValues),"HostVariableCollection::~HostVariableCollection::LAST VALUE DEALLOCATION");
	ErrorChecking::hostErrorCheck(cudaFree(dMem),"HostVariableCollection::~HostVariableCollection::VARIABLE MEM DEALLOCATION");
}
///////////////////////////////////////////////////////////////////////
////////////////////////DEVICE SIDE////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Device-side view over the buffers owned by HostVariableCollection:
// variable domains (dMem), per-variable descriptors and the propagation queue.
struct DeviceVariableCollection{
int fullParallel; //flag selecting the parallel code path
int nQueen; //number of variables, which is also each domain's size
int* lastValues; //last value tried for each variable
int* dMem; //flat storage for all variable domains (nQueen*nQueen ints)
DeviceVariable* deviceVariable; //per-variable descriptors (domain slice + flags)
DeviceQueue deviceQueue; //propagation queue of triples
__device__ DeviceVariableCollection(); //trivial; use a parameterized ctor or init*()
__device__ DeviceVariableCollection(DeviceVariable*,Triple*, int*,int*,int); //wires buffers, initializes domains sequentially
__device__ void init(DeviceVariable*,Triple*,int*,int*,int); //thread-parallel setup (one element per thread)
__device__ void init2(DeviceVariable*,Triple*,int*,int*,int); //sequential setup (calling thread does all work)
__device__ void init3(DeviceVariable*,Triple*,int*,int*,int); //pointer wiring only; contents filled elsewhere
__device__ ~DeviceVariableCollection(); //trivial; buffers are owned elsewhere
__device__ DeviceVariableCollection& operator=(DeviceVariableCollection&); //block-parallel deep copy
__device__ bool isGround(); //true when every variable is assigned (ground)
__device__ bool isFailed(); //true when at least one variable is marked failed
__device__ void print(); //debug dump of variables and queue
};
///////////////////////////////////////////////////////////////////////
// Trivial default constructor: members are wired later via init*().
__device__ DeviceVariableCollection::DeviceVariableCollection(){}
///////////////////////////////////////////////////////////////////////
// Wires the externally allocated buffers and sequentially initializes every
// domain cell to 1 (presumably "value still available" - confirm against
// DeviceVariable) and every lastValue to 0.
__device__ DeviceVariableCollection::DeviceVariableCollection(DeviceVariable* dv,Triple* q, int* vm, int* lv, int nq):
fullParallel(true),nQueen(nq),deviceVariable(dv),deviceQueue(q,nq),lastValues(lv),dMem(vm){
for(int i = 0; i < nQueen*nQueen; ++i){
vm[i] = 1;
}
for (int i = 0; i < nQueen; ++i){
deviceVariable[i].init2(&vm[nQueen*i],nQueen);
lastValues[i]=0;
}
}
///////////////////////////////////////////////////////////////////////
// Thread-parallel setup: thread t initializes domain cell t and, when
// t < nQueen, variable t and its lastValue.
// NOTE(review): assumes the launching block has >= nQueen*nQueen threads and
// that callers synchronize before reading the collection - confirm.
__device__ void DeviceVariableCollection::init(DeviceVariable* dv,Triple* q, int* vm, int* lv, int nq){
dMem = vm;
fullParallel = true;
nQueen = nq;
deviceVariable = dv;
lastValues = lv;
deviceQueue.init(q,nq);
if(threadIdx.x < nQueen*nQueen){
vm[threadIdx.x] = 1;
}
if(threadIdx.x < nQueen){
deviceVariable[threadIdx.x].init2(&vm[nQueen*threadIdx.x],nQueen);
lastValues[threadIdx.x]=0;
}
}
///////////////////////////////////////////////////////////////////////
// Sequential setup: the calling thread performs the whole initialization
// loop itself (no work distribution across the block).
__device__ void DeviceVariableCollection::init2(DeviceVariable* dv,Triple* q, int* vm, int* lv, int nq){
fullParallel = true;
dMem = vm;
nQueen = nq;
deviceVariable = dv;
lastValues = lv;
deviceQueue.init(q,nq);
for (int i = 0; i < nQueen; ++i){
deviceVariable[i].init2(&vm[nQueen*i],nQueen);
lastValues[i]=0;
}
}
///////////////////////////////////////////////////////////////////////
// Pointer wiring only: attaches the buffers without touching their contents;
// intended for collections that are subsequently filled by a copy.
__device__ void DeviceVariableCollection::init3(DeviceVariable* dv,Triple* q, int* vm, int* lv, int nq){
fullParallel = true;
dMem = vm;
nQueen = nq;
deviceVariable = dv;
lastValues = lv;
deviceQueue.init(q,nq);
}
///////////////////////////////////////////////////////////////////////
// Trivial destructor: the collection does not own its buffers.
__device__ DeviceVariableCollection::~DeviceVariableCollection(){}
///////////////////////////////////////////////////////////////////////
// True when every variable has been assigned (ground != -1 for all).
__device__ bool DeviceVariableCollection::isGround(){
	int idx = 0;
	while(idx < nQueen){
		if(deviceVariable[idx].ground == -1)
			return false;
		++idx;
	}
	return true;
}
///////////////////////////////////////////////////////////////////////
// True when at least one variable is marked failed.
__device__ bool DeviceVariableCollection::isFailed(){
	int idx = 0;
	while(idx < nQueen){
		if(deviceVariable[idx].failed == 1)
			return true;
		++idx;
	}
	return false;
}
///////////////////////////////////////////////////////////////////////
// Block-parallel deep copy between two collections: the four copy segments
// (queue triples, domain memory, last values, variable flags) are packed
// into disjoint threadIdx.x ranges of one block; next1/next2/next3 are the
// 32-aligned start offsets of segments 2-4.
// NOTE(review): requires blockDim.x >= next3 + nQueen, and the count copy
// is pinned to threadIdx.x == 1023, implying a 1024-thread block - confirm.
__device__ void externCopy(DeviceVariableCollection& to,DeviceVariableCollection& other){
__shared__ int nQueen;
__shared__ int next1;
__shared__ int next2;
__shared__ int next3;
nQueen = to.nQueen;
next1 = ((((int(3*nQueen*nQueen/32)+1)*32)-3*nQueen*nQueen)+3*nQueen*nQueen);
next2 = ((((int((next1+nQueen*nQueen)/32)+1)*32)-(next1+nQueen*nQueen))+(next1+nQueen*nQueen));
next3 = ((((int((next2+nQueen)/32)+1)*32)-(next2+nQueen))+(next2+nQueen));
if(threadIdx.x < 3*nQueen*nQueen)
to.deviceQueue.q[threadIdx.x] = other.deviceQueue.q[threadIdx.x];
if(threadIdx.x >= next1 && threadIdx.x < next1 + nQueen*nQueen)
to.dMem[threadIdx.x - next1] = other.dMem[threadIdx.x - next1];
if(threadIdx.x >= next2 && threadIdx.x < next2 + nQueen)
to.lastValues[threadIdx.x - next2] = other.lastValues[threadIdx.x- next2];
if(threadIdx.x >= next3 && threadIdx.x < next3 + nQueen){
to.deviceVariable[threadIdx.x - next3].ground = other.deviceVariable[threadIdx.x - next3].ground;
to.deviceVariable[threadIdx.x - next3].failed = other.deviceVariable[threadIdx.x - next3].failed;
to.deviceVariable[threadIdx.x - next3].changed = other.deviceVariable[threadIdx.x - next3].changed;
}
if(threadIdx.x == 1023)
to.deviceQueue.count = other.deviceQueue.count;
}
// Block-parallel deep copy. Unlike externCopy, each segment is indexed
// directly from threadIdx.x, so one thread may copy an element of several
// segments; blockDim.x must cover max(3*nQueen*nQueen, nQueen*nQueen).
// NOTE(review): the threadIdx.x == 1023 guard implies blockDim.x == 1024
// is assumed - confirm at call sites.
__device__ DeviceVariableCollection& DeviceVariableCollection::operator=(DeviceVariableCollection& other){
/* __shared__ int next1;
__shared__ int next2;
__shared__ int next3;
next1 = ((((int(3*nQueen*nQueen/32)+1)*32)-3*nQueen*nQueen)+3*nQueen*nQueen);
next2 = ((((int((next1+nQueen*nQueen)/32)+1)*32)-(next1+nQueen*nQueen))+(next1+nQueen*nQueen));
next3 = ((((int((next2+nQueen)/32)+1)*32)-(next2+nQueen))+(next2+nQueen));
if(threadIdx.x < 3*nQueen*nQueen)
this->deviceQueue.q[threadIdx.x] = other.deviceQueue.q[threadIdx.x];
if(threadIdx.x >= next1 && threadIdx.x < next1 + nQueen*nQueen)
this->dMem[threadIdx.x - next1] = other.dMem[threadIdx.x - next1];
if(threadIdx.x >= next2 && threadIdx.x < next2 + nQueen)
this->lastValues[threadIdx.x - next2] = other.lastValues[threadIdx.x- next2];
if(threadIdx.x >= next3 && threadIdx.x < next3 + nQueen){
this->deviceVariable[threadIdx.x - next3].ground = other.deviceVariable[threadIdx.x - next3].ground;
this->deviceVariable[threadIdx.x - next3].failed = other.deviceVariable[threadIdx.x - next3].failed;
this->deviceVariable[threadIdx.x - next3].changed = other.deviceVariable[threadIdx.x - next3].changed;
}
if(threadIdx.x == 1023)
this->deviceQueue.count = other.deviceQueue.count;*/
// Active implementation: direct segment-parallel copy (no offset packing).
if(threadIdx.x < 3*nQueen*nQueen)
this->deviceQueue.q[threadIdx.x] = other.deviceQueue.q[threadIdx.x];
if(threadIdx.x < nQueen*nQueen)
this->dMem[threadIdx.x] = other.dMem[threadIdx.x];
if(threadIdx.x < nQueen)
this->lastValues[threadIdx.x] = other.lastValues[threadIdx.x];
if(threadIdx.x < nQueen){
this->deviceVariable[threadIdx.x].ground = other.deviceVariable[threadIdx.x].ground;
this->deviceVariable[threadIdx.x].failed = other.deviceVariable[threadIdx.x].failed;
this->deviceVariable[threadIdx.x].changed = other.deviceVariable[threadIdx.x].changed;
}
if(threadIdx.x == 1023)
this->deviceQueue.count = other.deviceQueue.count;
return *this;
}
///////////////////////////////////////////////////////////////////////
// Debug dump: each variable prefixed by its last tried value, then the queue.
__device__ void DeviceVariableCollection::print(){
	int row = 0;
	while(row < nQueen){
		printf("[%d] ::: ",lastValues[row]);
		deviceVariable[row].print();
		++row;
	}
	deviceQueue.print();
	printf("\n");
}
///////////////////////////////////////////////////////////////////////
|
e6f8c70eb96f1d0ab6bd15999925a4ac3822e4c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// vecAddKernel00.cu
/// For CSU CS575 Spring 2011
/// Instructor: Wim Bohm
/// Based on code from the CUDA Programming Guide
/// By David Newman
/// Created: 2011-02-16
/// Last Modified: 2011-02-16 DVN
///
/// This Kernel adds two Vectors A and B in C on GPU
/// without using coalesced memory access.
///
/** Element-wise vector add: C = A + B.
 *
 * Each block owns a contiguous span of blockDim.x * N elements; within it,
 * thread t handles elements blockStart + t, blockStart + t + blockDim.x, ...
 * so consecutive threads touch consecutive addresses on every iteration.
 * NOTE(review): despite the file header, this access pattern IS coalesced;
 * the (now removed) unused local threadEndIndex suggests the loop was edited
 * from a per-thread-contiguous (non-coalesced) version - confirm intent.
 * Precondition: the grid covers the whole vector, i.e. total length equals
 * gridDim.x * blockDim.x * N (there is no tail guard).
 */
__global__ void AddVectors(const float* A, const float* B, float* C, int N)
{
    int blockStartIndex  = blockIdx.x * blockDim.x * N;
    int blockEndIndex    = (blockIdx.x + 1) * blockDim.x * N;
    // Loop variable scoped to the loop; unused threadEndIndex removed.
    for (int i = blockStartIndex + threadIdx.x; i < blockEndIndex; i += blockDim.x) {
        C[i] = A[i] + B[i];
    }
}
| e6f8c70eb96f1d0ab6bd15999925a4ac3822e4c0.cu | ///
/// vecAddKernel00.cu
/// For CSU CS575 Spring 2011
/// Instructor: Wim Bohm
/// Based on code from the CUDA Programming Guide
/// By David Newman
/// Created: 2011-02-16
/// Last Modified: 2011-02-16 DVN
///
/// This Kernel adds two Vectors A and B in C on GPU
/// without using coalesced memory access.
///
__global__ void AddVectors(const float* A, const float* B, float* C, int N)
{
int blockStartIndex = blockIdx.x * blockDim.x * N;
int blockEndIndex = ((blockIdx.x+1) * blockDim.x * N);
int threadStartIndex = blockStartIndex + (threadIdx.x);
int threadEndIndex = threadStartIndex + N;
int i;
for( i=threadStartIndex; i<blockEndIndex; i+=blockDim.x ){
C[i] = A[i] + B[i];
}
}
|
c4f80a7fb70876defece0a71005cb6629ef8787a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_CHANNELWISE_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/layers/misc/channelwise_softmax_impl.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
using Size3 = gpu_lib::array<size_t, 3>;
/** @brief Max functor for use with gpu_lib reductions. */
template <class T>
struct max_op
{
  // Return T rather than the global DataType alias: returning DataType
  // would silently convert the result whenever T is a different precision
  // (this functor is instantiated with TensorDataType in fp_max_kernel).
  __device__ __forceinline__ T operator()(const T& x1, const T& x2) const
  {
    return gpu_lib::max(x1, x2);
  }
};
} // namespace
// =========================================================
// Forward prop
// =========================================================
namespace {
/** @brief Max reduction over last dimension of 3D tensor.
*
* Each CUDA block computes the max over a subset of tensor entries
* in @c vals and outputs the result to @c maxvals. This should be
* repeated multiple times to fully reduce the last tensor dimension.
*
* Block dimensions: bdimx x 1 x 1
*
* Grid dimensions: (vals_dims[2] / bdimx) x vals_dims[1] x vals_dims[0]
*
* maxvals: vals_dims[0] x vals_dims[1] x (vals_dims[2] / bdimx)
*/
template <typename TensorDataType, size_t bdimx>
__global__ void fp_max_kernel(Size3 vals_dims,
                              const TensorDataType* __restrict__ vals_buffer,
                              Size3 vals_strides,
                              TensorDataType* __restrict__ maxvals_buffer,
                              Size3 maxvals_strides)
{
  // Indices and dimensions
  // Blocks are 1D (bdimx x 1 x 1), so y/z block dims are compile-time 1.
  constexpr size_t bdimy = 1;
  constexpr size_t bdimz = 1;
  const size_t tid = threadIdx.x;  // lane within the block
  const size_t bidx = blockIdx.x;  // selects this block's output slot in maxvals
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  const size_t nthreadsz = blockDim.z * gridDim.z;
  // Grid-stride loops over the two leading tensor dimensions. All threads
  // in a block share gidy/gidz (1D blocks), so every thread reaches the
  // block_reduce below together.
  for (size_t k = gidz; k < vals_dims[0]; k += nthreadsz) {
    for (size_t j = gidy; j < vals_dims[1]; j += nthreadsy) {
      // Find largest value for each thread
      TensorDataType maxval{-gpu_lib::infinity<TensorDataType>()};
      for (size_t i = gidx; i < vals_dims[2]; i += nthreadsx) {
        const auto& val =
          vals_buffer[k * vals_strides[0] + j * vals_strides[1] +
                      i * vals_strides[2]];
        maxval = gpu_lib::max(maxval, val);
      }
      // Find largest value for each block
      maxval = gpu_lib::block_reduce<bdimx,
                                     bdimy,
                                     bdimz,
                                     TensorDataType,
                                     max_op<TensorDataType>>(maxval);
      // Thread 0 publishes the block's partial max. Each block writes a
      // distinct slot (indexed by bidx), so no atomics are needed here.
      if (tid == 0) {
        const auto& pos = (k * maxvals_strides[0] + j * maxvals_strides[1] +
                           bidx * maxvals_strides[2]);
        maxvals_buffer[pos] = maxval;
      }
    }
  }
}
/** Compute softmax denominator.
*
* denom = sum( exp(x_i-shift) )
*
* Block dimensions: bdimx x 1 x 1
*
* Grid dimensions: (input_dims[2] / bdimx) x input_dims[1] x input_dims[0]
*
* shifts and denoms are fully-packed 2D tensors with dimensions of
* input_dims[0] x input_dims[1].
*/
template <typename TensorDataType, size_t bdimx>
__global__ void fp_denom_kernel(Size3 input_dims,
                                const TensorDataType* __restrict__ input_buffer,
                                Size3 input_strides,
                                const TensorDataType* __restrict__ shifts,
                                TensorDataType* __restrict__ denoms)
{
  // Indices and dimensions
  // Blocks are 1D (bdimx x 1 x 1), so y/z block dims are compile-time 1.
  constexpr size_t bdimy = 1;
  constexpr size_t bdimz = 1;
  const size_t tid = threadIdx.x;  // lane within the block
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  const size_t nthreadsz = blockDim.z * gridDim.z;
  // Grid-stride loops over the two leading tensor dimensions. All threads
  // in a block share gidy/gidz (1D blocks), so every thread reaches the
  // block_reduce below together.
  for (size_t k = gidz; k < input_dims[0]; k += nthreadsz) {
    for (size_t j = gidy; j < input_dims[1]; j += nthreadsy) {
      // Compute contribution from each thread
      const auto& shift = shifts[j + k * input_dims[1]];
      TensorDataType denom{0.};
      for (size_t i = gidx; i < input_dims[2]; i += nthreadsx) {
        const auto& x =
          input_buffer[k * input_strides[0] + j * input_strides[1] +
                       i * input_strides[2]];
        denom += gpu_lib::exp(x - shift);
      }
      // Compute contribution from each block
      denom = gpu_lib::block_reduce<bdimx, bdimy, bdimz>(denom);
      // Multiple blocks along x may map to the same channel, so partial
      // sums are accumulated atomically (denoms must be zero-initialized
      // by the caller).
      if (tid == 0) {
        gpu_lib::atomic_add(&denoms[j + k * input_dims[1]], denom);
      }
    }
  }
}
/** Compute softmax.
*
* y_i = exp(x_i-shift) / denom
*
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (input_dims[2] / bdimx) x (input_dims[1] / bdimy) x
* (input_dims[0] / bdimz)
*
* shifts and denoms are fully-packed 2D tensors with dimensions of
* input_dims[0] x input_dims[1].
*/
template <typename TensorDataType>
__global__ void
fp_output_kernel(Size3 input_dims,
                 const TensorDataType* __restrict__ input_buffer,
                 Size3 input_strides,
                 TensorDataType* __restrict__ output_buffer,
                 Size3 output_strides,
                 const TensorDataType* __restrict__ shifts,
                 const TensorDataType* __restrict__ denoms)
{
  // Global thread position and grid-wide stride along each axis
  const size_t gx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t stridex = blockDim.x * gridDim.x;
  const size_t stridey = blockDim.y * gridDim.y;
  const size_t stridez = blockDim.z * gridDim.z;
  // Grid-stride loops over every tensor entry
  for (size_t k = gz; k < input_dims[0]; k += stridez) {
    for (size_t j = gy; j < input_dims[1]; j += stridey) {
      // Shift and denominator are shared by the whole channel (j,k)
      const size_t chan = j + k * input_dims[1];
      const auto& shift = shifts[chan];
      const auto& denom = denoms[chan];
      for (size_t i = gx; i < input_dims[2]; i += stridex) {
        const size_t in_pos = k * input_strides[0] + j * input_strides[1] +
                              i * input_strides[2];
        const size_t out_pos = k * output_strides[0] + j * output_strides[1] +
                               i * output_strides[2];
        // y_i = exp(x_i - shift) / denom
        output_buffer[out_pos] =
          gpu_lib::exp(input_buffer[in_pos] - shift) / denom;
      }
    }
  }
}
/** @brief Forward prop */
template <typename TensorDataType>
void fp_impl(size_t num_channels,
             size_t channel_size,
             size_t channel_stride,
             const El::AbstractDistMatrix<TensorDataType>& input,
             El::AbstractDistMatrix<TensorDataType>& output)
{
  // Local matrices
  // Only the locally-owned portion of each distributed matrix is touched.
  using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>;
  const auto& local_input = dynamic_cast<const LocalMat&>(input.LockedMatrix());
  auto& local_output = dynamic_cast<LocalMat&>(output.Matrix());
  auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
                                     gpu::get_sync_info(local_input));
  // Dimensions
  const size_t local_mini_batch_size = local_input.Width();
  // const Size3 input_dims{local_mini_batch_size, num_channels, channel_size};
  // Compute softmax shifts
  // Shift by the per-channel max so that exp() cannot overflow
  // (numerically-stable softmax).
  LocalMat local_shifts;
  if (!local_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    // First pass: each block reduces a slice of the channel; partial
    // maxima land in maxvals (one slot per block along the channel dim).
    LocalMat maxvals(grid_dims.x * num_channels, local_mini_batch_size);
    hydrogen::gpu::LaunchKernel(
      fp_max_kernel<TensorDataType, block_size>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_input.LockedBuffer(),
      Size3{static_cast<size_t>(local_input.LDim()), channel_stride, 1},
      maxvals.Buffer(),
      Size3{static_cast<size_t>(maxvals.LDim()), grid_dims.x, 1});
    // Keep reducing the partial maxima until one value per channel remains.
    while (grid_dims.x > 1) {
      const size_t prev_dim = grid_dims.x;
      grid_dims.x = (prev_dim + block_size - 1) / block_size;
      const LocalMat prev_maxvals(std::move(maxvals));
      maxvals.Resize(grid_dims.x * num_channels, local_mini_batch_size);
      hydrogen::gpu::LaunchKernel(
        fp_max_kernel<TensorDataType, block_size>,
        grid_dims,
        block_dims,
        0,
        multisync,
        Size3{local_mini_batch_size, num_channels, prev_dim},
        prev_maxvals.LockedBuffer(),
        Size3{static_cast<size_t>(prev_maxvals.LDim()), prev_dim, 1},
        maxvals.Buffer(),
        Size3{static_cast<size_t>(maxvals.LDim()), grid_dims.x, 1});
    }
    local_shifts = std::move(maxvals);
  }
  // Compute softmax denominators
  // Must be zero-initialized: fp_denom_kernel accumulates with atomics.
  LocalMat local_denoms(num_channels, local_mini_batch_size);
  El::Zero(local_denoms);
  if (!local_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    hydrogen::gpu::LaunchKernel(
      fp_denom_kernel<TensorDataType, block_size>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_input.LockedBuffer(),
      Size3{static_cast<size_t>(local_input.LDim()), channel_stride, 1},
      local_shifts.LockedBuffer(),
      local_denoms.Buffer());
  }
  // Compute softmax
  // y_i = exp(x_i - shift) / denom, written directly to the output matrix.
  if (!local_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    hydrogen::gpu::LaunchKernel(
      fp_output_kernel<TensorDataType>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_input.LockedBuffer(),
      Size3{static_cast<size_t>(local_input.LDim()), channel_stride, 1},
      local_output.Buffer(),
      Size3{static_cast<size_t>(local_output.LDim()), channel_stride, 1},
      local_shifts.LockedBuffer(),
      local_denoms.LockedBuffer());
  }
}
} // namespace
template <typename TensorDataType, data_layout Layout, El::Device Device>
void channelwise_softmax_layer<TensorDataType, Layout, Device>::fp_compute()
{
  // Query the channel layout of the activation tensor, then run the
  // GPU forward-prop implementation on it.
  El::Int n_channels, ch_size, ch_stride;
  this->get_channel_size_and_stride(ch_size, ch_stride, n_channels);
  fp_impl(n_channels,
          ch_size,
          ch_stride,
          this->get_prev_activations(),
          this->get_activations());
}
// =========================================================
// Backprop
// =========================================================
namespace {
/** Compute dot product between output and gradient w.r.t. output.
*
* Block dimensions: bdimx x 1 x 1
*
* Grid dimensions: (output_dims[2] / bdimx) x output_dims[1] x output_dims[0]
*
* y_dot_dy is a fully-packed 2D tensor with dimensions of
* output_dims[0] x output_dims[1].
*/
template <typename TensorDataType, size_t bdimx>
__global__ void
bp_y_dot_dy_kernel(Size3 output_dims,
                   const TensorDataType* __restrict__ output_buffer,
                   Size3 output_strides,
                   const TensorDataType* __restrict__ output_grad_buffer,
                   Size3 output_grad_strides,
                   TensorDataType* __restrict__ y_dot_dy)
{
  // Indices and dimensions
  // Blocks are 1D (bdimx x 1 x 1), so y/z block dims are compile-time 1.
  constexpr size_t bdimy = 1;
  constexpr size_t bdimz = 1;
  const size_t tid = threadIdx.x;  // lane within the block
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  const size_t nthreadsz = blockDim.z * gridDim.z;
  // Grid-stride loops over the two leading tensor dimensions. All threads
  // in a block share gidy/gidz (1D blocks), so every thread reaches the
  // block_reduce below together.
  for (size_t k = gidz; k < output_dims[0]; k += nthreadsz) {
    for (size_t j = gidy; j < output_dims[1]; j += nthreadsy) {
      // Compute contribution from each thread
      TensorDataType _y_dot_dy{0.};
      for (size_t i = gidx; i < output_dims[2]; i += nthreadsx) {
        const auto& y =
          output_buffer[k * output_strides[0] + j * output_strides[1] +
                        i * output_strides[2]];
        const auto& dy = output_grad_buffer[k * output_grad_strides[0] +
                                            j * output_grad_strides[1] +
                                            i * output_grad_strides[2]];
        _y_dot_dy += y * dy;
      }
      // Compute contribution from each block
      _y_dot_dy = gpu_lib::block_reduce<bdimx, bdimy, bdimz>(_y_dot_dy);
      // Multiple blocks along x may map to the same channel, so partial
      // dot products are accumulated atomically (y_dot_dy must be
      // zero-initialized by the caller).
      if (tid == 0) {
        gpu_lib::atomic_add(&y_dot_dy[j + k * output_dims[1]], _y_dot_dy);
      }
    }
  }
}
/** Compute gradient w.r.t. input.
*
* dL/dx_i = y_i * ( dL/dy_i - dot(y,dL/dy) )
*
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (output_dims[2] / bdimx) x (output_dims[1] / bdimy) x
* (output_dims[0] / bdimz)
*
* y_dot_dy is a fully-packed 2D tensor with dimensions of
* output_dims[0] x output_dims[1].
*/
template <typename TensorDataType>
__global__ void
bp_input_grad_kernel(Size3 output_dims,
                     const TensorDataType* __restrict__ output_buffer,
                     Size3 output_strides,
                     const TensorDataType* __restrict__ output_grad_buffer,
                     Size3 output_grad_strides,
                     TensorDataType* __restrict__ input_grad_buffer,
                     Size3 input_grad_strides,
                     const TensorDataType* __restrict__ y_dot_dy)
{
  // Global thread position and grid-wide stride along each axis
  const size_t gx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t sx = blockDim.x * gridDim.x;
  const size_t sy = blockDim.y * gridDim.y;
  const size_t sz = blockDim.z * gridDim.z;
  // Grid-stride loops over every tensor entry
  for (size_t k = gz; k < output_dims[0]; k += sz) {
    for (size_t j = gy; j < output_dims[1]; j += sy) {
      // dot(y, dL/dy) is shared by the whole channel (j,k)
      const auto& dot = y_dot_dy[j + k * output_dims[1]];
      for (size_t i = gx; i < output_dims[2]; i += sx) {
        const size_t y_pos = k * output_strides[0] + j * output_strides[1] +
                             i * output_strides[2];
        const size_t dy_pos = k * output_grad_strides[0] +
                              j * output_grad_strides[1] +
                              i * output_grad_strides[2];
        const size_t dx_pos = k * input_grad_strides[0] +
                              j * input_grad_strides[1] +
                              i * input_grad_strides[2];
        // dL/dx_i = y_i * ( dL/dy_i - dot(y,dL/dy) )
        input_grad_buffer[dx_pos] =
          output_buffer[y_pos] * (output_grad_buffer[dy_pos] - dot);
      }
    }
  }
}
/** @brief Backprop */
template <typename TensorDataType>
void bp_impl(size_t num_channels,
             size_t channel_size,
             size_t channel_stride,
             const El::AbstractDistMatrix<TensorDataType>& output,
             const El::AbstractDistMatrix<TensorDataType>& output_grad,
             El::AbstractDistMatrix<TensorDataType>& input_grad)
{
  // Local matrices
  // Only the locally-owned portions of the distributed matrices are used.
  using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>;
  const auto& local_output =
    dynamic_cast<const LocalMat&>(output.LockedMatrix());
  const auto& local_output_grad =
    dynamic_cast<const LocalMat&>(output_grad.LockedMatrix());
  auto& local_input_grad = dynamic_cast<LocalMat&>(input_grad.Matrix());
  // Dimensions
  const size_t local_mini_batch_size = local_output.Width();
  // dot(y,dL/dy)
  // One dot product per (channel, sample). Zero-initialized because the
  // kernel accumulates partial results with atomics.
  LocalMat local_y_dot_dy(num_channels, local_mini_batch_size);
  El::Zero(local_y_dot_dy);
  auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_y_dot_dy),
                                     gpu::get_sync_info(local_output_grad),
                                     gpu::get_sync_info(local_output),
                                     gpu::get_sync_info(local_input_grad));
  if (!local_output.IsEmpty()) {
    // Grid x covers the channel entries; y/z cover channels and samples.
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    hydrogen::gpu::LaunchKernel(
      bp_y_dot_dy_kernel<TensorDataType, block_size>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_output.LockedBuffer(),
      Size3{static_cast<size_t>(local_output.LDim()), channel_stride, 1},
      local_output_grad.LockedBuffer(),
      Size3{static_cast<size_t>(local_output_grad.LDim()), channel_stride, 1},
      local_y_dot_dy.Buffer());
  }
  // Compute gradient w.r.t. input
  // dL/dx_i = y_i * ( dL/dy_i - dot(y,dL/dy) )
  if (!local_output.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    hydrogen::gpu::LaunchKernel(
      bp_input_grad_kernel<TensorDataType>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_output.LockedBuffer(),
      Size3{static_cast<size_t>(local_output.LDim()), channel_stride, 1},
      local_output_grad.LockedBuffer(),
      Size3{static_cast<size_t>(local_output_grad.LDim()), channel_stride, 1},
      local_input_grad.Buffer(),
      Size3{static_cast<size_t>(local_input_grad.LDim()), channel_stride, 1},
      local_y_dot_dy.LockedBuffer());
  }
}
} // namespace
template <typename TensorDataType, data_layout Layout, El::Device Device>
void channelwise_softmax_layer<TensorDataType, Layout, Device>::bp_compute()
{
  // Query the channel layout, then propagate error signals backward
  // through the channelwise softmax on GPU.
  El::Int n_channels, ch_size, ch_stride;
  this->get_channel_size_and_stride(ch_size, ch_stride, n_channels);
  bp_impl(n_channels,
          ch_size,
          ch_stride,
          this->get_activations(),
          this->get_prev_error_signals(),
          this->get_error_signals());
}
// =========================================================
// Explicit template instantiation
// =========================================================
#define PROTO(T) \
template class channelwise_softmax_layer<T, \
data_layout::DATA_PARALLEL, \
El::Device::GPU>;
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| c4f80a7fb70876defece0a71005cb6629ef8787a.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_CHANNELWISE_SOFTMAX_LAYER_INSTANTIATE
#include "lbann/layers/misc/channelwise_softmax_impl.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
using Size3 = gpu_lib::array<size_t, 3>;
/** @brief Max functor */
template <class T>
struct max_op
{
  /** Return the larger of @c x1 and @c x2.
   *
   *  Returns @c T (the template parameter) rather than the global
   *  @c DataType typedef, so instantiations with other data types do
   *  not silently convert the result through @c DataType.
   */
  __device__ __forceinline__ T operator()(const T& x1, const T& x2) const
  {
    return gpu_lib::max(x1, x2);
  }
};
} // namespace
// =========================================================
// Forward prop
// =========================================================
namespace {
/** @brief Max reduction over last dimension of 3D tensor.
*
* Each CUDA block computes the max over a subset of tensor entries
* in @c vals and outputs the result to @c maxvals. This should be
* repeated multiple times to fully reduce the last tensor dimension.
*
* Block dimensions: bdimx x 1 x 1
*
* Grid dimensions: (vals_dims[2] / bdimx) x vals_dims[1] x vals_dims[0]
*
* maxvals: vals_dims[0] x vals_dims[1] x (vals_dims[2] / bdimx)
*/
template <typename TensorDataType, size_t bdimx>
__global__ void fp_max_kernel(Size3 vals_dims,
                              const TensorDataType* __restrict__ vals_buffer,
                              Size3 vals_strides,
                              TensorDataType* __restrict__ maxvals_buffer,
                              Size3 maxvals_strides)
{
  // Indices and dimensions
  // Blocks are 1D (bdimx x 1 x 1), so y/z block dims are compile-time 1.
  constexpr size_t bdimy = 1;
  constexpr size_t bdimz = 1;
  const size_t tid = threadIdx.x;  // lane within the block
  const size_t bidx = blockIdx.x;  // selects this block's output slot in maxvals
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  const size_t nthreadsz = blockDim.z * gridDim.z;
  // Grid-stride loops over the two leading tensor dimensions. All threads
  // in a block share gidy/gidz (1D blocks), so every thread reaches the
  // block_reduce below together.
  for (size_t k = gidz; k < vals_dims[0]; k += nthreadsz) {
    for (size_t j = gidy; j < vals_dims[1]; j += nthreadsy) {
      // Find largest value for each thread
      TensorDataType maxval{-gpu_lib::infinity<TensorDataType>()};
      for (size_t i = gidx; i < vals_dims[2]; i += nthreadsx) {
        const auto& val =
          vals_buffer[k * vals_strides[0] + j * vals_strides[1] +
                      i * vals_strides[2]];
        maxval = gpu_lib::max(maxval, val);
      }
      // Find largest value for each block
      maxval = gpu_lib::block_reduce<bdimx,
                                     bdimy,
                                     bdimz,
                                     TensorDataType,
                                     max_op<TensorDataType>>(maxval);
      // Thread 0 publishes the block's partial max. Each block writes a
      // distinct slot (indexed by bidx), so no atomics are needed here.
      if (tid == 0) {
        const auto& pos = (k * maxvals_strides[0] + j * maxvals_strides[1] +
                           bidx * maxvals_strides[2]);
        maxvals_buffer[pos] = maxval;
      }
    }
  }
}
/** Compute softmax denominator.
*
* denom = sum( exp(x_i-shift) )
*
* Block dimensions: bdimx x 1 x 1
*
* Grid dimensions: (input_dims[2] / bdimx) x input_dims[1] x input_dims[0]
*
* shifts and denoms are fully-packed 2D tensors with dimensions of
* input_dims[0] x input_dims[1].
*/
template <typename TensorDataType, size_t bdimx>
__global__ void fp_denom_kernel(Size3 input_dims,
                                const TensorDataType* __restrict__ input_buffer,
                                Size3 input_strides,
                                const TensorDataType* __restrict__ shifts,
                                TensorDataType* __restrict__ denoms)
{
  // Indices and dimensions
  // Blocks are 1D (bdimx x 1 x 1), so y/z block dims are compile-time 1.
  constexpr size_t bdimy = 1;
  constexpr size_t bdimz = 1;
  const size_t tid = threadIdx.x;  // lane within the block
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  const size_t nthreadsz = blockDim.z * gridDim.z;
  // Grid-stride loops over the two leading tensor dimensions. All threads
  // in a block share gidy/gidz (1D blocks), so every thread reaches the
  // block_reduce below together.
  for (size_t k = gidz; k < input_dims[0]; k += nthreadsz) {
    for (size_t j = gidy; j < input_dims[1]; j += nthreadsy) {
      // Compute contribution from each thread
      const auto& shift = shifts[j + k * input_dims[1]];
      TensorDataType denom{0.};
      for (size_t i = gidx; i < input_dims[2]; i += nthreadsx) {
        const auto& x =
          input_buffer[k * input_strides[0] + j * input_strides[1] +
                       i * input_strides[2]];
        denom += gpu_lib::exp(x - shift);
      }
      // Compute contribution from each block
      denom = gpu_lib::block_reduce<bdimx, bdimy, bdimz>(denom);
      // Multiple blocks along x may map to the same channel, so partial
      // sums are accumulated atomically (denoms must be zero-initialized
      // by the caller).
      if (tid == 0) {
        gpu_lib::atomic_add(&denoms[j + k * input_dims[1]], denom);
      }
    }
  }
}
/** Compute softmax.
*
* y_i = exp(x_i-shift) / denom
*
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (input_dims[2] / bdimx) x (input_dims[1] / bdimy) x
* (input_dims[0] / bdimz)
*
* shifts and denoms are fully-packed 2D tensors with dimensions of
* input_dims[0] x input_dims[1].
*/
template <typename TensorDataType>
__global__ void
fp_output_kernel(Size3 input_dims,
                 const TensorDataType* __restrict__ input_buffer,
                 Size3 input_strides,
                 TensorDataType* __restrict__ output_buffer,
                 Size3 output_strides,
                 const TensorDataType* __restrict__ shifts,
                 const TensorDataType* __restrict__ denoms)
{
  // Global thread position and grid-wide stride along each axis
  const size_t gx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t stridex = blockDim.x * gridDim.x;
  const size_t stridey = blockDim.y * gridDim.y;
  const size_t stridez = blockDim.z * gridDim.z;
  // Grid-stride loops over every tensor entry
  for (size_t k = gz; k < input_dims[0]; k += stridez) {
    for (size_t j = gy; j < input_dims[1]; j += stridey) {
      // Shift and denominator are shared by the whole channel (j,k)
      const size_t chan = j + k * input_dims[1];
      const auto& shift = shifts[chan];
      const auto& denom = denoms[chan];
      for (size_t i = gx; i < input_dims[2]; i += stridex) {
        const size_t in_pos = k * input_strides[0] + j * input_strides[1] +
                              i * input_strides[2];
        const size_t out_pos = k * output_strides[0] + j * output_strides[1] +
                               i * output_strides[2];
        // y_i = exp(x_i - shift) / denom
        output_buffer[out_pos] =
          gpu_lib::exp(input_buffer[in_pos] - shift) / denom;
      }
    }
  }
}
/** @brief Forward prop */
template <typename TensorDataType>
void fp_impl(size_t num_channels,
             size_t channel_size,
             size_t channel_stride,
             const El::AbstractDistMatrix<TensorDataType>& input,
             El::AbstractDistMatrix<TensorDataType>& output)
{
  // Local matrices
  // Only the locally-owned portion of each distributed matrix is touched.
  using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>;
  const auto& local_input = dynamic_cast<const LocalMat&>(input.LockedMatrix());
  auto& local_output = dynamic_cast<LocalMat&>(output.Matrix());
  auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
                                     gpu::get_sync_info(local_input));
  // Dimensions
  const size_t local_mini_batch_size = local_input.Width();
  // const Size3 input_dims{local_mini_batch_size, num_channels, channel_size};
  // Compute softmax shifts
  // Shift by the per-channel max so that exp() cannot overflow
  // (numerically-stable softmax).
  LocalMat local_shifts;
  if (!local_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    // First pass: each block reduces a slice of the channel; partial
    // maxima land in maxvals (one slot per block along the channel dim).
    LocalMat maxvals(grid_dims.x * num_channels, local_mini_batch_size);
    hydrogen::gpu::LaunchKernel(
      fp_max_kernel<TensorDataType, block_size>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_input.LockedBuffer(),
      Size3{static_cast<size_t>(local_input.LDim()), channel_stride, 1},
      maxvals.Buffer(),
      Size3{static_cast<size_t>(maxvals.LDim()), grid_dims.x, 1});
    // Keep reducing the partial maxima until one value per channel remains.
    while (grid_dims.x > 1) {
      const size_t prev_dim = grid_dims.x;
      grid_dims.x = (prev_dim + block_size - 1) / block_size;
      const LocalMat prev_maxvals(std::move(maxvals));
      maxvals.Resize(grid_dims.x * num_channels, local_mini_batch_size);
      hydrogen::gpu::LaunchKernel(
        fp_max_kernel<TensorDataType, block_size>,
        grid_dims,
        block_dims,
        0,
        multisync,
        Size3{local_mini_batch_size, num_channels, prev_dim},
        prev_maxvals.LockedBuffer(),
        Size3{static_cast<size_t>(prev_maxvals.LDim()), prev_dim, 1},
        maxvals.Buffer(),
        Size3{static_cast<size_t>(maxvals.LDim()), grid_dims.x, 1});
    }
    local_shifts = std::move(maxvals);
  }
  // Compute softmax denominators
  // Must be zero-initialized: fp_denom_kernel accumulates with atomics.
  LocalMat local_denoms(num_channels, local_mini_batch_size);
  El::Zero(local_denoms);
  if (!local_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    hydrogen::gpu::LaunchKernel(
      fp_denom_kernel<TensorDataType, block_size>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_input.LockedBuffer(),
      Size3{static_cast<size_t>(local_input.LDim()), channel_stride, 1},
      local_shifts.LockedBuffer(),
      local_denoms.Buffer());
  }
  // Compute softmax
  // y_i = exp(x_i - shift) / denom, written directly to the output matrix.
  if (!local_input.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    hydrogen::gpu::LaunchKernel(
      fp_output_kernel<TensorDataType>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_input.LockedBuffer(),
      Size3{static_cast<size_t>(local_input.LDim()), channel_stride, 1},
      local_output.Buffer(),
      Size3{static_cast<size_t>(local_output.LDim()), channel_stride, 1},
      local_shifts.LockedBuffer(),
      local_denoms.LockedBuffer());
  }
}
} // namespace
template <typename TensorDataType, data_layout Layout, El::Device Device>
void channelwise_softmax_layer<TensorDataType, Layout, Device>::fp_compute()
{
  // Query the channel layout of the activation tensor, then run the
  // GPU forward-prop implementation on it.
  El::Int n_channels, ch_size, ch_stride;
  this->get_channel_size_and_stride(ch_size, ch_stride, n_channels);
  fp_impl(n_channels,
          ch_size,
          ch_stride,
          this->get_prev_activations(),
          this->get_activations());
}
// =========================================================
// Backprop
// =========================================================
namespace {
/** Compute dot product between output and gradient w.r.t. output.
*
* Block dimensions: bdimx x 1 x 1
*
* Grid dimensions: (output_dims[2] / bdimx) x output_dims[1] x output_dims[0]
*
* y_dot_dy is a fully-packed 2D tensor with dimensions of
* output_dims[0] x output_dims[1].
*/
template <typename TensorDataType, size_t bdimx>
__global__ void
bp_y_dot_dy_kernel(Size3 output_dims,
                   const TensorDataType* __restrict__ output_buffer,
                   Size3 output_strides,
                   const TensorDataType* __restrict__ output_grad_buffer,
                   Size3 output_grad_strides,
                   TensorDataType* __restrict__ y_dot_dy)
{
  // Indices and dimensions
  // Blocks are 1D (bdimx x 1 x 1), so y/z block dims are compile-time 1.
  constexpr size_t bdimy = 1;
  constexpr size_t bdimz = 1;
  const size_t tid = threadIdx.x;  // lane within the block
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gidz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  const size_t nthreadsz = blockDim.z * gridDim.z;
  // Grid-stride loops over the two leading tensor dimensions. All threads
  // in a block share gidy/gidz (1D blocks), so every thread reaches the
  // block_reduce below together.
  for (size_t k = gidz; k < output_dims[0]; k += nthreadsz) {
    for (size_t j = gidy; j < output_dims[1]; j += nthreadsy) {
      // Compute contribution from each thread
      TensorDataType _y_dot_dy{0.};
      for (size_t i = gidx; i < output_dims[2]; i += nthreadsx) {
        const auto& y =
          output_buffer[k * output_strides[0] + j * output_strides[1] +
                        i * output_strides[2]];
        const auto& dy = output_grad_buffer[k * output_grad_strides[0] +
                                            j * output_grad_strides[1] +
                                            i * output_grad_strides[2]];
        _y_dot_dy += y * dy;
      }
      // Compute contribution from each block
      _y_dot_dy = gpu_lib::block_reduce<bdimx, bdimy, bdimz>(_y_dot_dy);
      // Multiple blocks along x may map to the same channel, so partial
      // dot products are accumulated atomically (y_dot_dy must be
      // zero-initialized by the caller).
      if (tid == 0) {
        gpu_lib::atomic_add(&y_dot_dy[j + k * output_dims[1]], _y_dot_dy);
      }
    }
  }
}
/** Compute gradient w.r.t. input.
*
* dL/dx_i = y_i * ( dL/dy_i - dot(y,dL/dy) )
*
* Block dimensions: bdimx x bdimy x bdimz
*
* Grid dimensions: (output_dims[2] / bdimx) x (output_dims[1] / bdimy) x
* (output_dims[0] / bdimz)
*
* y_dot_dy is a fully-packed 2D tensor with dimensions of
* output_dims[0] x output_dims[1].
*/
template <typename TensorDataType>
__global__ void
bp_input_grad_kernel(Size3 output_dims,
                     const TensorDataType* __restrict__ output_buffer,
                     Size3 output_strides,
                     const TensorDataType* __restrict__ output_grad_buffer,
                     Size3 output_grad_strides,
                     TensorDataType* __restrict__ input_grad_buffer,
                     Size3 input_grad_strides,
                     const TensorDataType* __restrict__ y_dot_dy)
{
  // Global thread position and grid-wide stride along each axis
  const size_t gx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t gz = threadIdx.z + blockIdx.z * blockDim.z;
  const size_t sx = blockDim.x * gridDim.x;
  const size_t sy = blockDim.y * gridDim.y;
  const size_t sz = blockDim.z * gridDim.z;
  // Grid-stride loops over every tensor entry
  for (size_t k = gz; k < output_dims[0]; k += sz) {
    for (size_t j = gy; j < output_dims[1]; j += sy) {
      // dot(y, dL/dy) is shared by the whole channel (j,k)
      const auto& dot = y_dot_dy[j + k * output_dims[1]];
      for (size_t i = gx; i < output_dims[2]; i += sx) {
        const size_t y_pos = k * output_strides[0] + j * output_strides[1] +
                             i * output_strides[2];
        const size_t dy_pos = k * output_grad_strides[0] +
                              j * output_grad_strides[1] +
                              i * output_grad_strides[2];
        const size_t dx_pos = k * input_grad_strides[0] +
                              j * input_grad_strides[1] +
                              i * input_grad_strides[2];
        // dL/dx_i = y_i * ( dL/dy_i - dot(y,dL/dy) )
        input_grad_buffer[dx_pos] =
          output_buffer[y_pos] * (output_grad_buffer[dy_pos] - dot);
      }
    }
  }
}
/** @brief Backprop */
template <typename TensorDataType>
void bp_impl(size_t num_channels,
             size_t channel_size,
             size_t channel_stride,
             const El::AbstractDistMatrix<TensorDataType>& output,
             const El::AbstractDistMatrix<TensorDataType>& output_grad,
             El::AbstractDistMatrix<TensorDataType>& input_grad)
{
  // Local matrices
  // Only the locally-owned portions of the distributed matrices are used.
  using LocalMat = El::Matrix<TensorDataType, El::Device::GPU>;
  const auto& local_output =
    dynamic_cast<const LocalMat&>(output.LockedMatrix());
  const auto& local_output_grad =
    dynamic_cast<const LocalMat&>(output_grad.LockedMatrix());
  auto& local_input_grad = dynamic_cast<LocalMat&>(input_grad.Matrix());
  // Dimensions
  const size_t local_mini_batch_size = local_output.Width();
  // dot(y,dL/dy)
  // One dot product per (channel, sample). Zero-initialized because the
  // kernel accumulates partial results with atomics.
  LocalMat local_y_dot_dy(num_channels, local_mini_batch_size);
  El::Zero(local_y_dot_dy);
  auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_y_dot_dy),
                                     gpu::get_sync_info(local_output_grad),
                                     gpu::get_sync_info(local_output),
                                     gpu::get_sync_info(local_input_grad));
  if (!local_output.IsEmpty()) {
    // Grid x covers the channel entries; y/z cover channels and samples.
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    hydrogen::gpu::LaunchKernel(
      bp_y_dot_dy_kernel<TensorDataType, block_size>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_output.LockedBuffer(),
      Size3{static_cast<size_t>(local_output.LDim()), channel_stride, 1},
      local_output_grad.LockedBuffer(),
      Size3{static_cast<size_t>(local_output_grad.LDim()), channel_stride, 1},
      local_y_dot_dy.Buffer());
  }
  // Compute gradient w.r.t. input
  // dL/dx_i = y_i * ( dL/dy_i - dot(y,dL/dy) )
  if (!local_output.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (channel_size + block_size - 1) / block_size;
    grid_dims.y = num_channels;
    grid_dims.z = local_mini_batch_size;
    gpu_lib::clip_grid_dims(grid_dims);
    hydrogen::gpu::LaunchKernel(
      bp_input_grad_kernel<TensorDataType>,
      grid_dims,
      block_dims,
      0,
      multisync,
      Size3{local_mini_batch_size, num_channels, channel_size},
      local_output.LockedBuffer(),
      Size3{static_cast<size_t>(local_output.LDim()), channel_stride, 1},
      local_output_grad.LockedBuffer(),
      Size3{static_cast<size_t>(local_output_grad.LDim()), channel_stride, 1},
      local_input_grad.Buffer(),
      Size3{static_cast<size_t>(local_input_grad.LDim()), channel_stride, 1},
      local_y_dot_dy.LockedBuffer());
  }
}
} // namespace
// Backprop entry point for the channelwise softmax layer: queries the
// channel layout from the layer, then delegates to the file-local
// bp_impl() kernel driver with the forward activations, the incoming
// error signals, and the error-signal buffer to fill.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void channelwise_softmax_layer<TensorDataType, Layout, Device>::bp_compute()
{
El::Int num_channels, channel_size, channel_stride;
// NOTE(review): arguments are passed as (size, stride, count) while the
// locals are declared in (count, size, stride) order -- confirm against
// the declaration of get_channel_size_and_stride.
this->get_channel_size_and_stride(channel_size, channel_stride, num_channels);
bp_impl(num_channels,
channel_size,
channel_stride,
this->get_activations(),
this->get_prev_error_signals(),
this->get_error_signals());
}
// =========================================================
// Explicit template instantiation
// =========================================================
#define PROTO(T) \
template class channelwise_softmax_layer<T, \
data_layout::DATA_PARALLEL, \
El::Device::GPU>;
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
537836ab380266a63ddd0deabf1bcde46034da4a.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/OpMathType.h>
#include <type_traits>
namespace at::native {
// Element-wise logical NOT: writes !a (as bool) for every input element.
void logical_not_kernel_cuda(TensorIteratorBase& iter) {
// error check -- this is just ensuring we don't dispatch on types that aren't in ALL_TYPES_AND_COMPLEX_AND3(...)
// so we don't have to maintain a separate list or to do double dispatch.
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kHalf, kBFloat16, iter.dtype(0), "logical_not_cuda", [&]() {});
// Dispatch on the input dtype (operand 1); the output dtype (operand 0)
// may differ, e.g. bool, which the lambda's -> bool return handles.
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kHalf, kBFloat16, iter.dtype(1), "logical_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> bool { return !a; });
});
}
// NB: Ignores the negative bit on tensors
// Kernel name used by the jiterator (runtime-compiled) path below; it
// must match the function name inside neg_string.
constexpr char neg_name[] = "neg_kernel";
// Element-wise negation. Complex dtypes use the jiterator path when it
// is compiled in (avoids instantiating complex math at build time);
// otherwise, and for all real dtypes, a plain gpu_kernel lambda is used.
void neg_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto neg_string = jiterator_stringify(
template <typename T>
T neg_kernel(T a) {
return -a;
}
); // neg_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "neg_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ neg_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, neg_string);
});
#else
// Fallback when the jiterator is disabled: same semantics, compiled
// ahead of time.
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "neg_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, dtype, "neg_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
}
}
// Element-wise sign. Bool is passed through unchanged (there is no
// negative bool); every other real dtype maps through c10::signum.
void sign_kernel_cuda(TensorIteratorBase& iter){
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a){
return a;
});
} else {
AT_DISPATCH_ALL_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "sign_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return c10::signum(a);
});
});
}
}
// Element-wise signbit -> bool.
void signbit_kernel_cuda(TensorIteratorBase& iter){
// NOTE: signbit does not always support integral arguments.
if (at::isIntegralType(iter.input_dtype(), /*includeBool=*/false)) {
// Integral path: emulate with is_negative (integers have no -0).
AT_DISPATCH_INTEGRAL_TYPES(iter.input_dtype(), "signbit_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> bool { return is_negative(a); });
});
} else {
// Floating path: widen to the opmath type so half/bfloat16 inputs hit
// a float overload of signbit.
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, ScalarType::Half, iter.input_dtype(), "signbit_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> bool { return signbit(opmath_t{a}); });
});
}
}
// Complex sign: z / |z| for nonzero z, and 0 at z == 0 (where the
// quotient would be undefined). Callable from host and device code.
template<typename T>
C10_HOST_DEVICE static inline c10::complex<T> sgn_wrapper(c10::complex<T> z) {
  const c10::complex<T> zero(0, 0);
  return z == zero ? zero : z / std::abs(z);
}
// Kernel name used by the jiterator path below; must match the function
// name inside sgn_string.
constexpr char sgn_name[] = "sgn_kernel";
// Element-wise complex sign (z / |z|, 0 at 0). Only complex dtypes are
// dispatched here; when the jiterator is enabled the string kernel is
// runtime-compiled, otherwise sgn_wrapper runs at opmath precision.
void sgn_kernel_cuda(TensorIteratorBase& iter){
auto dtype = iter.dtype();
#if AT_USE_JITERATOR()
static const auto sgn_string = jiterator_stringify(
template <typename T>
T sgn_kernel(T z) {
const T zero = T(0);
if (z == zero) {
return zero;
} else {
return z / std::abs(z);
}
}
); // sgn_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sgn_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ sgn_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, sgn_string);
});
#else
// Fallback when the jiterator is disabled: compute in the (wider)
// opmath type via sgn_wrapper, then narrow back to scalar_t.
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sgn_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return sgn_wrapper(opmath_t{a});
});
});
#endif
}
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
REGISTER_DISPATCH(sign_stub, &sign_kernel_cuda);
REGISTER_DISPATCH(signbit_stub, &signbit_kernel_cuda);
REGISTER_DISPATCH(sgn_stub, &sgn_kernel_cuda);
} // namespace at::native
| 537836ab380266a63ddd0deabf1bcde46034da4a.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/OpMathType.h>
#include <type_traits>
namespace at::native {
void logical_not_kernel_cuda(TensorIteratorBase& iter) {
// error check -- this is just ensuring we don't dispatch on types that aren't in ALL_TYPES_AND_COMPLEX_AND3(...)
// so we don't have to maintain a separate list or to do double dispatch.
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kHalf, kBFloat16, iter.dtype(0), "logical_not_cuda", [&]() {});
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kBool, kHalf, kBFloat16, iter.dtype(1), "logical_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> bool { return !a; });
});
}
// NB: Ignores the negative bit on tensors
constexpr char neg_name[] = "neg_kernel";
void neg_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto neg_string = jiterator_stringify(
template <typename T>
T neg_kernel(T a) {
return -a;
}
); // neg_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "neg_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ neg_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, neg_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "neg_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, dtype, "neg_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
}
}
void sign_kernel_cuda(TensorIteratorBase& iter){
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a){
return a;
});
} else {
AT_DISPATCH_ALL_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "sign_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return c10::signum(a);
});
});
}
}
void signbit_kernel_cuda(TensorIteratorBase& iter){
// NOTE: signbit does not always support integral arguments.
if (at::isIntegralType(iter.input_dtype(), /*includeBool=*/false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.input_dtype(), "signbit_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> bool { return is_negative(a); });
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, ScalarType::Half, iter.input_dtype(), "signbit_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> bool { return signbit(opmath_t{a}); });
});
}
}
// Complex sign helper: returns z / |z| for nonzero z and 0 at z == 0,
// where the quotient would be undefined. Callable from host and device.
template<typename T>
C10_HOST_DEVICE static inline c10::complex<T> sgn_wrapper(c10::complex<T> z) {
if (z == c10::complex<T>(0, 0)) {
return c10::complex<T>(0, 0);
} else {
return z / std::abs(z);
}
}
constexpr char sgn_name[] = "sgn_kernel";
void sgn_kernel_cuda(TensorIteratorBase& iter){
auto dtype = iter.dtype();
#if AT_USE_JITERATOR()
static const auto sgn_string = jiterator_stringify(
template <typename T>
T sgn_kernel(T z) {
const T zero = T(0);
if (z == zero) {
return zero;
} else {
return z / std::abs(z);
}
}
); // sgn_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sgn_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ sgn_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, sgn_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sgn_cuda", [&]() {
using opmath_t = at::opmath_type<scalar_t>;
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return sgn_wrapper(opmath_t{a});
});
});
#endif
}
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
REGISTER_DISPATCH(sign_stub, &sign_kernel_cuda);
REGISTER_DISPATCH(signbit_stub, &signbit_kernel_cuda);
REGISTER_DISPATCH(sgn_stub, &sgn_kernel_cuda);
} // namespace at::native
|
ed36cb0f6fb10fc41145e8416d903e0b97e1721d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// One thread per element: c[i] = a[i] + b[i]. The host launches a
// single block of `size` threads, so threadIdx.x alone indexes the
// arrays. There is no bounds guard (the array length is not a kernel
// parameter), so blockDim.x must not exceed the array length.
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
// Demo driver (named mainia rather than main -- presumably renamed to
// avoid a duplicate entry point; confirm how it is invoked). Adds two
// fixed 5-element vectors on the GPU, prints the result, and resets the
// device. Returns 0 on success, 1 on any CUDA/HIP failure.
int mainia()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Allocates three device buffers, copies a and b up, launches addKernel
// with one thread per element (single block), synchronizes, and copies
// the sum back into c. Every failure path funnels through the Error
// label so the device buffers are always released (freeing a still-null
// pointer there is harmless). Returns the first error encountered, or
// hipSuccess.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
// NOTE: a single block limits `size` to the device's max threads per
// block (typically 1024); fine for this 5-element demo.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| ed36cb0f6fb10fc41145e8416d903e0b97e1721d.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int mainia()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
bee6e7e7db6e5e4386bdd005bf2b54449b8884f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "degree.cuh"
namespace cugraph {
namespace snmg {
/**
* Single node multi-GPU method for degree calculation on a partitioned graph.
* @param x Indicates whether to compute in degree, out degree, or the sum of both.
* 0 = in + out degree
* 1 = in-degree
* 2 = out-degree
* @param part_off The vertex partitioning of the global graph
* @param off The offsets array of the local partition
* @param ind The indices array of the local partition
* @param degree Pointer to pointers to memory on each GPU for the result
* @return Error code
*/
// One OpenMP thread per GPU runs this on its local CSR partition; the
// per-GPU partial degrees (over ALL global vertices) are then combined
// with treeReduce and redistributed with treeBroadcast.
template<typename idx_t>
void snmg_degree(int x, size_t* part_off, idx_t* off, idx_t* ind, idx_t** degree) {
sync_all();
SNMGinfo env;
auto i = env.get_thread_num();
auto p = env.get_num_threads();
// Getting the global and local vertices and edges
size_t glob_v = part_off[p];
size_t loc_v = part_off[i + 1] - part_off[i];
idx_t tmp;
// off[loc_v] (the last CSR offset) is the local edge count; it lives on
// the device, hence the single-element copy back to host.
CUDA_TRY(hipMemcpy(&tmp, &off[loc_v], sizeof(idx_t), hipMemcpyDeviceToHost));
size_t loc_e = tmp;
// Allocating the local result array, and setting all entries to zero.
// NOTE(review): local_result is allocated with ALLOC_TRY but never
// freed in this function -- confirm whether the allocator or a caller
// reclaims it, otherwise this leaks per call.
idx_t* local_result;
ALLOC_TRY((void** )&local_result, glob_v * sizeof(idx_t), nullptr);
thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), local_result, local_result + glob_v, 0);
// In-degree
if (x == 1 || x == 0) {
dim3 nthreads, nblocks;
// One thread per local edge, grid capped at 32 blocks per SM.
nthreads.x = min(static_cast<idx_t>(loc_e), static_cast<idx_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<idx_t>((loc_e + nthreads.x - 1) / nthreads.x),
static_cast<idx_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( cugraph::detail::degree_coo<idx_t, idx_t>) , dim3(nblocks), dim3(nthreads), 0, 0, static_cast<idx_t>(loc_e),
static_cast<idx_t>(loc_e),
ind,
local_result);
CUDA_CHECK_LAST();
}
// Out-degree
if (x == 2 || x == 0) {
dim3 nthreads, nblocks;
// One thread per local vertex; results land at this partition's
// global vertex range (offset part_off[i]).
nthreads.x = min(static_cast<idx_t>(loc_v), static_cast<idx_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<idx_t>((loc_v + nthreads.x - 1) / nthreads.x),
static_cast<idx_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( cugraph::detail::degree_offsets<idx_t, idx_t>) , dim3(nblocks), dim3(nthreads), 0, 0, static_cast<idx_t>(loc_v),
static_cast<idx_t>(loc_e),
off,
local_result + part_off[i]);
CUDA_CHECK_LAST();
}
// Combining the local results into global results
sync_all();
treeReduce<idx_t, thrust::plus<idx_t> >(env, glob_v, local_result, degree);
// Broadcasting the global result to all GPUs
treeBroadcast(env, glob_v, local_result, degree);
}
template void snmg_degree<int>(int x, size_t* part_off, int* off, int* ind, int** degree);
// int64_t specialization: the degree kernels accumulate into the buffer
// reinterpreted as double (the <int64_t, double> template arguments and
// reinterpret_casts below), then type_convert turns the doubles back
// into int64 in place -- presumably to sidestep 64-bit integer atomics;
// confirm, and note this is only exact for degrees below 2^53.
template<>
void snmg_degree<int64_t>(int x,
size_t* part_off,
int64_t* off,
int64_t* ind,
int64_t** degree) {
sync_all();
SNMGinfo env;
auto i = env.get_thread_num();
auto p = env.get_num_threads();
// Getting the global and local vertices and edges
size_t glob_v = part_off[p];
size_t loc_v = part_off[i + 1] - part_off[i];
int64_t tmp;
// off[loc_v] (last CSR offset) is the local edge count, read from device.
CUDA_TRY(hipMemcpy(&tmp, &off[loc_v], sizeof(int64_t), hipMemcpyDeviceToHost));
size_t loc_e = tmp;
// Allocating the local result array, and setting all entries to zero.
// NOTE(review): local_result is never freed here -- same concern as the
// generic template.
int64_t* local_result;
ALLOC_TRY((void** )&local_result, glob_v * sizeof(int64_t), nullptr);
thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), local_result, local_result + glob_v, 0);
// In-degree
if (x == 1 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(loc_e), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((loc_e + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( cugraph::detail::degree_coo<int64_t, double>) , dim3(nblocks), dim3(nthreads), 0, 0, static_cast<int64_t>(loc_e),
static_cast<int64_t>(loc_e),
ind,
reinterpret_cast<double*>(local_result));
CUDA_CHECK_LAST();
}
// Out-degree
if (x == 2 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(loc_v), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((loc_v + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( cugraph::detail::degree_offsets<int64_t, double>) , dim3(nblocks), dim3(nthreads), 0, 0, static_cast<int64_t>(loc_v),
static_cast<int64_t>(loc_e),
off,
reinterpret_cast<double*>(local_result
+ part_off[i]));
CUDA_CHECK_LAST();
}
// Convert the values written as doubles back to int64:
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(glob_v), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((glob_v + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
hipLaunchKernelGGL(( cugraph::detail::type_convert<double, int64_t>) , dim3(nblocks), dim3(nthreads), 0, 0, reinterpret_cast<double*>(local_result), glob_v);
CUDA_CHECK_LAST();
// Combining the local results into global results
// NOTE(review): unlike the generic template there is no sync_all()
// before treeReduce here -- confirm whether that barrier is needed.
treeReduce<int64_t, thrust::plus<int64_t> >(env, glob_v, local_result, degree);
// Broadcasting the global result to all GPUs
treeBroadcast(env, glob_v, local_result, degree);
}
// Validates the gdf_column inputs, unpacks the per-GPU output buffers,
// and forwards to the typed snmg_degree worker above.
// NOTE(review): omp_get_num_threads() returns 1 outside an OpenMP
// parallel region -- this presumably runs inside one (one thread per
// GPU); confirm, otherwise only x_cols[0] is used. `idx_t* degree[p]`
// is also a variable-length array, a non-standard C++ extension.
template<typename idx_t>
void snmg_degree_impl(int x,
size_t* part_offsets,
gdf_column* off,
gdf_column* ind,
gdf_column** x_cols) {
CUGRAPH_EXPECTS(off->size > 0, "Invalid API parameter");
CUGRAPH_EXPECTS(ind->size > 0, "Invalid API parameter");
CUGRAPH_EXPECTS(off->dtype == ind->dtype, "Unsupported data type");
CUGRAPH_EXPECTS(off->null_count + ind->null_count == 0, "Column must be valid");
auto p = omp_get_num_threads();
idx_t* degree[p];
for (auto i = 0; i < p; ++i) {
CUGRAPH_EXPECTS(x_cols[i] != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(x_cols[i]->size > 0, "Invalid API parameter");
degree[i] = static_cast<idx_t*>(x_cols[i]->data);
}
snmg_degree(x,
part_offsets,
static_cast<idx_t*>(off->data),
static_cast<idx_t*>(ind->data),
degree);
}
} //namespace snmg
// Public entry point: null-checks the arguments, then dispatches on the
// offsets column's dtype to the typed implementation (32- or 64-bit
// vertex indices). Fails for any other dtype.
void snmg_degree(int x,
size_t* part_offsets,
gdf_column* off,
gdf_column* ind,
gdf_column** x_cols) {
CUGRAPH_EXPECTS(part_offsets != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(off != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(ind != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(x_cols != nullptr, "Invalid API parameter");
switch (off->dtype) {
case GDF_INT32:
return snmg::snmg_degree_impl<int32_t>(x, part_offsets, off, ind, x_cols);
case GDF_INT64:
return snmg::snmg_degree_impl<int64_t>(x, part_offsets, off, ind, x_cols);
default:
CUGRAPH_FAIL("Unsupported data type");
}
}
} // namespace cugraph
| bee6e7e7db6e5e4386bdd005bf2b54449b8884f0.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "degree.cuh"
namespace cugraph {
namespace snmg {
/**
* Single node multi-GPU method for degree calculation on a partitioned graph.
* @param x Indicates whether to compute in degree, out degree, or the sum of both.
* 0 = in + out degree
* 1 = in-degree
* 2 = out-degree
* @param part_off The vertex partitioning of the global graph
* @param off The offsets array of the local partition
* @param ind The indices array of the local partition
* @param degree Pointer to pointers to memory on each GPU for the result
* @return Error code
*/
template<typename idx_t>
void snmg_degree(int x, size_t* part_off, idx_t* off, idx_t* ind, idx_t** degree) {
sync_all();
SNMGinfo env;
auto i = env.get_thread_num();
auto p = env.get_num_threads();
// Getting the global and local vertices and edges
size_t glob_v = part_off[p];
size_t loc_v = part_off[i + 1] - part_off[i];
idx_t tmp;
CUDA_TRY(cudaMemcpy(&tmp, &off[loc_v], sizeof(idx_t), cudaMemcpyDeviceToHost));
size_t loc_e = tmp;
// Allocating the local result array, and setting all entries to zero.
idx_t* local_result;
ALLOC_TRY((void** )&local_result, glob_v * sizeof(idx_t), nullptr);
thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), local_result, local_result + glob_v, 0);
// In-degree
if (x == 1 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<idx_t>(loc_e), static_cast<idx_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<idx_t>((loc_e + nthreads.x - 1) / nthreads.x),
static_cast<idx_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
cugraph::detail::degree_coo<idx_t, idx_t> <<<nblocks, nthreads>>>(static_cast<idx_t>(loc_e),
static_cast<idx_t>(loc_e),
ind,
local_result);
CUDA_CHECK_LAST();
}
// Out-degree
if (x == 2 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<idx_t>(loc_v), static_cast<idx_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<idx_t>((loc_v + nthreads.x - 1) / nthreads.x),
static_cast<idx_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
cugraph::detail::degree_offsets<idx_t, idx_t> <<<nblocks, nthreads>>>(static_cast<idx_t>(loc_v),
static_cast<idx_t>(loc_e),
off,
local_result + part_off[i]);
CUDA_CHECK_LAST();
}
// Combining the local results into global results
sync_all();
treeReduce<idx_t, thrust::plus<idx_t> >(env, glob_v, local_result, degree);
// Broadcasting the global result to all GPUs
treeBroadcast(env, glob_v, local_result, degree);
}
template void snmg_degree<int>(int x, size_t* part_off, int* off, int* ind, int** degree);
template<>
void snmg_degree<int64_t>(int x,
size_t* part_off,
int64_t* off,
int64_t* ind,
int64_t** degree) {
sync_all();
SNMGinfo env;
auto i = env.get_thread_num();
auto p = env.get_num_threads();
// Getting the global and local vertices and edges
size_t glob_v = part_off[p];
size_t loc_v = part_off[i + 1] - part_off[i];
int64_t tmp;
CUDA_TRY(cudaMemcpy(&tmp, &off[loc_v], sizeof(int64_t), cudaMemcpyDeviceToHost));
size_t loc_e = tmp;
// Allocating the local result array, and setting all entries to zero.
int64_t* local_result;
ALLOC_TRY((void** )&local_result, glob_v * sizeof(int64_t), nullptr);
thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), local_result, local_result + glob_v, 0);
// In-degree
if (x == 1 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(loc_e), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((loc_e + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
cugraph::detail::degree_coo<int64_t, double> <<<nblocks, nthreads>>>(static_cast<int64_t>(loc_e),
static_cast<int64_t>(loc_e),
ind,
reinterpret_cast<double*>(local_result));
CUDA_CHECK_LAST();
}
// Out-degree
if (x == 2 || x == 0) {
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(loc_v), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((loc_v + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
cugraph::detail::degree_offsets<int64_t, double> <<<nblocks, nthreads>>>(static_cast<int64_t>(loc_v),
static_cast<int64_t>(loc_e),
off,
reinterpret_cast<double*>(local_result
+ part_off[i]));
CUDA_CHECK_LAST();
}
// Convert the values written as doubles back to int64:
dim3 nthreads, nblocks;
nthreads.x = min(static_cast<int64_t>(glob_v), static_cast<int64_t>(CUDA_MAX_KERNEL_THREADS));
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = min(static_cast<int64_t>((glob_v + nthreads.x - 1) / nthreads.x),
static_cast<int64_t>(env.get_num_sm() * 32));
nblocks.y = 1;
nblocks.z = 1;
cugraph::detail::type_convert<double, int64_t> <<<nblocks, nthreads>>>(reinterpret_cast<double*>(local_result), glob_v);
CUDA_CHECK_LAST();
// Combining the local results into global results
treeReduce<int64_t, thrust::plus<int64_t> >(env, glob_v, local_result, degree);
// Broadcasting the global result to all GPUs
treeBroadcast(env, glob_v, local_result, degree);
}
template<typename idx_t>
void snmg_degree_impl(int x,
size_t* part_offsets,
gdf_column* off,
gdf_column* ind,
gdf_column** x_cols) {
CUGRAPH_EXPECTS(off->size > 0, "Invalid API parameter");
CUGRAPH_EXPECTS(ind->size > 0, "Invalid API parameter");
CUGRAPH_EXPECTS(off->dtype == ind->dtype, "Unsupported data type");
CUGRAPH_EXPECTS(off->null_count + ind->null_count == 0, "Column must be valid");
auto p = omp_get_num_threads();
idx_t* degree[p];
for (auto i = 0; i < p; ++i) {
CUGRAPH_EXPECTS(x_cols[i] != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(x_cols[i]->size > 0, "Invalid API parameter");
degree[i] = static_cast<idx_t*>(x_cols[i]->data);
}
snmg_degree(x,
part_offsets,
static_cast<idx_t*>(off->data),
static_cast<idx_t*>(ind->data),
degree);
}
} //namespace snmg
void snmg_degree(int x,
size_t* part_offsets,
gdf_column* off,
gdf_column* ind,
gdf_column** x_cols) {
CUGRAPH_EXPECTS(part_offsets != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(off != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(ind != nullptr, "Invalid API parameter");
CUGRAPH_EXPECTS(x_cols != nullptr, "Invalid API parameter");
switch (off->dtype) {
case GDF_INT32:
return snmg::snmg_degree_impl<int32_t>(x, part_offsets, off, ind, x_cols);
case GDF_INT64:
return snmg::snmg_degree_impl<int64_t>(x, part_offsets, off, ind, x_cols);
default:
CUGRAPH_FAIL("Unsupported data type");
}
}
} // namespace cugraph
|
6e7b284dcc6fde0228272de8097bc6088c75e9bd.hip | // !!! This is a file automatically generated by hipify!!!
/*------------------------------------------------------------*/
/* author Kazuji HAGINOYA 2012.03.05 */
/* Keigo TANAKA *1 */
/* Noriyuki FUJIMOTO *1 */
/* *1 Department of mathematics and Information Sciences, */
/* Graduate School of Science, Osaka Prefecture University */
/*------------------------------------------------------------*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cutil_inline.h>
#include <nq_symG_kernel.cu>
#include <stdlib.h>
#include <windows.h>
#define nsize 19
#define BLKSZ 64
#define MPSZ 96
//#include "cuda_def.h"
//#include "cuda_dmy_main.cpp"
#define PoolSize 2500000
typedef struct {
int status;
int kbn;
int t;
void *buf;
} TCA;
int N,MASK,J,Mid,X0,MASKH;
unsigned long long *d_cnt;
int T_all,T8,T4,T2; /* 1/2*/
int Kline;
int kubun,Pcnt;
unsigned long long cnt,Total;
int4 *proper8,*proper4,*proper2; /* 1/2mask*/
int4 *d_proper;
unsigned int *d_counter;
unsigned int *counter;
int rBit[1 << 13];
int Bits[1 << 13];
DWORD WINAPI RunGPU(LPVOID);
HANDLE hThread;
DWORD dwThreadID;
CRITICAL_SECTION lock;
TCA tca;
dim3 block(BLKSZ,1,1);
dim3 grid(MPSZ,1,1);
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
// Block (polling with a 1 ms sleep) until tca->status equals Val.
// Returns 0 once matched; returns -1 if status goes negative before the
// match (used as a shutdown/abort signal). Every read of tca->status is
// done under the global critical section `lock`, which is released
// around the Sleep so the other thread can make progress.
int Wait(TCA *tca,int Val)
{
int rc = -1;
// printf("Wait %d %d\n",tca->status,Val);
EnterCriticalSection(&lock);
while(tca->status >= 0) {
if (tca->status == Val) { rc = 0; break; };
LeaveCriticalSection(&lock);
Sleep(1);
EnterCriticalSection(&lock);
}
LeaveCriticalSection(&lock);
// printf("Wait exit %d\n",tca->status);
return rc;
}
//-------------------------------------------------------------------------
//-------------------------------------------------------------------------
// Atomically transition tca->status from X to Y (compare-and-set under
// the global critical section). Returns 0 on success, -1 if the current
// status was not X (no change is made in that case).
int Swap(TCA *tca,int X,int Y)
{
int rc = -1;
// printf("Swap %d %d %d\n",tca->status,X,Y);
EnterCriticalSection(&lock);
if (tca->status == X) {
tca->status = Y;
rc = 0;
}
LeaveCriticalSection(&lock);
// printf("Swap status %d\n",tca->status);
return rc;
}
//-------------------------------------------------------------------------
// Hand a filled seed buffer to the GPU-control loop: publish (Kbn, count,
// buffer) in the shared TCA, flip status 0->1 to trigger processing, then
// block until the consumer flips it back to 0 (buffer free again).
void submit(int Kbn,int *T,int4 *proper)
{
    tca.kbn = Kbn;
    tca.t = *T;
    tca.buf = proper;
    Swap(&tca,0,1); //triger RunGPU
    Sleep(1);
    Wait(&tca,0); //wait for buf-empty
}
//-------------------------------------------------------------------------
// Append one partial solution (X,U,V plus packed metadata BM) to the seed
// pool `proper`, flushing the pool to the GPU first when it is full.
// The column mask is stored complemented (X ^ MASK).
void HStore(int Kbn,int X,int U,int V,int BM,int *T,int4 *proper)
{
    if (*T >= PoolSize) {
        submit(Kbn,T,proper);
        *T = 0;
    }
    // printf("        %d (%x %x %x)\n",*T,X,U&MASK,V);
    proper[*T].x = X ^ MASK;
    proper[*T].y = U;
    proper[*T].z = V;
    proper[*T].w = BM;
    (*T)++;
}
//-------------------------------------------------------------------------
// Reverse the low N bits of X (bit 0 becomes bit N-1 and so on);
// bits at position N and above are discarded.
int RVS(int X,int N)
{
    int out = 0;
    for (int bit = 0; bit < N; bit++) {
        out = (out << 1) | ((X >> bit) & 0x01);
    }
    return out;
}
//-------------------------------------------------------------------------
// Population count: number of set bits in X.
int Bitcount(int X)
{
    int count = 0;
    for (unsigned int w = (unsigned int)X; w != 0; w >>= 1) {
        count += (int)(w & 1u);
    }
    return count;
}
//-------------------------------------------------------------------------
// Depth-first placement of queens on the first N (= nsize>>1) rows.
//   nest : current row;  X : occupied columns;
//   U, V : left/right diagonal attack masks, re-shifted each row;
//   B    : bitmap of rows whose queen sits left of the middle column (b < Mid).
// Globals: Mid = middle-column bit, J = row that used the middle column
// (-1 if none), X0 = column bit chosen on row 0.
// Completed prefixes are appended to the proper8/proper4/proper2 pools via
// HStore(); main() weights their GPU counts by 8/4/2 respectively, so the
// pools correspond to symmetry classes of the final solutions.
void nq(int nest,int X,int U,int V,int B,int N)
{
    int b,m,K,JM,BM,A,BB,c;
    int XX,UU,VV;
    // Reached row Kline with the middle column already used: store the
    // prefix in the 8x-weighted pool.  .w packs (K << 16) | JM with
    // K = (rows left) * BLKSZ and JM = (1 << J) - 1.
    if ((nest >= Kline) && (J > 0)) {
        K = (N - nest) * BLKSZ;
        JM = (1 << J) - 1;
        BM = JM & ~(X | (U<<(N-nest)) | (V>>(N-nest)));
        if (BM)
            // HStore(0,X,U,V,(nest<<16)+J,&T8,proper8);
            HStore(0,X,U,V,(K<<16)+JM,&T8,proper8);
        return;
    }
    if (nest == N) {
        // if (X0 > Mid) return;
        // BM = Mid & ~(X | U | V);
        // if (BM)
        //    HStore(2,X,U,V,0,&T2,proper2);
        // return;
        //
        A = X & MASKH;            // columns used in the left half
        c = rBit[X >> (N+1)];     // right-half columns, mirrored onto the left
        if (Bits[A] == Bits[c]) {
            if (X0 > Mid) return; // prune: keep one representative of each mirror pair
            BM = Mid & ~(X | U | V);
            if (BM) {
                // HStore(2,X,U,V,0,&T2,proper2);  // D free
                // Place the row-N queen on the middle column and advance the
                // masks one row before storing.
                XX = X | Mid;
                UU = (U | Mid) << 1;
                VV = (V | Mid) >> 1;
                BM = MASK & ~(XX | UU | VV);
                HStore(2,XX,UU,VV,BM,&T2,proper2);  // D free
            }
            return;
        }
        if (Bits[A] < Bits[c]) {
            BM = Mid & ~(X | U | V);
            if (BM) {
                // if (A < B)  HStore(1,X,U,V,0,&T4,proper4);
                // if (A == B) HStore(2,X,U,V,0,&T2,proper2);
                XX = X | Mid;
                UU = (U | Mid) << 1;
                VV = (V | Mid) >> 1;
                BM = MASK & ~(XX | UU | VV);
                if (A < B) HStore(1,XX,UU,VV,BM,&T4,proper4);
                if (A == B) HStore(2,XX,UU,VV,BM,&T2,proper2);
            }
        }
        return;
    }
    // Try every free column on this row, lowest set bit first.
    m = MASK & ~(X | U | V);
    while(m != 0) {
        b = m & (-m);
        m = m ^ b;
        if (nest == 0) {
            if (b == Mid) continue;   // row 0 never takes the middle column
            X0 = b;
        }
        if (b == Mid) J = nest;
        BB = (b < Mid)? B | (1 << nest) : B ;
        nq(nest+1, X|b, (U|b)<<1, (V|b)>>1, BB, N);
        if (b == Mid) J = -1;
    }
}
//-------------------------------------------------------------------------
// Producer thread: precomputes the bit-reverse and popcount tables,
// enumerates all half-board partial solutions via nq(), flushes the three
// seed pools (a final empty batch marks end-of-work for main()), then
// signals termination by setting tca.status = -1.
DWORD WINAPI SeedsGen(LPVOID)
//void RunGPU(void)
{
    int k;
    printf("==== SeedsGen started ====\n");
    N = nsize >> 1;            // rows handled by the host-side search
    Mid = 1 << N;              // bit of the middle column (nsize = 2N+1 columns)
    MASK = (1 << nsize) - 1;   // full column mask
    MASKH= Mid - 1;            // mask of the left-half columns
    Kline= N - 1;
    // Kline= N - 2;
    J = -1;
    for (k=0; k<(1<<N); k++) { rBit[k] = RVS(k,N); Bits[k] = Bitcount(k); }
    T_all = 0;
    T8 = 0;
    T4 = 0;
    T2 = 0;
    nq(0,0,0,0,0,N);
    //flush buf
    submit(0,&T8,proper8);
    submit(1,&T4,proper4);
    submit(2,&T2,proper2);
    T8 = 0;
    submit(0,&T8,proper8);     // empty batch: end-of-work marker
    tca.status = -1; //SeedsGen completed & halt GPU-thread
    printf("==== SeedsGen terminated ====\n");
    Sleep(100);
    return 0;
}
//-------------------------------------------------------------------------
// Host driver: allocates pinned seed buffers and device memory, launches the
// SeedsGen producer thread, then consumes batches from the shared TCA: each
// batch is copied to the device, processed by the `assign` kernel, and the
// kernel's count is weighted by Maps[kbn] (8/4/2 per symmetry class) into
// Total.  An empty batch (T_all == 0) terminates the loop.
int main(int argc, char* argv[])
{
    const int Maps[] = { 8, 4, 2 };      // multiplicity per symmetry class
    int kbn,kubun;
    int4 *proper;
    printf("\n==== nq_symG ver.65   nsize=%d,MPSZ=%d,BLKSZ=%d,Queens=%d,PoolSize=%d\n",
        nsize,MPSZ,BLKSZ,Queens,PoolSize);
    unsigned int timer = 0;
    //temp modfied by deepgreen
    // u64 tick1,tick2;
    // printf("----- N = %d -----\n",nsize);
    // tick1 = clock();
    cutilCheckError( cutCreateTimer( &timer));
    cutilCheckError( cutStartTimer( timer));
    // proper8 = (int4*)malloc(sizeof(int4)*PoolSize);
    // proper4 = (int4*)malloc(sizeof(int4)*PoolSize);
    // proper2 = (int4*)malloc(sizeof(int4)*PoolSize);
    // Pinned host memory so the per-batch hipMemcpy is fast.
    hipHostMalloc((void**) &proper8,sizeof(int4)*PoolSize,hipHostMallocDefault);
    hipHostMalloc((void**) &proper4,sizeof(int4)*PoolSize,hipHostMallocDefault);
    hipHostMalloc((void**) &proper2,sizeof(int4)*PoolSize,hipHostMallocDefault);
    counter = (unsigned int*)malloc(sizeof(unsigned int));
    hipMalloc((void**) &d_cnt, sizeof(unsigned long long));
    hipMalloc((void**) &d_proper, sizeof(int4)*PoolSize);
    hipMalloc((void**) &d_counter, sizeof(unsigned int) );
    // d_cnt    = (unsigned long long *) malloc(sizeof(unsigned long long));
    // d_proper = (int4*) malloc(sizeof(int4)*PoolSize);
    // d_counter= (unsigned int *) malloc(sizeof(unsigned int));
    //***** thread launch section
    tca.status = 0; //initialize thread communication area
    InitializeCriticalSection(&lock);
    hThread=CreateThread(NULL, //default security attributes
        0, //default stack size
        SeedsGen, //function name
        0, // parameter
        0, // start the thread immediately after creation
        &dwThreadID);
    if (hThread) {
        // printf ("SeedsGen-Thread launched successfully\n");
        CloseHandle (hThread);
    }
    //****** GPU control section
    Total = 0;
    Pcnt = 0;
    while(tca.status >= 0) {
        if (Wait(&tca,1) < 0) break; //wait for buf-full
        kbn = tca.kbn;
        kubun = Maps[kbn];
        T_all = tca.t;
        proper= (int4*)tca.buf;
        if (T_all)
            hipMemcpy(d_proper, proper, sizeof(int4)*T_all, hipMemcpyHostToDevice);
        Swap(&tca,1,0); //release buf
        // printf("---- release buf  T_all = %d\n",T_all);
        if (T_all == 0) break;   // empty batch = end-of-work marker
        hipMemset(d_cnt, 0, sizeof(unsigned long long));
        counter[0] = BLKSZ * MPSZ; /* blocksize * multiprocessor */
        // counter[0] = 1;
        hipMemcpy(d_counter, counter,sizeof(unsigned int), hipMemcpyHostToDevice);
        //temp modified by deepgreen
        //hipLaunchKernelGGL(( assign), dim3(grid), dim3(block) , 0, 0, n, N_assign, d_cnt, T_all, d_proper, BLKSZ, d_counter);
        // assign(kbn, nsize, d_cnt, T_all, d_proper, 1, d_counter);
        hipLaunchKernelGGL(( assign), dim3(grid), dim3(block) , 0, 0, kbn, nsize, d_cnt, T_all, d_proper, BLKSZ, d_counter);
        cutilCheckMsg("Kernel execution failed");
        hipMemcpy(&cnt, d_cnt, sizeof(unsigned long long), hipMemcpyDeviceToHost);
        Total += kubun * cnt;    // weight by the symmetry multiplicity
        Pcnt++;
        if ((T_all != PoolSize) ||
            ((Pcnt & 0x03) == 1)) {
            printf("---- kick (%d) ---- %8d %9d %I64d %I64d\n",kbn,Pcnt,T_all,Total,cnt);
        }
    }
    //temp modfied by deepgreen
    cutilCheckError( cutStopTimer( timer));
    printf("Processing time: %f (s)\n", cutGetTimerValue( timer)/1000);
    cutilCheckError( cutDeleteTimer( timer));
    // tick2 = clock();
    // printf("Processing time: %d\n", (int)(tick2-tick1));
    // printf("");
    // printf("%llu\n", cnt*2);
    printf("%llu\n", Total);
    // free(proper);
    // free(proper8);
    // free(proper4);
    // free(proper2);
    hipHostFree(proper8);
    hipHostFree(proper4);
    hipHostFree(proper2);
    free(counter);
    hipFree(d_cnt);
    hipFree(d_proper);
    hipFree(d_counter);
    DeleteCriticalSection(&lock);
    // printf("SeedsGen terminated\n\n" );
    // CUT_EXIT(argc, argv);
    return 0;
}
| 6e7b284dcc6fde0228272de8097bc6088c75e9bd.cu | /*------------------------------------------------------------*/
/* author Kazuji HAGINOYA 2012.03.05 */
/* Keigo TANAKA *1 */
/* Noriyuki FUJIMOTO *1 */
/* *1 Department of mathematics and Information Sciences, */
/* Graduate School of Science, Osaka Prefecture University */
/*------------------------------------------------------------*/
#include <stdio.h>
#include <cuda_runtime.h>
#include <cutil_inline.h>
#include <nq_symG_kernel.cu>
#include <stdlib.h>
#include <windows.h>
#define nsize 19
#define BLKSZ 64
#define MPSZ 96
//#include "cuda_def.h"
//#include "cuda_dmy_main.cpp"
#define PoolSize 2500000
// Shared state between the SeedsGen producer thread and the GPU control loop
// in main(); serialized by the Win32 critical section `lock`.
typedef struct {
    int status;   // 0 = buffer free, 1 = buffer submitted, -1 = generator done
    int kbn;      // symmetry class of the batch (index into Maps[] in main)
    int t;        // number of int4 seeds in buf
    void *buf;    // host seed buffer (proper8 / proper4 / proper2)
} TCA;
int N,MASK,J,Mid,X0,MASKH;
unsigned long long *d_cnt;          // device: accumulated solution count
int T_all,T8,T4,T2;                 /* seed counts from the 1/2 partial solutions */
int Kline;
int kubun,Pcnt;
unsigned long long cnt,Total;
int4 *proper8,*proper4,*proper2;    /* mask arrays of the 1/2 partial solutions */
int4 *d_proper;                     // device copy of the current seed batch
unsigned int *d_counter;            // device: shared work counter for the kernel
unsigned int *counter;
int rBit[1 << 13];                  // rBit[x] = low N bits of x, bit-reversed
int Bits[1 << 13];                  // Bits[x] = popcount(x)
DWORD WINAPI RunGPU(LPVOID);
HANDLE hThread;
DWORD dwThreadID;
CRITICAL_SECTION lock;
TCA tca;
dim3 block(BLKSZ,1,1);              // BLKSZ threads per block
dim3 grid(MPSZ,1,1);                // MPSZ blocks
//-------------------------------------------------------------------------
// Busy-wait (1 ms sleeps, lock released while sleeping) until tca->status
// equals Val.  Returns 0 when seen, -1 when the producer has finished
// (status < 0) first.
int Wait(TCA *tca,int Val)
{
    int rc = -1;
    // printf("Wait %d %d\n",tca->status,Val);
    EnterCriticalSection(&lock);
    while(tca->status >= 0) {
        if (tca->status == Val) { rc = 0; break; };
        LeaveCriticalSection(&lock);
        Sleep(1);
        EnterCriticalSection(&lock);
    }
    LeaveCriticalSection(&lock);
    // printf("Wait exit %d\n",tca->status);
    return rc;
}
//-------------------------------------------------------------------------
// Compare-and-set of tca->status from X to Y under `lock`.
// Returns 0 on success, -1 when the status was not X.
int Swap(TCA *tca,int X,int Y)
{
    int rc = -1;
    // printf("Swap %d %d %d\n",tca->status,X,Y);
    EnterCriticalSection(&lock);
    if (tca->status == X) {
        tca->status = Y;
        rc = 0;
    }
    LeaveCriticalSection(&lock);
    // printf("Swap status %d\n",tca->status);
    return rc;
}
//-------------------------------------------------------------------------
// Publish a filled seed buffer (class Kbn, *T entries) in the shared TCA,
// flip status 0->1 to wake the GPU loop, then wait until it is consumed.
void submit(int Kbn,int *T,int4 *proper)
{
    tca.kbn = Kbn;
    tca.t = *T;
    tca.buf = proper;
    Swap(&tca,0,1); //triger RunGPU
    Sleep(1);
    Wait(&tca,0); //wait for buf-empty
}
//-------------------------------------------------------------------------
// Append one partial solution to pool `proper`, flushing the pool with
// submit() first when it is full.  The column mask is stored complemented
// (X ^ MASK); BM carries packed per-class metadata.
void HStore(int Kbn,int X,int U,int V,int BM,int *T,int4 *proper)
{
    if (*T >= PoolSize) {
        submit(Kbn,T,proper);
        *T = 0;
    }
    // printf("        %d (%x %x %x)\n",*T,X,U&MASK,V);
    proper[*T].x = X ^ MASK;
    proper[*T].y = U;
    proper[*T].z = V;
    proper[*T].w = BM;
    (*T)++;
}
//-------------------------------------------------------------------------
// Return X with its low N bits reversed; higher bits are dropped.
int RVS(int X,int N)
{
    int result = 0;
    int src = X;
    int k = N;
    while (k-- > 0) {
        result = (result << 1) | (src & 0x01);
        src >>= 1;
    }
    return result;
}
//-------------------------------------------------------------------------
// Count the set bits of X (Kernighan's clear-lowest-bit loop).
int Bitcount(int X)
{
    int bits = 0;
    int rest = X;
    while (rest != 0) {
        rest &= rest - 1;   // clear the lowest set bit
        bits++;
    }
    return bits;
}
//-------------------------------------------------------------------------
// Depth-first queen placement on the first N (= nsize>>1) rows.
//   nest : current row;  X : occupied columns;
//   U, V : left/right diagonal attack masks, re-shifted each row;
//   B    : bitmap of rows whose queen sits left of the middle column.
// Globals: Mid = middle-column bit, J = row that used the middle column
// (-1 if none), X0 = column bit chosen on row 0.  Prefixes are appended to
// the proper8/proper4/proper2 pools via HStore(); main() weights the GPU
// counts of the pools by 8/4/2, i.e. by symmetry class.
void nq(int nest,int X,int U,int V,int B,int N)
{
    int b,m,K,JM,BM,A,BB,c;
    int XX,UU,VV;
    // Reached row Kline with the middle column used: store for the
    // 8x-weighted pool.  .w packs (K << 16) | JM, K = rows-left * BLKSZ.
    if ((nest >= Kline) && (J > 0)) {
        K = (N - nest) * BLKSZ;
        JM = (1 << J) - 1;
        BM = JM & ~(X | (U<<(N-nest)) | (V>>(N-nest)));
        if (BM)
            // HStore(0,X,U,V,(nest<<16)+J,&T8,proper8);
            HStore(0,X,U,V,(K<<16)+JM,&T8,proper8);
        return;
    }
    if (nest == N) {
        // if (X0 > Mid) return;
        // BM = Mid & ~(X | U | V);
        // if (BM)
        //    HStore(2,X,U,V,0,&T2,proper2);
        // return;
        //
        A = X & MASKH;            // left-half columns used
        c = rBit[X >> (N+1)];     // right-half columns, mirrored
        if (Bits[A] == Bits[c]) {
            if (X0 > Mid) return; // prune: one representative per mirror pair
            BM = Mid & ~(X | U | V);
            if (BM) {
                // HStore(2,X,U,V,0,&T2,proper2);  // D free
                // Place a queen on the middle column and advance the masks.
                XX = X | Mid;
                UU = (U | Mid) << 1;
                VV = (V | Mid) >> 1;
                BM = MASK & ~(XX | UU | VV);
                HStore(2,XX,UU,VV,BM,&T2,proper2);  // D free
            }
            return;
        }
        if (Bits[A] < Bits[c]) {
            BM = Mid & ~(X | U | V);
            if (BM) {
                // if (A < B)  HStore(1,X,U,V,0,&T4,proper4);
                // if (A == B) HStore(2,X,U,V,0,&T2,proper2);
                XX = X | Mid;
                UU = (U | Mid) << 1;
                VV = (V | Mid) >> 1;
                BM = MASK & ~(XX | UU | VV);
                if (A < B) HStore(1,XX,UU,VV,BM,&T4,proper4);
                if (A == B) HStore(2,XX,UU,VV,BM,&T2,proper2);
            }
        }
        return;
    }
    // Try every free column on this row, lowest set bit first.
    m = MASK & ~(X | U | V);
    while(m != 0) {
        b = m & (-m);
        m = m ^ b;
        if (nest == 0) {
            if (b == Mid) continue;   // row 0 never takes the middle column
            X0 = b;
        }
        if (b == Mid) J = nest;
        BB = (b < Mid)? B | (1 << nest) : B ;
        nq(nest+1, X|b, (U|b)<<1, (V|b)>>1, BB, N);
        if (b == Mid) J = -1;
    }
}
//-------------------------------------------------------------------------
// Producer thread: fills the bit-reverse/popcount tables, enumerates all
// half-board partial solutions with nq(), flushes the three pools (ending
// with an empty batch as the end-of-work marker), then sets status = -1.
DWORD WINAPI SeedsGen(LPVOID)
//void RunGPU(void)
{
    int k;
    printf("==== SeedsGen started ====\n");
    N = nsize >> 1;            // rows handled host-side
    Mid = 1 << N;              // middle-column bit (nsize = 2N+1 columns)
    MASK = (1 << nsize) - 1;   // full column mask
    MASKH= Mid - 1;            // left-half column mask
    Kline= N - 1;
    // Kline= N - 2;
    J = -1;
    for (k=0; k<(1<<N); k++) { rBit[k] = RVS(k,N); Bits[k] = Bitcount(k); }
    T_all = 0;
    T8 = 0;
    T4 = 0;
    T2 = 0;
    nq(0,0,0,0,0,N);
    //flush buf
    submit(0,&T8,proper8);
    submit(1,&T4,proper4);
    submit(2,&T2,proper2);
    T8 = 0;
    submit(0,&T8,proper8);     // empty batch: end-of-work marker
    tca.status = -1; //SeedsGen completed & halt GPU-thread
    printf("==== SeedsGen terminated ====\n");
    Sleep(100);
    return 0;
}
//-------------------------------------------------------------------------
// Host driver: pinned seed buffers + device memory, SeedsGen producer thread,
// then a consume loop: copy each seed batch to the device, run the `assign`
// kernel, and add Maps[kbn] * count (8/4/2 per symmetry class) into Total.
// An empty batch (T_all == 0) terminates the loop.
int main(int argc, char* argv[])
{
    const int Maps[] = { 8, 4, 2 };      // multiplicity per symmetry class
    int kbn,kubun;
    int4 *proper;
    printf("\n==== nq_symG ver.65   nsize=%d,MPSZ=%d,BLKSZ=%d,Queens=%d,PoolSize=%d\n",
        nsize,MPSZ,BLKSZ,Queens,PoolSize);
    unsigned int timer = 0;
    //temp modfied by deepgreen
    // u64 tick1,tick2;
    // printf("----- N = %d -----\n",nsize);
    // tick1 = clock();
    cutilCheckError( cutCreateTimer( &timer));
    cutilCheckError( cutStartTimer( timer));
    // proper8 = (int4*)malloc(sizeof(int4)*PoolSize);
    // proper4 = (int4*)malloc(sizeof(int4)*PoolSize);
    // proper2 = (int4*)malloc(sizeof(int4)*PoolSize);
    // Pinned host memory so the per-batch cudaMemcpy is fast.
    cudaHostAlloc((void**) &proper8,sizeof(int4)*PoolSize,cudaHostAllocDefault);
    cudaHostAlloc((void**) &proper4,sizeof(int4)*PoolSize,cudaHostAllocDefault);
    cudaHostAlloc((void**) &proper2,sizeof(int4)*PoolSize,cudaHostAllocDefault);
    counter = (unsigned int*)malloc(sizeof(unsigned int));
    cudaMalloc((void**) &d_cnt, sizeof(unsigned long long));
    cudaMalloc((void**) &d_proper, sizeof(int4)*PoolSize);
    cudaMalloc((void**) &d_counter, sizeof(unsigned int) );
    // d_cnt    = (unsigned long long *) malloc(sizeof(unsigned long long));
    // d_proper = (int4*) malloc(sizeof(int4)*PoolSize);
    // d_counter= (unsigned int *) malloc(sizeof(unsigned int));
    //***** thread launch section
    tca.status = 0; //initialize thread communication area
    InitializeCriticalSection(&lock);
    hThread=CreateThread(NULL, //default security attributes
        0, //default stack size
        SeedsGen, //function name
        0, // parameter
        0, // start the thread immediately after creation
        &dwThreadID);
    if (hThread) {
        // printf ("SeedsGen-Thread launched successfully\n");
        CloseHandle (hThread);
    }
    //****** GPU control section
    Total = 0;
    Pcnt = 0;
    while(tca.status >= 0) {
        if (Wait(&tca,1) < 0) break; //wait for buf-full
        kbn = tca.kbn;
        kubun = Maps[kbn];
        T_all = tca.t;
        proper= (int4*)tca.buf;
        if (T_all)
            cudaMemcpy(d_proper, proper, sizeof(int4)*T_all, cudaMemcpyHostToDevice);
        Swap(&tca,1,0); //release buf
        // printf("---- release buf  T_all = %d\n",T_all);
        if (T_all == 0) break;   // empty batch = end-of-work marker
        cudaMemset(d_cnt, 0, sizeof(unsigned long long));
        counter[0] = BLKSZ * MPSZ; /* block size * number of multiprocessors */
        // counter[0] = 1;
        cudaMemcpy(d_counter, counter,sizeof(unsigned int), cudaMemcpyHostToDevice);
        //temp modified by deepgreen
        // assign<<< grid, block >>>(n, N_assign, d_cnt, T_all, d_proper, BLKSZ, d_counter);
        // assign(kbn, nsize, d_cnt, T_all, d_proper, 1, d_counter);
        assign<<< grid, block >>>(kbn, nsize, d_cnt, T_all, d_proper, BLKSZ, d_counter);
        cutilCheckMsg("Kernel execution failed");
        cudaMemcpy(&cnt, d_cnt, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
        Total += kubun * cnt;    // weight by the symmetry multiplicity
        Pcnt++;
        if ((T_all != PoolSize) ||
            ((Pcnt & 0x03) == 1)) {
            printf("---- kick (%d) ---- %8d %9d %I64d %I64d\n",kbn,Pcnt,T_all,Total,cnt);
        }
    }
    //temp modfied by deepgreen
    cutilCheckError( cutStopTimer( timer));
    printf("Processing time: %f (s)\n", cutGetTimerValue( timer)/1000);
    cutilCheckError( cutDeleteTimer( timer));
    // tick2 = clock();
    // printf("Processing time: %d\n", (int)(tick2-tick1));
    // printf("number of solutions");
    // printf("%llu\n", cnt*2);
    printf("%llu\n", Total);
    // free(proper);
    // free(proper8);
    // free(proper4);
    // free(proper2);
    cudaFreeHost(proper8);
    cudaFreeHost(proper4);
    cudaFreeHost(proper2);
    free(counter);
    cudaFree(d_cnt);
    cudaFree(d_proper);
    cudaFree(d_counter);
    DeleteCriticalSection(&lock);
    // printf("SeedsGen terminated\n\n" );
    // CUT_EXIT(argc, argv);
    return 0;
}
|
63fbd8864b21ed1e184575b50f09aa4539bbd550.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
// Reference host implementation of C = A * B.
// A is hA x wA, B is wA x wB, C is hA x wB; all matrices are row-major.
// Accumulation is done in double to reduce rounding error.
void
matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int row = 0; row < hA; ++row)
    {
        for (unsigned int col = 0; col < wB; ++col)
        {
            double acc = 0.0;
            for (unsigned int k = 0; k < wA; ++k)
                acc += (double)A[row * wA + k] * (double)B[k * wB + col];
            C[row * wB + col] = (float)acc;
        }
    }
}
// Naive matrix-multiply kernel: one thread computes one element of C as the
// dot product of a row of A with a column of B (row-major, global memory only).
// NOTE(review): the row guard tests `Row < wA` (A's *width*) although the row
// count of C is A's *height*; this only works because the host launches with
// square A (dimsA.x == dimsA.y) -- confirm before reusing with non-square A.
__global__ void
matrixMul_naive( float* C, float* A, float* B, int wA, int wB)
{
    int Row = blockIdx.y*blockDim.y+threadIdx.y;
    int Col = blockIdx.x*blockDim.x+threadIdx.x;
    if ((Row < wA) && (Col<wB))
    {
        float Cvalue=0;
        // Dot product of row `Row` of A with column `Col` of B.
        for (int k = 0 ; k < wA; ++k) Cvalue += A[Row*wA+k]*B[k*wB+Col];
        C[Row*wB+Col]=Cvalue;
    }
}
// Fill the first `size` elements of `data` with the constant `val`.
void constantInit(float *data, int size, float val)
{
    int i = 0;
    while (i < size)
    {
        data[i++] = val;
    }
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
/**
 * Run a simple matrix-multiplication test on the GPU.
 * A (dimsA.y x dimsA.x) is filled with 1.0f and B (dimsB.y x dimsB.x) with
 * valB, the naive kernel computes C = A * B nIter times for timing, and every
 * element of C is checked against the analytic value dimsA.x * valB.
 * Returns EXIT_SUCCESS when all elements pass the relative-error test.
 *
 * Fixes vs. the original: the CPU `reference` buffer and the two timing
 * events were leaked; they are now released before returning.
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    // Initialize host memory
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int size_C = dimsC.x * dimsC.y;
    unsigned int mem_size_C = size_C * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    // Compute the CPU reference solution.  Note the tolerance check below
    // compares against the analytic value, not this buffer.
    printf("Computing result using host CPU...");
    float *reference = (float *)malloc(mem_size_C);
    matrixMulCPU(reference, h_A, h_B, dimsA.y, dimsA.x, dimsB.x);
    printf("done.\n");
    hipMalloc((void **) &d_A, mem_size_A);
    hipMalloc((void **) &d_B, mem_size_B);
    hipMalloc((void **) &d_C, mem_size_C);
    // copy host memory to device
    hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
    // Setup execution parameters (one thread per element of C)
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    printf("Computing result using CUDA Kernel...\n");
    // Performs warmup operation using matrixMul CUDA kernel
    hipLaunchKernelGGL(( matrixMul_naive), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
    printf("done\n");
    hipDeviceSynchronize();
    // Allocate events used for timing
    hipEvent_t start;
    hipEventCreate(&start);
    hipEvent_t stop;
    hipEventCreate(&stop);
    // Record the start event
    hipEventRecord(start, NULL);
    // Execute the kernel nIter times
    int nIter = 300;
    for (int j = 0; j < nIter; j++)
    {
        hipLaunchKernelGGL(( matrixMul_naive), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    // Record the stop event and wait for it to complete
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    float msecTotal = 0.0f;
    hipEventElapsedTime(&msecTotal, start, stop);
    // Compute and print the performance
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);
    // Copy result from device to host
    hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
    printf("Checking computed result for correctness: ");
    bool correct = true;
    // test relative error by the formula
    //     |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|>  < eps
    double eps = 1.e-6 ; // machine zero
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err/abs_val/dot_length ;
        if (rel_err > eps)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
            correct = false;
        }
    }
    // Clean up memory (the reference buffer and both events were previously leaked)
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    if (correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}
/**
* Program main
*/
/**
 * Program main: fixed problem sizes (no command-line parsing despite
 * argc/argv); for block_size = 32 this is A: 320x320 and B: 640x320.
 * Exits with matrixMultiply()'s status code.
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    // Use a larger block size for Fermi and above
    int block_size = 32;
    dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
    dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
    int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
    exit(matrix_result);
}
| 63fbd8864b21ed1e184575b50f09aa4539bbd550.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
// Host-side reference for C = A * B (row-major).
// A: hA x wA, B: wA x wB, C: hA x wB.  Partial sums are kept in double
// precision before the final narrowing to float.
void
matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int r = 0; r < hA; ++r)
    {
        const float *rowA = A + r * wA;
        for (unsigned int cidx = 0; cidx < wB; ++cidx)
        {
            double total = 0.0;
            for (unsigned int k = 0; k < wA; ++k)
            {
                total += (double)rowA[k] * (double)B[k * wB + cidx];
            }
            C[r * wB + cidx] = (float)total;
        }
    }
}
// Naive global-memory kernel: each thread produces one element of C.
// NOTE(review): the guard uses `Row < wA` (A's width) where C's row count is
// really A's height; correct only for the square-A launches used by this
// sample -- confirm before generalizing.
__global__ void
matrixMul_naive( float* C, float* A, float* B, int wA, int wB)
{
    int Row = blockIdx.y*blockDim.y+threadIdx.y;
    int Col = blockIdx.x*blockDim.x+threadIdx.x;
    if ((Row < wA) && (Col<wB))
    {
        float Cvalue=0;
        // Row of A dotted with column of B.
        for (int k = 0 ; k < wA; ++k) Cvalue += A[Row*wA+k]*B[k*wB+Col];
        C[Row*wB+Col]=Cvalue;
    }
}
// Set every one of the first `size` entries of `data` to `val`.
void constantInit(float *data, int size, float val)
{
    for (float *p = data; p < data + size; ++p)
    {
        *p = val;
    }
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
/**
 * Run a simple matrix-multiplication test on the GPU.
 * A (dimsA.y x dimsA.x) is filled with 1.0f and B (dimsB.y x dimsB.x) with
 * valB, the naive kernel computes C = A * B nIter times for timing, and every
 * element of C is checked against the analytic value dimsA.x * valB.
 * Returns EXIT_SUCCESS when all elements pass the relative-error test.
 *
 * Fixes vs. the original: the CPU `reference` buffer and the two timing
 * events were leaked; they are now released before returning.
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    // Initialize host memory
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int size_C = dimsC.x * dimsC.y;
    unsigned int mem_size_C = size_C * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    // Compute the CPU reference solution.  Note the tolerance check below
    // compares against the analytic value, not this buffer.
    printf("Computing result using host CPU...");
    float *reference = (float *)malloc(mem_size_C);
    matrixMulCPU(reference, h_A, h_B, dimsA.y, dimsA.x, dimsB.x);
    printf("done.\n");
    cudaMalloc((void **) &d_A, mem_size_A);
    cudaMalloc((void **) &d_B, mem_size_B);
    cudaMalloc((void **) &d_C, mem_size_C);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    // Setup execution parameters (one thread per element of C)
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    printf("Computing result using CUDA Kernel...\n");
    // Performs warmup operation using matrixMul CUDA kernel
    matrixMul_naive<<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    printf("done\n");
    cudaDeviceSynchronize();
    // Allocate CUDA events used for timing
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    // Record the start event
    cudaEventRecord(start, NULL);
    // Execute the kernel nIter times
    int nIter = 300;
    for (int j = 0; j < nIter; j++)
    {
        matrixMul_naive<<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    // Record the stop event and wait for it to complete
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    // Compute and print the performance
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);
    // Copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    printf("Checking computed result for correctness: ");
    bool correct = true;
    // test relative error by the formula
    //     |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|>  < eps
    double eps = 1.e-6 ; // machine zero
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err/abs_val/dot_length ;
        if (rel_err > eps)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
            correct = false;
        }
    }
    // Clean up memory (the reference buffer and both events were previously leaked)
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    if (correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}
/**
* Program main
*/
/**
 * Program main: fixed problem sizes (argc/argv are accepted but unused);
 * with block_size = 32 this yields A: 320x320 and B: 640x320.
 * Exits with matrixMultiply()'s status code.
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    // Use a larger block size for Fermi and above
    int block_size = 32;
    dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
    dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
    int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
    exit(matrix_result);
}
|
8cdebb59b786e357eeb4d2f1510f3df069881e88.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vector_add_cu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Sweep tables for the auto-generated benchmark: 20 (BLOCKX, BLOCKY) launch
// configurations and 7 (XSIZE, YSIZE) problem sizes.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for the first argv[1] sizes in matrices_ and each of the
// 20 launch configurations in blocks_, runs vector_add_cu 10 times as
// warm-up, then times 1000 launches and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// Fixes vs. the original: device buffers are sized in bytes
// (n * sizeof(float), previously only n bytes for n floats) and are freed at
// the end of every configuration instead of leaking across iterations.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;        // element count passed to the kernel
            float *out = NULL;
            hipMalloc(&out, n*sizeof(float));
            float *a = NULL;
            hipMalloc(&a, n*sizeof(float));
            float *b = NULL;
            hipMalloc(&b, n*sizeof(float));
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            // Round the grid up so it covers the full XSIZE x YSIZE domain.
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);   // no-op free: forces context creation before timing
            hipLaunchKernelGGL((vector_add_cu), dim3(gridBlock), dim3(threadBlock), 0, 0, out, a, b, n);
            hipDeviceSynchronize();
            // Warm-up launches (excluded from timing).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((vector_add_cu), dim3(gridBlock), dim3(threadBlock), 0, 0, out, a, b, n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((vector_add_cu), dim3(gridBlock), dim3(threadBlock), 0, 0, out, a, b, n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release this configuration's buffers (previously leaked).
            hipFree(out);
            hipFree(a);
            hipFree(b);
        }
    }} | 8cdebb59b786e357eeb4d2f1510f3df069881e88.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vector_add_cu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Benchmark sweep tables: 20 (BLOCKX, BLOCKY) launch configurations and
// 7 (XSIZE, YSIZE) problem sizes.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for the first argv[1] sizes in matrices_ and each of the
// 20 launch configurations in blocks_, runs vector_add_cu 10 times as
// warm-up, then times 1000 launches and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// Fixes vs. the original: device buffers are sized in bytes
// (n * sizeof(float), previously only n bytes for n floats) and are freed at
// the end of every configuration instead of leaking across iterations.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;        // element count passed to the kernel
            float *out = NULL;
            cudaMalloc(&out, n*sizeof(float));
            float *a = NULL;
            cudaMalloc(&a, n*sizeof(float));
            float *b = NULL;
            cudaMalloc(&b, n*sizeof(float));
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            // Round the grid up so it covers the full XSIZE x YSIZE domain.
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);   // no-op free: forces context creation before timing
            vector_add_cu<<<gridBlock,threadBlock>>>(out,a,b,n);
            cudaDeviceSynchronize();
            // Warm-up launches (excluded from timing).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                vector_add_cu<<<gridBlock,threadBlock>>>(out,a,b,n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                vector_add_cu<<<gridBlock,threadBlock>>>(out,a,b,n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release this configuration's buffers (previously leaked).
            cudaFree(out);
            cudaFree(a);
            cudaFree(b);
        }
    }}
7e189d177dc692ec8ccffaaf49e4d3bb81eaa08d.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
using namespace cv;
// Window functor computing a Local-Binary-Pattern-style code for each 3x3
// input window and writing it to the centre of the output window.
class lbpFunctor : public thrust::shared_unary_window_transform_functor<uchar>
{
public:
  __device__ void operator() (const thrust::window_2d<uchar> &inputWindow,const thrust::window_2d<uchar> &outputWindow) const
  {
    uchar temp[3][3];
    // temp[i][j] = 1 when the centre pixel is >= neighbour at (col j, row i).
    for(int i = 0; i<3;i++)
    {
      for(int j = 0; j<3;j++)
      {
        temp[i][j] = inputWindow[make_int2(3/2,3/2)]>=inputWindow[make_int2(j,i)]?1:0;
      }
    }
    // NOTE(review): the weighted sum uses temp[1][1] (the centre comparison,
    // which is always 1 since centre >= centre) and never uses the corner
    // temp[0][2]; a standard 8-neighbour LBP would take temp[0][2] for the
    // weight-1 term instead -- confirm whether this is intentional.
    outputWindow[3/2][3/2]=128*temp[0][1]+64*temp[0][0]+32*temp[1][0]+16*temp[2][0]+8*temp[2][1]+4*temp[2][2]+2*temp[1][2]+1*temp[1][1];
  }
};
// Load car.jpg as grayscale, resize it to dim x dim (default 512, override
// with argv[1]), run the 3x3 LBP functor over it on the GPU via the windowed
// thrust transform, and optionally write/show the result (OWRITE / SHOW).
int main(int argc, char const *argv[]) {
  hipDeviceProp_t dev_prop;
  hipGetDeviceProperties(&dev_prop,0);   // queried but otherwise unused here
  Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
  Mat image;
  int dim = 512;                          // default side length
  if(argc ==2)
  {
    dim = atoi(argv[1]);
  }
  resize(small,image,Size(dim,dim));
  // image_block is only used below to size the host staging buffer.
  thrust::block_2d<unsigned char > image_block (image.cols,image.rows);
  thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
  thrust::block_2d<uchar> outBlock (image.cols,image.rows);
  uchar * img = (uchar * )malloc(sizeof(uchar)*(image_block.end()-image_block.begin()));
  for(int i = 0; i<image.cols*image.rows;i++)
  {
    img[i]=(uchar)image.ptr()[i];
  }
  uchar_image_block.upload(img);
  // 3x3 sliding windows with stride 1 over the input and output blocks.
  thrust::window_vector<uchar> myVector = thrust::window_vector<uchar>(&uchar_image_block,3,3,1,1);
  thrust::window_vector<uchar> outputVector = thrust::window_vector<uchar>(&outBlock,3,3,1,1);
  thrust::transform(thrust::hip::texture,myVector.begin(),myVector.end(),outputVector.begin(),lbpFunctor());
  // Despite the name, this buffer holds unsigned chars, not floats.
  unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin()));
  // NOTE(review): download takes &img (uchar**) -- presumably this custom
  // thrust fork fills/replaces the buffer through the pointer; confirm the
  // original malloc'd img is not leaked.
  outBlock.download(&img);
  for(int i = 0; i<image.cols*image.rows;i++)
  {
    outputFloatImageData[i]=(unsigned char)img[i];
  }
  Mat output (Size(image.cols,image.rows),CV_8UC1,outputFloatImageData);
  #ifdef OWRITE
  imwrite("input.png",image);
  imwrite("output.png",output);
  #endif
  #ifdef SHOW
  imshow("input.png",image);
  imshow("output.png",output);
  waitKey(0);
  #endif
  return 0;
}
| 7e189d177dc692ec8ccffaaf49e4d3bb81eaa08d.cu | #include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
using namespace cv;
// Local Binary Pattern (LBP) functor for a 3x3 sliding window.
// Each of the 8 neighbours is compared against the centre pixel and the
// resulting bits are packed (one weight per ring position) into one byte
// written at the window centre.
class lbpFunctor : public thrust::shared_unary_window_transform_functor<uchar>
{
public:
  __device__ void operator() (const thrust::window_2d<uchar> &inputWindow,const thrust::window_2d<uchar> &outputWindow) const
  {
    // temp[i][j] == 1 iff centre >= pixel at (row i, col j) of the window
    uchar temp[3][3];
    for(int i = 0; i<3;i++)
    {
      for(int j = 0; j<3;j++)
      {
        temp[i][j] = inputWindow[make_int2(3/2,3/2)]>=inputWindow[make_int2(j,i)]?1:0;
      }
    }
    // Pack the 8 neighbour bits walking the ring around the centre.
    // BUG FIX: the weight-1 term previously read temp[1][1] -- the centre
    // itself, which is always 1 -- while the top-right neighbour temp[0][2]
    // was never used; the ring is now completed with temp[0][2].
    outputWindow[3/2][3/2]=128*temp[0][1]+64*temp[0][0]+32*temp[1][0]+16*temp[2][0]+8*temp[2][1]+4*temp[2][2]+2*temp[1][2]+1*temp[0][2];
  }
};
// Demo driver: loads "car.jpg" as grayscale, resizes it to dim x dim
// (dim taken from argv[1], default 512), runs the 3x3 LBP functor over
// every pixel window on the device and optionally writes/shows the result.
int main(int argc, char const *argv[]) {
  cudaDeviceProp dev_prop;
  cudaGetDeviceProperties(&dev_prop,0);  // queried but otherwise unused
  Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
  Mat image;
  int dim = 512;  // default square image size
  if(argc ==2)
  {
    dim = atoi(argv[1]);
  }
  resize(small,image,Size(dim,dim));
  // Device-side 2D blocks: one used only for sizing, one for the uchar
  // input, one for the LBP output.
  thrust::block_2d<unsigned char > image_block (image.cols,image.rows);
  thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
  thrust::block_2d<uchar> outBlock (image.cols,image.rows);
  uchar * img = (uchar * )malloc(sizeof(uchar)*(image_block.end()-image_block.begin()));
  for(int i = 0; i<image.cols*image.rows;i++)
  {
    img[i]=(uchar)image.ptr()[i];
  }
  uchar_image_block.upload(img);
  // 3x3 windows with stride 1 in both directions over input and output
  thrust::window_vector<uchar> myVector = thrust::window_vector<uchar>(&uchar_image_block,3,3,1,1);
  thrust::window_vector<uchar> outputVector = thrust::window_vector<uchar>(&outBlock,3,3,1,1);
  thrust::transform(thrust::cuda::texture,myVector.begin(),myVector.end(),outputVector.begin(),lbpFunctor());
  unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin()));
  // NOTE(review): download takes &img (pointer-to-pointer) -- presumably the
  // API fills the existing host buffer; confirm against thrust::block_2d.
  // Neither img nor outputFloatImageData is freed before exit.
  outBlock.download(&img);
  for(int i = 0; i<image.cols*image.rows;i++)
  {
    outputFloatImageData[i]=(unsigned char)img[i];
  }
  Mat output (Size(image.cols,image.rows),CV_8UC1,outputFloatImageData);
  #ifdef OWRITE
  imwrite("input.png",image);
  imwrite("output.png",output);
  #endif
  #ifdef SHOW
  imshow("input.png",image);
  imshow("output.png",output);
  waitKey(0);
  #endif
  return 0;
}
|
cbb1476e30edbaa17cfe7f6bd10bea5be90aaadc.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cstdlib>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <hip/hip_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MASK_DIMENSION 3
#define OFFSET (MASK_DIMENSION / 2)
const char *iname = "lena_bw.pgm";
const char *Imp_type = "serial";
/**
 * Serial (CPU) reference 3x3 convolution with magnitude thresholding.
 *
 * @param image  input image, row-major, width*height floats
 * @param kernel 3x3 convolution mask, row-major (9 floats)
 * @param width  image width in pixels
 * @param height image height in pixels
 * @return newly malloc'd width*height buffer; each pixel is 1 when the
 *         convolved value exceeds +/-0.4, else 0. Caller must free().
 *
 * BUG FIX: the row loop and the row bounds check previously used `width`
 * in place of `height`, which read and wrote out of bounds for non-square
 * images (behaviour for square images is unchanged).
 */
float* serial_convl(float* image, float *kernel, size_t width, int height) {
  const int maskDim = 3;           // keep in sync with MASK_DIMENSION
  const int offset = maskDim / 2;  // keep in sync with OFFSET
  float* output = (float*)malloc(sizeof(float) * width * height);
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < (int)width; ++x) {
      float val = 0;
      for (int j = 0; j < maskDim; ++j) {
        for (int i = 0; i < maskDim; ++i) {
          int d_i = i - offset;
          int d_j = j - offset;
          // skip mask taps that fall outside the image
          if (d_i + x >= 0 && d_i + x < (int)width && d_j + y >= 0 && d_j + y < height) {
            val += image[(y + d_j) * width + (x + d_i)] * kernel[j * maskDim + i];
          }
        }
      }
      // threshold on magnitude: binary edge map
      output[y * width + x] = (val > 0.4 || val < -0.4) ? 1 : 0;
    }
  }
  return output;
}
void runTest(int argc, char **argv);
// Entry point: announces the implementation variant, runs the convolution
// benchmark, then resets the device before exiting.
int main(int argc, char **argv)
{
  printf("%s starting...\n", Imp_type);
  runTest(argc, argv);
  hipDeviceReset();  // tear down the device context
  return 0;
}
// Runs the serial convolution benchmark: loads a PGM image, applies the 3x3
// edge-detection mask on the CPU, reports timing/throughput and writes the
// thresholded result next to the input file.
void runTest(int argc, char **argv)
{
  //int devID = findCudaDevice(argc, (const char **) argv);
  //convulution mask
  //float *sharpening = (float*)malloc(sizeof(float)*3*3);
  // NOTE(review): this heap allocation is leaked -- the pointer is
  // re-aimed at the stack array `edge` a few lines below.
  float *edge_dect = (float*)malloc(sizeof(float)*3*3);
  //float *averaging = (float*)malloc(sizeof(float)*3*3);
  //float sharp[9] = {-1,-1,-1,-1,9,-1,-1,-1,-1};
  // Sobel-style horizontal-gradient mask
  float edge[9] = {-1,0,1,-2,0,2,-1,0,1};
  //float av[9] = {1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9};
  //sharpening=&sharp[0];
  edge_dect=&edge[0];
  //averaging=&av[0];
  // load image from disk
  float *h_d = NULL;
  unsigned int width, height;
  char *img_p = sdkFindFilePath(iname, argv[0]);
  if (img_p == NULL)
  {
    printf("Unable to source image file: %s\n", iname);
    exit(EXIT_FAILURE);
  }
  sdkLoadPGM(img_p, &h_d, &width, &height);
  //unsigned int size = width * height * sizeof(float);
  printf("Loaded '%s', %d x %d pixels\n", iname, width, height);
  float *serial_data = NULL;
  StopWatchInterface *s_timer = NULL;
  sdkCreateTimer(&s_timer);
  sdkStartTimer(&s_timer);
  serial_data = serial_convl(h_d,edge_dect,width,height);  // timed region
  sdkStopTimer(&s_timer);
  printf("Processing time for serial: %f (ms)\n", sdkGetTimerValue(&s_timer));
  printf("%.2f Mpixels/sec\n",(width *height / (sdkGetTimerValue(&s_timer) / 1000.0f)) / 1e6);
  sdkDeleteTimer(&s_timer);
  //Write output to file
  char serial_outputfile[1024];
  strcpy(serial_outputfile, img_p);
  // overwrite the trailing ".pgm" of the input path with the output suffix
  strcpy(serial_outputfile + strlen(img_p) - 4, "_serial_out.pgm");
  sdkSavePGM(serial_outputfile, serial_data, width, height);
  printf("Wrote '%s'\n", serial_outputfile);
  // NOTE(review): serial_data (malloc'd by serial_convl) is never freed.
  free(img_p);
} | cbb1476e30edbaa17cfe7f6bd10bea5be90aaadc.cu | #include <cassert>
#include <cstdlib>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <cuda_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MASK_DIMENSION 3
#define OFFSET (MASK_DIMENSION / 2)
const char *iname = "lena_bw.pgm";
const char *Imp_type = "serial";
/**
 * Serial (CPU) reference 3x3 convolution with magnitude thresholding.
 *
 * @param image  input image, row-major, width*height floats
 * @param kernel 3x3 convolution mask, row-major (9 floats)
 * @param width  image width in pixels
 * @param height image height in pixels
 * @return newly malloc'd width*height buffer; each pixel is 1 when the
 *         convolved value exceeds +/-0.4, else 0. Caller must free().
 *
 * BUG FIX: the row loop and the row bounds check previously used `width`
 * in place of `height`, which read and wrote out of bounds for non-square
 * images (behaviour for square images is unchanged).
 */
float* serial_convl(float* image, float *kernel, size_t width, int height) {
  const int maskDim = 3;           // keep in sync with MASK_DIMENSION
  const int offset = maskDim / 2;  // keep in sync with OFFSET
  float* output = (float*)malloc(sizeof(float) * width * height);
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < (int)width; ++x) {
      float val = 0;
      for (int j = 0; j < maskDim; ++j) {
        for (int i = 0; i < maskDim; ++i) {
          int d_i = i - offset;
          int d_j = j - offset;
          // skip mask taps that fall outside the image
          if (d_i + x >= 0 && d_i + x < (int)width && d_j + y >= 0 && d_j + y < height) {
            val += image[(y + d_j) * width + (x + d_i)] * kernel[j * maskDim + i];
          }
        }
      }
      // threshold on magnitude: binary edge map
      output[y * width + x] = (val > 0.4 || val < -0.4) ? 1 : 0;
    }
  }
  return output;
}
void runTest(int argc, char **argv);
// Entry point: announces the implementation variant, runs the convolution
// benchmark, then resets the device before exiting.
int main(int argc, char **argv)
{
  printf("%s starting...\n", Imp_type);
  runTest(argc, argv);
  cudaDeviceReset();  // tear down the device context
  return 0;
}
// Runs the serial convolution benchmark: loads a PGM image, applies the 3x3
// edge-detection mask on the CPU, reports timing/throughput and writes the
// thresholded result next to the input file.
//
// BUG FIX: the mask pointer was previously initialised with a 3x3 malloc
// that was immediately overwritten with the address of the stack array,
// leaking the allocation; the result buffer returned by serial_convl was
// also never freed. Dead commented-out mask variants were removed.
void runTest(int argc, char **argv)
{
  // Sobel-style horizontal-gradient edge-detection mask (row-major 3x3)
  float edge[9] = {-1,0,1,-2,0,2,-1,0,1};
  float *edge_dect = &edge[0];
  // load image from disk
  float *h_d = NULL;
  unsigned int width, height;
  char *img_p = sdkFindFilePath(iname, argv[0]);
  if (img_p == NULL)
  {
    printf("Unable to source image file: %s\n", iname);
    exit(EXIT_FAILURE);
  }
  sdkLoadPGM(img_p, &h_d, &width, &height);
  printf("Loaded '%s', %d x %d pixels\n", iname, width, height);
  // time the serial convolution
  float *serial_data = NULL;
  StopWatchInterface *s_timer = NULL;
  sdkCreateTimer(&s_timer);
  sdkStartTimer(&s_timer);
  serial_data = serial_convl(h_d,edge_dect,width,height);
  sdkStopTimer(&s_timer);
  printf("Processing time for serial: %f (ms)\n", sdkGetTimerValue(&s_timer));
  printf("%.2f Mpixels/sec\n",(width *height / (sdkGetTimerValue(&s_timer) / 1000.0f)) / 1e6);
  sdkDeleteTimer(&s_timer);
  // Write output to file: overwrite the trailing ".pgm" with the suffix
  char serial_outputfile[1024];
  strcpy(serial_outputfile, img_p);
  strcpy(serial_outputfile + strlen(img_p) - 4, "_serial_out.pgm");
  sdkSavePGM(serial_outputfile, serial_data, width, height);
  printf("Wrote '%s'\n", serial_outputfile);
  free(serial_data);  // result buffer from serial_convl (was leaked)
  free(img_p);
}
0ee32af9c6f5becfa331b9faca95f840db6f8cda.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <raft/cuda_utils.cuh>
#include <vector>
#include <thrust/fill.h>
#include <cuml/cluster/kmeans.hpp>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
#include <cuml/cuml.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/metrics/metrics.hpp>
namespace ML {
using namespace MLCommon;
using namespace Datasets;
using namespace Metrics;
// Parameter pack describing one k-means test case.
template <typename T>
struct KmeansInputs {
  int n_row;       // number of samples
  int n_col;       // number of features per sample
  int n_clusters;  // k, also used as the number of generated blobs
  T tol;           // convergence tolerance passed to the solver
  bool weighted;   // whether to supply (uniform) per-sample weights
};
// Parameterized k-means integration test: generates well-separated blobs,
// clusters them with kmeans::fit_predict, and scores the resulting labels
// against the generator's labels with the adjusted Rand index.
template <typename T>
class KmeansTest : public ::testing::TestWithParam<KmeansInputs<T>> {
 protected:
  void basicTest() {
    raft::handle_t handle;
    testparams = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();
    int n_samples = testparams.n_row;
    int n_features = testparams.n_col;
    // solver configuration from the test parameters
    params.n_clusters = testparams.n_clusters;
    params.tol = testparams.tol;
    params.n_init = 5;
    params.seed = 1;
    params.oversampling_factor = 0;
    device_buffer<T> X(handle.get_device_allocator(), handle.get_stream(),
                       n_samples * n_features);
    device_buffer<int> labels(handle.get_device_allocator(),
                              handle.get_stream(), n_samples);
    // synthetic blobs with a fixed seed so the expected labelling is stable
    make_blobs(handle, X.data(), labels.data(), n_samples, n_features,
               params.n_clusters, true, nullptr, nullptr, 1.0, false, -10.0f,
               10.0f, 1234ULL);
    raft::allocate(d_labels, n_samples);
    raft::allocate(d_labels_ref, n_samples);
    raft::allocate(d_centroids, params.n_clusters * n_features);
    if (testparams.weighted) {
      // uniform weights: results should match the unweighted run
      raft::allocate(d_sample_weight, n_samples);
      thrust::fill(thrust::hip::par.on(handle.get_stream()), d_sample_weight,
                   d_sample_weight + n_samples, 1);
    } else {
      d_sample_weight = nullptr;
    }
    raft::copy(d_labels_ref, labels.data(), n_samples, handle.get_stream());
    CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
    T inertia = 0;
    int n_iter = 0;
    kmeans::fit_predict(handle, params, X.data(), n_samples, n_features,
                        d_sample_weight, d_centroids, d_labels, inertia,
                        n_iter);
    CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
    // ARI == 1.0 means the clustering recovered the blob assignment exactly
    score = adjusted_rand_index(handle, d_labels_ref, d_labels, n_samples);
    if (score < 1.0) {
      // dump the first 25 expected/actual labels to ease debugging
      std::stringstream ss;
      ss << "Expected: "
         << raft::arr2Str(d_labels_ref, 25, "d_labels_ref",
                          handle.get_stream());
      CUML_LOG_DEBUG(ss.str().c_str());
      ss.str(std::string());
      ss << "Actual: "
         << raft::arr2Str(d_labels, 25, "d_labels", handle.get_stream());
      CUML_LOG_DEBUG(ss.str().c_str());
      CUML_LOG_DEBUG("Score = %lf", score);
    }
  }
  void SetUp() override { basicTest(); }
  void TearDown() override {
    CUDA_CHECK(hipFree(d_labels));
    CUDA_CHECK(hipFree(d_centroids));
    CUDA_CHECK(hipFree(d_labels_ref));
    // d_sample_weight may be nullptr (unweighted case); freeing a null
    // pointer is a documented no-op for the runtime API
    CUDA_CHECK(hipFree(d_sample_weight));
  }
 protected:
  KmeansInputs<T> testparams;
  int *d_labels, *d_labels_ref;   // predicted / generator labels (device)
  T *d_centroids, *d_sample_weight;
  double score;                   // adjusted Rand index of the last run
  ML::kmeans::KMeansParams params;
};
// Test matrix: {n_row, n_col, n_clusters, tol, weighted}, each size run both
// with and without uniform sample weights.
const std::vector<KmeansInputs<float>> inputsf2 = {
  {1000, 32, 5, 0.0001, true}, {1000, 32, 5, 0.0001, false},
  {1000, 100, 20, 0.0001, true}, {1000, 100, 20, 0.0001, false},
  {10000, 32, 10, 0.0001, true}, {10000, 32, 10, 0.0001, false},
  {10000, 100, 50, 0.0001, true}, {10000, 100, 50, 0.0001, false},
  {10000, 1000, 200, 0.0001, true}, {10000, 1000, 200, 0.0001, false}};
const std::vector<KmeansInputs<double>> inputsd2 = {
  {1000, 32, 5, 0.0001, true}, {1000, 32, 5, 0.0001, false},
  {1000, 100, 20, 0.0001, true}, {1000, 100, 20, 0.0001, false},
  {10000, 32, 10, 0.0001, true}, {10000, 32, 10, 0.0001, false},
  {10000, 100, 50, 0.0001, true}, {10000, 100, 50, 0.0001, false},
  {10000, 1000, 200, 0.0001, true}, {10000, 1000, 200, 0.0001, false}};
// A perfect adjusted Rand index (exact recovery of the blobs) is required.
typedef KmeansTest<float> KmeansTestF;
TEST_P(KmeansTestF, Result) { ASSERT_TRUE(score == 1.0); }
typedef KmeansTest<double> KmeansTestD;
TEST_P(KmeansTestD, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestD,
                        ::testing::ValuesIn(inputsd2));
}  // end namespace ML
| 0ee32af9c6f5becfa331b9faca95f840db6f8cda.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <raft/cuda_utils.cuh>
#include <vector>
#include <thrust/fill.h>
#include <cuml/cluster/kmeans.hpp>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
#include <cuml/cuml.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/metrics/metrics.hpp>
namespace ML {
using namespace MLCommon;
using namespace Datasets;
using namespace Metrics;
// Parameter pack describing one k-means test case.
template <typename T>
struct KmeansInputs {
  int n_row;       // number of samples
  int n_col;       // number of features per sample
  int n_clusters;  // k, also used as the number of generated blobs
  T tol;           // convergence tolerance passed to the solver
  bool weighted;   // whether to supply (uniform) per-sample weights
};
// Parameterized k-means integration test: generates well-separated blobs,
// clusters them with kmeans::fit_predict, and scores the resulting labels
// against the generator's labels with the adjusted Rand index.
template <typename T>
class KmeansTest : public ::testing::TestWithParam<KmeansInputs<T>> {
 protected:
  void basicTest() {
    raft::handle_t handle;
    testparams = ::testing::TestWithParam<KmeansInputs<T>>::GetParam();
    int n_samples = testparams.n_row;
    int n_features = testparams.n_col;
    // solver configuration from the test parameters
    params.n_clusters = testparams.n_clusters;
    params.tol = testparams.tol;
    params.n_init = 5;
    params.seed = 1;
    params.oversampling_factor = 0;
    device_buffer<T> X(handle.get_device_allocator(), handle.get_stream(),
                       n_samples * n_features);
    device_buffer<int> labels(handle.get_device_allocator(),
                              handle.get_stream(), n_samples);
    // synthetic blobs with a fixed seed so the expected labelling is stable
    make_blobs(handle, X.data(), labels.data(), n_samples, n_features,
               params.n_clusters, true, nullptr, nullptr, 1.0, false, -10.0f,
               10.0f, 1234ULL);
    raft::allocate(d_labels, n_samples);
    raft::allocate(d_labels_ref, n_samples);
    raft::allocate(d_centroids, params.n_clusters * n_features);
    if (testparams.weighted) {
      // uniform weights: results should match the unweighted run
      raft::allocate(d_sample_weight, n_samples);
      thrust::fill(thrust::cuda::par.on(handle.get_stream()), d_sample_weight,
                   d_sample_weight + n_samples, 1);
    } else {
      d_sample_weight = nullptr;
    }
    raft::copy(d_labels_ref, labels.data(), n_samples, handle.get_stream());
    CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
    T inertia = 0;
    int n_iter = 0;
    kmeans::fit_predict(handle, params, X.data(), n_samples, n_features,
                        d_sample_weight, d_centroids, d_labels, inertia,
                        n_iter);
    CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
    // ARI == 1.0 means the clustering recovered the blob assignment exactly
    score = adjusted_rand_index(handle, d_labels_ref, d_labels, n_samples);
    if (score < 1.0) {
      // dump the first 25 expected/actual labels to ease debugging
      std::stringstream ss;
      ss << "Expected: "
         << raft::arr2Str(d_labels_ref, 25, "d_labels_ref",
                          handle.get_stream());
      CUML_LOG_DEBUG(ss.str().c_str());
      ss.str(std::string());
      ss << "Actual: "
         << raft::arr2Str(d_labels, 25, "d_labels", handle.get_stream());
      CUML_LOG_DEBUG(ss.str().c_str());
      CUML_LOG_DEBUG("Score = %lf", score);
    }
  }
  void SetUp() override { basicTest(); }
  void TearDown() override {
    CUDA_CHECK(cudaFree(d_labels));
    CUDA_CHECK(cudaFree(d_centroids));
    CUDA_CHECK(cudaFree(d_labels_ref));
    // d_sample_weight may be nullptr (unweighted case); freeing a null
    // pointer is a documented no-op for the runtime API
    CUDA_CHECK(cudaFree(d_sample_weight));
  }
 protected:
  KmeansInputs<T> testparams;
  int *d_labels, *d_labels_ref;   // predicted / generator labels (device)
  T *d_centroids, *d_sample_weight;
  double score;                   // adjusted Rand index of the last run
  ML::kmeans::KMeansParams params;
};
// Test matrix: {n_row, n_col, n_clusters, tol, weighted}, each size run both
// with and without uniform sample weights.
const std::vector<KmeansInputs<float>> inputsf2 = {
  {1000, 32, 5, 0.0001, true}, {1000, 32, 5, 0.0001, false},
  {1000, 100, 20, 0.0001, true}, {1000, 100, 20, 0.0001, false},
  {10000, 32, 10, 0.0001, true}, {10000, 32, 10, 0.0001, false},
  {10000, 100, 50, 0.0001, true}, {10000, 100, 50, 0.0001, false},
  {10000, 1000, 200, 0.0001, true}, {10000, 1000, 200, 0.0001, false}};
const std::vector<KmeansInputs<double>> inputsd2 = {
  {1000, 32, 5, 0.0001, true}, {1000, 32, 5, 0.0001, false},
  {1000, 100, 20, 0.0001, true}, {1000, 100, 20, 0.0001, false},
  {10000, 32, 10, 0.0001, true}, {10000, 32, 10, 0.0001, false},
  {10000, 100, 50, 0.0001, true}, {10000, 100, 50, 0.0001, false},
  {10000, 1000, 200, 0.0001, true}, {10000, 1000, 200, 0.0001, false}};
// A perfect adjusted Rand index (exact recovery of the blobs) is required.
typedef KmeansTest<float> KmeansTestF;
TEST_P(KmeansTestF, Result) { ASSERT_TRUE(score == 1.0); }
typedef KmeansTest<double> KmeansTestD;
TEST_P(KmeansTestD, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestF,
                        ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(KmeansTests, KmeansTestD,
                        ::testing::ValuesIn(inputsd2));
}  // end namespace ML
|
030f3e85fe86a15894f70a168390932e17c576e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ const int FILTER_SIZE = 3;
// 3x3 convolution kernel: thread (x, y) convolves the window whose top-left
// corner is (x, y) and writes the /16-normalised sum to the window centre
// (x+1, y+1).
// NOTE(review): only the *write* is bounds-checked. Threads with
// x == width-2 or y == height-2 still read img up to column x+2 / row y+2,
// which wraps across rows and runs past the end of the buffer at the
// right/bottom edges -- the reads should be guarded as well.
extern "C"
__global__ void kernel(
unsigned int width,
unsigned int height,
unsigned int *img,
unsigned int *filter,
unsigned int *result)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int sum = 0;
// redundant re-initialisation, kept as-is
sum = 0;
for(int filterY=0; filterY < FILTER_SIZE; filterY++) {
for(int filterX=0; filterX < FILTER_SIZE; filterX++) {
sum += img[ ((y + filterY) * width) + x + filterX ] * filter[ (filterY * FILTER_SIZE) + filterX ];
}
}
if(y + 1 < height && x + 1 < width) {
result[((y + 1) * width) + x + 1] = sum / 16;
}
} | 030f3e85fe86a15894f70a168390932e17c576e7.cu | __device__ const int FILTER_SIZE = 3;
// 3x3 convolution kernel: thread (x, y) convolves the window whose top-left
// corner is (x, y) and writes the /16-normalised sum to the window centre
// (x+1, y+1). Assumes the mask sums to 16 (Gauss-like) -- confirm with the
// host-side filter.
// BUG FIX: the original only bounds-checked the *write*; threads near the
// right/bottom edges still *read* img up to (y+2, x+2), wrapping across
// rows and running past the end of the buffer. The whole computation is
// now guarded so every tap of the 3x3 window stays in bounds (pixels whose
// window straddles the border are simply not written, as before they only
// received garbage computed from out-of-bounds data).
extern "C"
__global__ void kernel(
    unsigned int width,
    unsigned int height,
    unsigned int *img,
    unsigned int *filter,
    unsigned int *result)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    // full 3x3 window must fit inside the image
    if (x + FILTER_SIZE > width || y + FILTER_SIZE > height) return;
    unsigned int sum = 0;
    for (int filterY = 0; filterY < FILTER_SIZE; filterY++) {
        for (int filterX = 0; filterX < FILTER_SIZE; filterX++) {
            sum += img[((y + filterY) * width) + x + filterX] * filter[(filterY * FILTER_SIZE) + filterX];
        }
    }
    result[((y + 1) * width) + x + 1] = sum / 16;
}
b652050e5713e1ca55f2eb349c211748c3bb3e9d.hip | // !!! This is a file automatically generated by hipify!!!
#include <string.h>
#include <gauge_field.h>
#include <face_quda.h>
#include <typeinfo>
#include <misc_helpers.h>
#include <blas_quda.h>
namespace quda {
// Construct a device gauge field described by param. Unless created as a
// reference (wrapping caller-provided memory) the storage is pool-allocated
// here; non-native orderings also get separate per-dimension ghost buffers,
// and texture objects are created when enabled.
cudaGaugeField::cudaGaugeField(const GaugeFieldParam &param) :
  GaugeField(param), gauge(0), even(0), odd(0), backed_up(false)
{
  // QDP(-JIT) orderings may only wrap existing fields
  if ((order == QUDA_QDP_GAUGE_ORDER || order == QUDA_QDPJIT_GAUGE_ORDER) &&
      create != QUDA_REFERENCE_FIELD_CREATE) {
    errorQuda("QDP ordering only supported for reference fields");
  }
  if (order == QUDA_QDP_GAUGE_ORDER || order == QUDA_MILC_GAUGE_ORDER ||
      order == QUDA_TIFR_GAUGE_ORDER || order == QUDA_BQCD_GAUGE_ORDER ||
      order == QUDA_CPS_WILSON_GAUGE_ORDER)
    errorQuda("Field ordering %d presently disabled for this type", order);
#ifdef MULTI_GPU
  // with pad-style ghost exchange the pad must be able to hold every face
  if (link_type != QUDA_ASQTAD_MOM_LINKS &&
      ghostExchange == QUDA_GHOST_EXCHANGE_PAD &&
      isNative()) {
    bool pad_check = true;
    for (int i=0; i<nDim; i++)
      if (pad < nFace*surfaceCB[i]) pad_check = false;
    if (!pad_check)
      errorQuda("cudaGaugeField being constructed with insufficient padding\n");
  }
#endif
  if(create != QUDA_NULL_FIELD_CREATE &&
     create != QUDA_ZERO_FIELD_CREATE &&
     create != QUDA_REFERENCE_FIELD_CREATE){
    errorQuda("ERROR: create type(%d) not supported yet\n", create);
  }
  if (create != QUDA_REFERENCE_FIELD_CREATE) {
    gauge = pool_device_malloc(bytes);
    // NOTE(review): hipMemset return value is not checked here
    if (create == QUDA_ZERO_FIELD_CREATE) hipMemset(gauge, 0, bytes);
  } else {
    gauge = param.gauge;  // reference field: adopt the caller's pointer
  }
  // non-native layouts keep ghost zones in dedicated per-dimension buffers
  if ( !isNative() ) {
    for (int i=0; i<nDim; i++) {
      size_t nbytes = nFace * surface[i] * nInternal * precision;
      ghost[i] = nbytes ? pool_device_malloc(nbytes) : NULL;
    }
  }
  if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD) {
    // wrapped fields arrive with unpopulated pads, so fill them now
    if (create == QUDA_REFERENCE_FIELD_CREATE) exchangeGhost();
  }
  // even/odd parity halves live in the two halves of the allocation
  even = gauge;
  odd = (char*)gauge + bytes/2;
#ifdef USE_TEXTURE_OBJECTS
  createTexObject(evenTex, even);
  createTexObject(oddTex, odd);
  if(reconstruct == QUDA_RECONSTRUCT_13 || reconstruct == QUDA_RECONSTRUCT_9)
  {  // Create texture objects for the phases
    const int isPhase = 1;
    createTexObject(evenPhaseTex, (char*)even + phase_offset, isPhase);
    createTexObject(oddPhaseTex, (char*)odd + phase_offset, isPhase);
  }
#endif
}
#ifdef USE_TEXTURE_OBJECTS
// Create a linear texture object over one parity half of the field (or,
// when isPhase is set, over its phase region). Only native layouts are
// texture-backed; the channel format follows the field precision, with
// double read as int2 pairs and half read as normalized float.
void cudaGaugeField::createTexObject(hipTextureObject_t &tex, void *field, int isPhase) {
  if( isNative() ){
    // create the texture for the field components
    hipChannelFormatDesc desc;
    memset(&desc, 0, sizeof(hipChannelFormatDesc));
    if (precision == QUDA_SINGLE_PRECISION) desc.f = hipChannelFormatKindFloat;
    else desc.f = hipChannelFormatKindSigned; // half is short, double is int2
    if(isPhase){
      // phases are scalar: one (double -> int2) or one low-precision channel
      if(precision == QUDA_DOUBLE_PRECISION){
        desc.x = 8*sizeof(int);
        desc.y = 8*sizeof(int);
        desc.z = 0;
        desc.w = 0;
      }else{
        desc.x = 8*precision;
        desc.y = desc.z = desc.w = 0;
      }
    }else{
      // always four components regardless of precision
      if (precision == QUDA_DOUBLE_PRECISION) {
        desc.x = 8*sizeof(int);
        desc.y = 8*sizeof(int);
        desc.z = 8*sizeof(int);
        desc.w = 8*sizeof(int);
      } else {
        desc.x = 8*precision;
        desc.y = 8*precision;
        desc.z = (reconstruct == 18) ? 0 : 8*precision; // float2 or short2 for 18 reconstruct
        desc.w = (reconstruct == 18) ? 0 : 8*precision;
      }
    }
    hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = hipResourceTypeLinear;
    resDesc.res.linear.devPtr = field;
    resDesc.res.linear.desc = desc;
    // one parity's worth of data (phase or link region respectively)
    resDesc.res.linear.sizeInBytes = isPhase ? phase_bytes/2 : (bytes-phase_bytes)/2;
    hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    // half precision is stored as short and auto-normalised on fetch
    if (precision == QUDA_HALF_PRECISION) texDesc.readMode = hipReadModeNormalizedFloat;
    else texDesc.readMode = hipReadModeElementType;
    hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    checkCudaError();
  }
}
// Release the texture objects created in createTexObject. No-op for
// non-native layouts, which never created any.
void cudaGaugeField::destroyTexObject() {
  if( isNative() ){
    hipDestroyTextureObject(evenTex);
    hipDestroyTextureObject(oddTex);
    // phase textures only exist for the 9/13 reconstruction schemes
    if(reconstruct == QUDA_RECONSTRUCT_9 || reconstruct == QUDA_RECONSTRUCT_13){
      hipDestroyTextureObject(evenPhaseTex);
      hipDestroyTextureObject(oddPhaseTex);
    }
    checkCudaError();
  }
}
#endif
// Release device allocations owned by this field. Reference-created fields
// do not own their gauge pointer, and native layouts keep ghost zones inside
// the main allocation, so neither is freed in those cases.
cudaGaugeField::~cudaGaugeField()
{
#ifdef USE_TEXTURE_OBJECTS
  destroyTexObject();
#endif
  const bool owns_gauge = (create != QUDA_REFERENCE_FIELD_CREATE);
  if (owns_gauge && gauge) pool_device_free(gauge);
  if (!isNative()) {
    for (int d = 0; d < nDim; d++)
      if (ghost[d]) pool_device_free(ghost[d]);
  }
}
// This does the exchange of the gauge field ghost zone and places it
// into the ghost array.
// Populate the ghost zones: pack faces, exchange them between neighbouring
// ranks, and (for native layouts) copy the received faces into the padded
// region of the main gauge allocation.
void cudaGaugeField::exchangeGhost() {
  if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
    errorQuda("Cannot call exchangeGhost with ghostExchange=%d",
              ghostExchange);
  if (geometry != QUDA_VECTOR_GEOMETRY && geometry != QUDA_COARSE_GEOMETRY)
    errorQuda("Cannot exchange for %d geometry gauge field", geometry);
  void *ghost_[QUDA_MAX_DIM];
  void *send[QUDA_MAX_DIM];
  // native layouts receive into temporaries; others receive straight into
  // their dedicated ghost buffers
  for (int d=0; d<nDim; d++) {
    ghost_[d] = isNative() ? pool_device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
    send[d] = pool_device_malloc(nFace*surface[d]*nInternal*precision);
  }
  // get the links into contiguous buffers
  extractGaugeGhost(*this, send, true);
  // communicate between nodes
  exchange(ghost_, send, QUDA_FORWARDS);
  for (int d=0; d<nDim; d++) pool_device_free(send[d]);
  if (isNative()) {
    // copy from ghost into the padded region in gauge
    copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, 0, ghost_, 1);
    for (int d=0; d<nDim; d++) pool_device_free(ghost_[d]);
  }
}
// This does the opposite of exchnageGhost and sends back the ghost
// zone to the node from which it came and injects it back into the
// field
// Inverse of exchangeGhost: read the local ghost/pad data, send it back to
// the rank it originally came from, and inject the received faces into the
// body of the field.
void cudaGaugeField::injectGhost() {
  if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
    errorQuda("Cannot call exchangeGhost with ghostExchange=%d",
              ghostExchange);
  if (geometry != QUDA_VECTOR_GEOMETRY && geometry != QUDA_COARSE_GEOMETRY)
    errorQuda("Cannot exchange for %d geometry gauge field", geometry);
  void *ghost_[QUDA_MAX_DIM];
  void *recv[QUDA_MAX_DIM];
  // native layouts stage their pads through temporaries; others send their
  // dedicated ghost buffers directly
  for (int d=0; d<nDim; d++) {
    ghost_[d] = isNative() ? pool_device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
    recv[d] = pool_device_malloc(nFace*surface[d]*nInternal*precision);
  }
  if (isNative()) {
    // copy from padded region in gauge field into ghost
    copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, ghost_, 0, 1);
  }
  // communicate between nodes (opposite direction to exchangeGhost)
  exchange(recv, ghost_, QUDA_BACKWARDS);
  // get the links into contiguous buffers
  extractGaugeGhost(*this, recv, false);
  for (int d=0; d<nDim; d++) {
    pool_device_free(recv[d]);
    if (isNative()) pool_device_free(ghost_[d]);
  }
}
// Exchange the R[d]-deep extended halo of the field in every partitioned
// dimension (or fill it locally when no_comms_fill is set and R[d] > 0).
// Without GPU_COMMS the device faces are staged through one pinned host
// buffer, with the fwd/back copies pipelined on streams[0]/streams[1].
// The statement order inside the comms loop is load-bearing (see FIXME).
void cudaGaugeField::exchangeExtendedGhost(const int *R, bool no_comms_fill) {
  void *send[QUDA_MAX_DIM];
  void *recv[QUDA_MAX_DIM];
  void *send_d[QUDA_MAX_DIM];
  void *recv_d[QUDA_MAX_DIM];
  size_t bytes[QUDA_MAX_DIM];
  for (int d=0; d<nDim; d++) {
    if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
    // store both parities and directions in each
    bytes[d] = surface[d] * R[d] * geometry * nInternal * precision;
    send_d[d] = pool_device_malloc(2 * bytes[d]);
    recv_d[d] = pool_device_malloc(2 * bytes[d]);
  }
#ifndef GPU_COMMS
  // host staging: one pinned allocation carved into per-dimension
  // recv/send regions
  void *send_h[QUDA_MAX_DIM];
  void *recv_h[QUDA_MAX_DIM];
  size_t total_bytes = 0;
  for (int d=0; d<nDim; d++) {
    if (!commDimPartitioned(d)) continue;
    total_bytes += 4*bytes[d]; // (2 from send/recv) x (2 from fwd/back)
  }
  void *buffer = total_bytes > 0 ? pool_pinned_malloc(total_bytes) : nullptr;
  size_t offset = 0;
  for (int d=0; d<nDim; d++) {
    if (!commDimPartitioned(d)) continue;
    recv_h[d] = static_cast<char*>(buffer) + offset;
    send_h[d] = static_cast<char*>(recv_h[d]) + 2*bytes[d];
    offset += 4*bytes[d];
  }
#endif
  // do the exchange
  MsgHandle *mh_recv_back[QUDA_MAX_DIM];
  MsgHandle *mh_recv_fwd[QUDA_MAX_DIM];
  MsgHandle *mh_send_fwd[QUDA_MAX_DIM];
  MsgHandle *mh_send_back[QUDA_MAX_DIM];
  for (int d=0; d<nDim; d++) {
    if (!commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
    recv[d] = recv_d[d];
    send[d] = send_d[d];
#else
    recv[d] = recv_h[d];
    send[d] = send_h[d];
#endif
    // look into storing these for later
    mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]);
    mh_recv_fwd[d] = comm_declare_receive_relative(static_cast<char*>(recv[d])+bytes[d],
                                                   d, +1, bytes[d]);
    mh_send_back[d] = comm_declare_send_relative(send[d], d, -1, bytes[d]);
    mh_send_fwd[d] = comm_declare_send_relative(static_cast<char*>(send[d])+bytes[d],
                                                d, +1, bytes[d]);
  }
  for (int d=0; d<nDim; d++) {
    if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
    // FIXME why does this break if the order is switched?
    // prepost the receives
    if (commDimPartitioned(d)) {
      comm_start(mh_recv_fwd[d]);
      comm_start(mh_recv_back[d]);
    }
    //extract into a contiguous buffer
    extractExtendedGaugeGhost(*this, d, R, send_d, true);
    if (commDimPartitioned(d)) {
      // pipeline the forwards and backwards sending
#ifndef GPU_COMMS
      hipMemcpyAsync(send_h[d], send_d[d], bytes[d], hipMemcpyDeviceToHost, streams[0]);
      hipMemcpyAsync(static_cast<char*>(send_h[d])+bytes[d],
                     static_cast<char*>(send_d[d])+bytes[d], bytes[d], hipMemcpyDeviceToHost, streams[1]);
#endif
#ifndef GPU_COMMS
      hipStreamSynchronize(streams[0]);
#endif
      comm_start(mh_send_back[d]);
#ifndef GPU_COMMS
      hipStreamSynchronize(streams[1]);
#endif
      comm_start(mh_send_fwd[d]);
      // forwards recv
      comm_wait(mh_send_back[d]);
      comm_wait(mh_recv_fwd[d]);
#ifndef GPU_COMMS
      hipMemcpyAsync(static_cast<char*>(recv_d[d])+bytes[d],
                     static_cast<char*>(recv_h[d])+bytes[d], bytes[d], hipMemcpyHostToDevice, streams[0]);
#endif
      // backwards recv
      comm_wait(mh_send_fwd[d]);
      comm_wait(mh_recv_back[d]);
#ifndef GPU_COMMS
      hipMemcpyAsync(recv_d[d], recv_h[d], bytes[d], hipMemcpyHostToDevice, streams[1]);
#endif
    } else { // if just doing a local exchange to fill halo then need to swap faces
      qudaMemcpy(static_cast<char*>(recv_d[d])+bytes[d], send_d[d], bytes[d], hipMemcpyDeviceToDevice);
      qudaMemcpy(recv_d[d], static_cast<char*>(send_d[d])+bytes[d], bytes[d], hipMemcpyDeviceToDevice);
    }
    // inject back into the gauge field
    extractExtendedGaugeGhost(*this, d, R, recv_d, false);
  }
#ifndef GPU_COMMS
  if (total_bytes > 0) pool_pinned_free(buffer);
#endif
  // tear down message handles and device staging buffers
  for (int d=0; d<nDim; d++) {
    if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
    if (commDimPartitioned(d)) {
      comm_free(mh_send_fwd[d]);
      comm_free(mh_send_back[d]);
      comm_free(mh_recv_back[d]);
      comm_free(mh_recv_fwd[d]);
    }
    pool_device_free(send_d[d]);
    pool_device_free(recv_d[d]);
  }
}
// Point a reference-created field at caller-owned gauge memory. Any other
// create type owns its allocation, so rebinding is rejected.
void cudaGaugeField::setGauge(void *gauge_)
{
  if(create != QUDA_REFERENCE_FIELD_CREATE) {
    errorQuda("Setting gauge pointer is only allowed when create="
	      "QUDA_REFERENCE_FIELD_CREATE type\n");
  }
  gauge = gauge_;
}
// Allocate a device staging buffer matching a gauge field's layout.
// QDP ordering stores one array per geometry component, so it gets an
// array of per-dimension allocations; every other ordering is one flat
// block. Pair with free_gauge_buffer.
void *create_gauge_buffer(size_t bytes, QudaGaugeFieldOrder order, QudaFieldGeometry geometry) {
  if (order != QUDA_QDP_GAUGE_ORDER) return device_malloc(bytes);
  void **per_dim = new void*[geometry];
  const size_t chunk = bytes / geometry;
  for (int d = 0; d < geometry; d++) per_dim[d] = device_malloc(chunk);
  return (void*)per_dim;
}
// Allocate per-dimension ghost-zone staging buffers for orderings that
// carry separate ghost data (order > 4); other orderings need none and
// yield a null pointer. Pair with free_ghost_buffer.
void **create_ghost_buffer(size_t bytes[], QudaGaugeFieldOrder order) {
  if (order <= 4) return 0;
  void **ghost = new void*[4];
  for (int d = 0; d < 4; d++) ghost[d] = device_malloc(bytes[d]);
  return ghost;
}
// Release a buffer obtained from create_gauge_buffer, honouring the
// per-dimension layout used for QDP ordering.
void free_gauge_buffer(void *buffer, QudaGaugeFieldOrder order, QudaFieldGeometry geometry) {
  if (order != QUDA_QDP_GAUGE_ORDER) {
    device_free(buffer);
    return;
  }
  void **per_dim = (void**)buffer;
  for (int d = 0; d < geometry; d++) device_free(per_dim[d]);
  delete [] per_dim;
}
// Release buffers obtained from create_ghost_buffer; a no-op for orderings
// (<= 4) that never allocated any.
void free_ghost_buffer(void **buffer, QudaGaugeFieldOrder order) {
  if (order <= 4) return;
  for (int d = 0; d < 4; d++) device_free(buffer[d]);
  delete [] buffer;
}
// Copy the contents of src (device- or host-resident) into this field,
// reordering between layouts as needed. Host sources are staged either
// through a pinned buffer (CPU reorder) or through device buffers
// (GPU reorder); ghost zones are refreshed when the source lacks a pad.
void cudaGaugeField::copy(const GaugeField &src) {
  if (this == &src) return;
  checkField(src);
  if (link_type == QUDA_ASQTAD_FAT_LINKS) {
    // half precision needs the link maximum for fixed-point scaling
    fat_link_max = src.LinkMax();
    if (precision == QUDA_HALF_PRECISION && fat_link_max == 0.0)
      errorQuda("fat_link_max has not been computed");
  } else {
    fat_link_max = 1.0;
  }
  if (typeid(src) == typeid(cudaGaugeField)) {
    // copy field and ghost zone into this field
    copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge,
                     static_cast<const cudaGaugeField&>(src).gauge);
  } else if (typeid(src) == typeid(cpuGaugeField)) {
    if (reorder_location() == QUDA_CPU_FIELD_LOCATION) { // do reorder on the CPU
      void *buffer = pool_pinned_malloc(bytes);
      // copy field and ghost zone into buffer
      copyGenericGauge(*this, src, QUDA_CPU_FIELD_LOCATION, buffer, static_cast<const cpuGaugeField&>(src).gauge);
      // this copies over both even and odd
      qudaMemcpy(gauge, buffer, bytes, hipMemcpyHostToDevice);
      pool_pinned_free(buffer);
    } else { // else on the GPU
      // stage the raw host data (and its ghosts, if any) onto the device,
      // then reorder device-side
      void *buffer = create_gauge_buffer(src.Bytes(), src.Order(), src.Geometry());
      size_t ghost_bytes[4];
      int srcNinternal = src.Reconstruct() != QUDA_RECONSTRUCT_NO ? src.Reconstruct() : 2*nColor*nColor;
      for (int d=0; d<4; d++) ghost_bytes[d] = nFace * surface[d] * srcNinternal * src.Precision();
      void **ghost_buffer = (nFace > 0) ? create_ghost_buffer(ghost_bytes, src.Order()) : nullptr;
      if (src.Order() == QUDA_QDP_GAUGE_ORDER) {
        for (int d=0; d<geometry; d++) {
          qudaMemcpy(((void**)buffer)[d], ((void**)src.Gauge_p())[d], src.Bytes()/geometry, hipMemcpyHostToDevice);
        }
      } else {
        qudaMemcpy(buffer, src.Gauge_p(), src.Bytes(), hipMemcpyHostToDevice);
      }
      if (src.Order() > 4 && GhostExchange() == QUDA_GHOST_EXCHANGE_PAD &&
          src.GhostExchange() == QUDA_GHOST_EXCHANGE_PAD && nFace)
        for (int d=0; d<4; d++)
          qudaMemcpy(ghost_buffer[d], src.Ghost()[d], ghost_bytes[d], hipMemcpyHostToDevice);
      copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge, buffer, 0, ghost_buffer);
      free_gauge_buffer(buffer, src.Order(), src.Geometry());
      if (nFace > 0) free_ghost_buffer(ghost_buffer, src.Order());
    } // reorder_location
  } else {
    errorQuda("Invalid gauge field type");
  }
  // if we have copied from a source without a pad then we need to exchange
  if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD && src.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD) exchangeGhost();
  // propagate staggered-phase bookkeeping from the source
  staggeredPhaseApplied = src.StaggeredPhaseApplied();
  staggeredPhaseType = src.StaggeredPhase();
  checkCudaError();
}
void cudaGaugeField::loadCPUField(const cpuGaugeField &cpu) { copy(cpu); }
// Export this device field into the host field cpu, reordering either on
// the device (staging through device scratch buffers in cpu's layout) or
// on the host (staging through a pinned buffer), per reorder_location().
void cudaGaugeField::saveCPUField(cpuGaugeField &cpu) const
{
  QudaFieldLocation pack_location = reorder_location();
  if (pack_location == QUDA_CUDA_FIELD_LOCATION) {
    void *buffer = create_gauge_buffer(cpu.Bytes(), cpu.Order(), cpu.Geometry());
    // Allocate space for ghost zone if required
    size_t ghost_bytes[4];
    int cpuNinternal = cpu.Reconstruct() != QUDA_RECONSTRUCT_NO ? cpu.Reconstruct() : 2*nColor*nColor;
    for (int d=0; d<4; d++) ghost_bytes[d] = nFace * surface[d] * cpuNinternal * cpu.Precision();
    void **ghost_buffer = (nFace > 0) ? create_ghost_buffer(ghost_bytes, cpu.Order()) : nullptr;
    // reorder on the device into cpu's layout, then copy down to the host
    copyGenericGauge(cpu, *this, QUDA_CUDA_FIELD_LOCATION, buffer, gauge, ghost_buffer, 0);
    if (cpu.Order() == QUDA_QDP_GAUGE_ORDER) {
      // QDP order: one transfer per geometry dimension
      for (int d=0; d<geometry; d++) qudaMemcpy(((void**)cpu.gauge)[d], ((void**)buffer)[d], cpu.Bytes()/geometry, hipMemcpyDeviceToHost);
    } else {
      qudaMemcpy(cpu.gauge, buffer, cpu.Bytes(), hipMemcpyDeviceToHost);
    }
    // only download ghosts when both fields use pad-style ghost exchange
    if (cpu.Order() > 4 && GhostExchange() == QUDA_GHOST_EXCHANGE_PAD &&
        cpu.GhostExchange() == QUDA_GHOST_EXCHANGE_PAD && nFace)
      for (int d=0; d<4; d++)
        qudaMemcpy(cpu.Ghost()[d], ghost_buffer[d], ghost_bytes[d], hipMemcpyDeviceToHost);
    free_gauge_buffer(buffer, cpu.Order(), cpu.Geometry());
    if (nFace > 0) free_ghost_buffer(ghost_buffer, cpu.Order());
  } else if (pack_location == QUDA_CPU_FIELD_LOCATION) { // do copy then host-side reorder
    void *buffer = pool_pinned_malloc(bytes);
    qudaMemcpy(buffer, gauge, bytes, hipMemcpyDeviceToHost);
    copyGenericGauge(cpu, *this, QUDA_CPU_FIELD_LOCATION, cpu.gauge, buffer);
    pool_pinned_free(buffer);
  } else {
    errorQuda("Invalid pack location %d", pack_location);
  }
  // propagate staggered-phase metadata alongside the data
  cpu.staggeredPhaseApplied = staggeredPhaseApplied;
  cpu.staggeredPhaseType = staggeredPhaseType;
}
// Save the device gauge field into a freshly allocated host buffer
// (backup_h), to be released by restore().
// NOTE(review): uses raw hipMemcpy rather than the qudaMemcpy wrapper used
// elsewhere in this file — confirm this is intentional.
void cudaGaugeField::backup() const {
  if (backed_up) errorQuda("Gauge field already backed up");
  backup_h = new char[bytes]; // freed by restore()
  hipMemcpy(backup_h, gauge, bytes, hipMemcpyDeviceToHost);
  checkCudaError();
  backed_up = true;
}
// Restore the device gauge field from the host backup made by backup(),
// then release the backup buffer.
void cudaGaugeField::restore() {
  if (!backed_up) errorQuda("Cannot restore since not backed up");
  hipMemcpy(gauge, backup_h, bytes, hipMemcpyHostToDevice);
  delete []backup_h; // allocated in backup()
  checkCudaError();
  backed_up = false;
}
// Zero-fill the entire device allocation (both parities and any pad).
void cudaGaugeField::zero() {
  hipMemset(gauge, 0, bytes);
}
} // namespace quda
| b652050e5713e1ca55f2eb349c211748c3bb3e9d.cu | #include <string.h>
#include <gauge_field.h>
#include <face_quda.h>
#include <typeinfo>
#include <misc_helpers.h>
#include <blas_quda.h>
namespace quda {
// Construct a device-resident gauge field from param: validates the
// requested order/padding, allocates (zero-fills or references) the device
// storage, allocates per-dimension ghost buffers for non-native orders, and
// (when enabled) creates texture objects over the even/odd halves.
//
// Fix: the parameter declaration had been corrupted to "¶m" (an HTML
// entity mangling of "&param"), which does not compile; restored to
// "const GaugeFieldParam &param".
cudaGaugeField::cudaGaugeField(const GaugeFieldParam &param) :
  GaugeField(param), gauge(0), even(0), odd(0), backed_up(false)
{
  if ((order == QUDA_QDP_GAUGE_ORDER || order == QUDA_QDPJIT_GAUGE_ORDER) &&
      create != QUDA_REFERENCE_FIELD_CREATE) {
    errorQuda("QDP ordering only supported for reference fields");
  }
  if (order == QUDA_QDP_GAUGE_ORDER || order == QUDA_MILC_GAUGE_ORDER ||
      order == QUDA_TIFR_GAUGE_ORDER || order == QUDA_BQCD_GAUGE_ORDER ||
      order == QUDA_CPS_WILSON_GAUGE_ORDER)
    errorQuda("Field ordering %d presently disabled for this type", order);
#ifdef MULTI_GPU
  // native fields that keep their ghost zone in the pad need the pad to be
  // at least nFace faces deep in every dimension
  if (link_type != QUDA_ASQTAD_MOM_LINKS &&
      ghostExchange == QUDA_GHOST_EXCHANGE_PAD &&
      isNative()) {
    bool pad_check = true;
    for (int i=0; i<nDim; i++)
      if (pad < nFace*surfaceCB[i]) pad_check = false;
    if (!pad_check)
      errorQuda("cudaGaugeField being constructed with insufficient padding\n");
  }
#endif
  if(create != QUDA_NULL_FIELD_CREATE &&
     create != QUDA_ZERO_FIELD_CREATE &&
     create != QUDA_REFERENCE_FIELD_CREATE){
    errorQuda("ERROR: create type(%d) not supported yet\n", create);
  }
  if (create != QUDA_REFERENCE_FIELD_CREATE) {
    gauge = pool_device_malloc(bytes);
    if (create == QUDA_ZERO_FIELD_CREATE) cudaMemset(gauge, 0, bytes);
  } else {
    gauge = param.gauge; // reference an externally owned allocation
  }
  if ( !isNative() ) {
    // non-native orders keep their ghost zones in separate device buffers
    for (int i=0; i<nDim; i++) {
      size_t nbytes = nFace * surface[i] * nInternal * precision;
      ghost[i] = nbytes ? pool_device_malloc(nbytes) : NULL;
    }
  }
  if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD) {
    if (create == QUDA_REFERENCE_FIELD_CREATE) exchangeGhost();
  }
  even = gauge;
  odd = (char*)gauge + bytes/2; // odd parity lives in the second half
#ifdef USE_TEXTURE_OBJECTS
  createTexObject(evenTex, even);
  createTexObject(oddTex, odd);
  if(reconstruct == QUDA_RECONSTRUCT_13 || reconstruct == QUDA_RECONSTRUCT_9)
  { // Create texture objects for the phases
    const int isPhase = 1;
    createTexObject(evenPhaseTex, (char*)even + phase_offset, isPhase);
    createTexObject(oddPhaseTex, (char*)odd + phase_offset, isPhase);
  }
#endif
}
#ifdef USE_TEXTURE_OBJECTS
// Create a linear-memory texture object over one parity half of the field
// (or its phase block when isPhase).  Only native layouts get textures.
// Channel widths are chosen so each fetch returns one native vector:
// double data is fetched as int2/int4 pairs, half as normalized floats.
void cudaGaugeField::createTexObject(cudaTextureObject_t &tex, void *field, int isPhase) {
  if( isNative() ){
    // create the texture for the field components
    cudaChannelFormatDesc desc;
    memset(&desc, 0, sizeof(cudaChannelFormatDesc));
    if (precision == QUDA_SINGLE_PRECISION) desc.f = cudaChannelFormatKindFloat;
    else desc.f = cudaChannelFormatKindSigned; // half is short, double is int2
    if(isPhase){
      if(precision == QUDA_DOUBLE_PRECISION){
        // a double phase is fetched as two 32-bit ints
        desc.x = 8*sizeof(int);
        desc.y = 8*sizeof(int);
        desc.z = 0;
        desc.w = 0;
      }else{
        desc.x = 8*precision;
        desc.y = desc.z = desc.w = 0;
      }
    }else{
      // always four components regardless of precision
      if (precision == QUDA_DOUBLE_PRECISION) {
        desc.x = 8*sizeof(int);
        desc.y = 8*sizeof(int);
        desc.z = 8*sizeof(int);
        desc.w = 8*sizeof(int);
      } else {
        desc.x = 8*precision;
        desc.y = 8*precision;
        desc.z = (reconstruct == 18) ? 0 : 8*precision; // float2 or short2 for 18 reconstruct
        desc.w = (reconstruct == 18) ? 0 : 8*precision;
      }
    }
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = field;
    resDesc.res.linear.desc = desc;
    // one parity's worth of data (phase block or body, minus the phases)
    resDesc.res.linear.sizeInBytes = isPhase ? phase_bytes/2 : (bytes-phase_bytes)/2;
    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    if (precision == QUDA_HALF_PRECISION) texDesc.readMode = cudaReadModeNormalizedFloat;
    else texDesc.readMode = cudaReadModeElementType;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    checkCudaError();
  }
}
// Release the texture objects created in createTexObject(); only native
// layouts ever created any.
void cudaGaugeField::destroyTexObject() {
  if (!isNative()) return;
  cudaDestroyTextureObject(evenTex);
  cudaDestroyTextureObject(oddTex);
  // phase textures exist only for the 9/13 reconstruct types
  if (reconstruct == QUDA_RECONSTRUCT_9 || reconstruct == QUDA_RECONSTRUCT_13) {
    cudaDestroyTextureObject(evenPhaseTex);
    cudaDestroyTextureObject(oddPhaseTex);
  }
  checkCudaError();
}
#endif
// Tear down the field: destroy texture objects, free the owned device
// allocation (reference-create fields do not own their storage), and free
// any non-native per-dimension ghost buffers.
cudaGaugeField::~cudaGaugeField()
{
#ifdef USE_TEXTURE_OBJECTS
  destroyTexObject();
#endif
  if (create != QUDA_REFERENCE_FIELD_CREATE) {
    if (gauge) pool_device_free(gauge);
  }
  if ( !isNative() ) {
    for (int i=0; i<nDim; i++) {
      if (ghost[i]) pool_device_free(ghost[i]);
    }
  }
}
// This does the exchange of the gauge field ghost zone and places it
// into the ghost array.
// Exchange the gauge-field ghost zone with neighboring nodes and place the
// result into the ghost array (native layouts: into the pad region).
void cudaGaugeField::exchangeGhost() {
  if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
    errorQuda("Cannot call exchangeGhost with ghostExchange=%d",
              ghostExchange);
  if (geometry != QUDA_VECTOR_GEOMETRY && geometry != QUDA_COARSE_GEOMETRY)
    errorQuda("Cannot exchange for %d geometry gauge field", geometry);
  void *ghost_[QUDA_MAX_DIM];
  void *send[QUDA_MAX_DIM];
  for (int d=0; d<nDim; d++) {
    // native layouts need temporary receive buffers; non-native receive
    // directly into the persistent ghost arrays
    ghost_[d] = isNative() ? pool_device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
    send[d] = pool_device_malloc(nFace*surface[d]*nInternal*precision);
  }
  // get the links into contiguous buffers
  extractGaugeGhost(*this, send, true);
  // communicate between nodes
  exchange(ghost_, send, QUDA_FORWARDS);
  for (int d=0; d<nDim; d++) pool_device_free(send[d]);
  if (isNative()) {
    // copy from ghost into the padded region in gauge
    copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, 0, ghost_, 1);
    for (int d=0; d<nDim; d++) pool_device_free(ghost_[d]);
  }
}
// This does the opposite of exchangeGhost: it sends the ghost
// zone back to the node from which it came and injects it back into the
// field
// Send the ghost zone back to the node it came from and inject it into the
// local field body — the inverse of exchangeGhost().
//
// Fix: the precondition error message incorrectly named "exchangeGhost";
// it now reports the actual function, injectGhost.
void cudaGaugeField::injectGhost() {
  if (ghostExchange != QUDA_GHOST_EXCHANGE_PAD)
    errorQuda("Cannot call injectGhost with ghostExchange=%d",
              ghostExchange);
  if (geometry != QUDA_VECTOR_GEOMETRY && geometry != QUDA_COARSE_GEOMETRY)
    errorQuda("Cannot exchange for %d geometry gauge field", geometry);
  void *ghost_[QUDA_MAX_DIM];
  void *recv[QUDA_MAX_DIM];
  for (int d=0; d<nDim; d++) {
    // native layouts stage through temporaries; non-native send straight
    // from the persistent ghost arrays
    ghost_[d] = isNative() ? pool_device_malloc(nFace*surface[d]*nInternal*precision) : ghost[d];
    recv[d] = pool_device_malloc(nFace*surface[d]*nInternal*precision);
  }
  if (isNative()) {
    // copy from the padded region in the gauge field into the ghost buffers
    copyGenericGauge(*this, *this, QUDA_CUDA_FIELD_LOCATION, 0, 0, ghost_, 0, 1);
  }
  // communicate between nodes
  exchange(recv, ghost_, QUDA_BACKWARDS);
  // scatter the received links back into the field body
  extractGaugeGhost(*this, recv, false);
  for (int d=0; d<nDim; d++) {
    pool_device_free(recv[d]);
    if (isNative()) pool_device_free(ghost_[d]);
  }
}
// Exchange the halo of an extended field: for each partitioned dimension,
// extract R[d]-deep faces (both parities, both directions), communicate
// them with the neighbors (staging through pinned host memory unless
// GPU_COMMS), and inject the received faces back.  When a dimension is not
// partitioned but no_comms_fill is set, the faces are wrapped locally.
// NOTE: the per-dimension ordering of prepost/extract/send is deliberate —
// see the FIXME below before reordering anything.
void cudaGaugeField::exchangeExtendedGhost(const int *R, bool no_comms_fill) {
  void *send[QUDA_MAX_DIM];
  void *recv[QUDA_MAX_DIM];
  void *send_d[QUDA_MAX_DIM];
  void *recv_d[QUDA_MAX_DIM];
  size_t bytes[QUDA_MAX_DIM];
  for (int d=0; d<nDim; d++) {
    if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
    // store both parities and directions in each
    bytes[d] = surface[d] * R[d] * geometry * nInternal * precision;
    send_d[d] = pool_device_malloc(2 * bytes[d]);
    recv_d[d] = pool_device_malloc(2 * bytes[d]);
  }
#ifndef GPU_COMMS
  // host staging buffers, carved out of one pinned allocation
  void *send_h[QUDA_MAX_DIM];
  void *recv_h[QUDA_MAX_DIM];
  size_t total_bytes = 0;
  for (int d=0; d<nDim; d++) {
    if (!commDimPartitioned(d)) continue;
    total_bytes += 4*bytes[d]; // (2 from send/recv) x (2 from fwd/back)
  }
  void *buffer = total_bytes > 0 ? pool_pinned_malloc(total_bytes) : nullptr;
  size_t offset = 0;
  for (int d=0; d<nDim; d++) {
    if (!commDimPartitioned(d)) continue;
    recv_h[d] = static_cast<char*>(buffer) + offset;
    send_h[d] = static_cast<char*>(recv_h[d]) + 2*bytes[d];
    offset += 4*bytes[d];
  }
#endif
  // do the exchange
  MsgHandle *mh_recv_back[QUDA_MAX_DIM];
  MsgHandle *mh_recv_fwd[QUDA_MAX_DIM];
  MsgHandle *mh_send_fwd[QUDA_MAX_DIM];
  MsgHandle *mh_send_back[QUDA_MAX_DIM];
  for (int d=0; d<nDim; d++) {
    if (!commDimPartitioned(d)) continue;
#ifdef GPU_COMMS
    recv[d] = recv_d[d];
    send[d] = send_d[d];
#else
    recv[d] = recv_h[d];
    send[d] = send_h[d];
#endif
    // look into storing these for later
    mh_recv_back[d] = comm_declare_receive_relative(recv[d], d, -1, bytes[d]);
    mh_recv_fwd[d] = comm_declare_receive_relative(static_cast<char*>(recv[d])+bytes[d],
                                                   d, +1, bytes[d]);
    mh_send_back[d] = comm_declare_send_relative(send[d], d, -1, bytes[d]);
    mh_send_fwd[d] = comm_declare_send_relative(static_cast<char*>(send[d])+bytes[d],
                                                d, +1, bytes[d]);
  }
  for (int d=0; d<nDim; d++) {
    if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
    // FIXME why does this break if the order is switched?
    // prepost the receives
    if (commDimPartitioned(d)) {
      comm_start(mh_recv_fwd[d]);
      comm_start(mh_recv_back[d]);
    }
    //extract into a contiguous buffer
    extractExtendedGaugeGhost(*this, d, R, send_d, true);
    if (commDimPartitioned(d)) {
      // pipeline the forwards and backwards sending
#ifndef GPU_COMMS
      cudaMemcpyAsync(send_h[d], send_d[d], bytes[d], cudaMemcpyDeviceToHost, streams[0]);
      cudaMemcpyAsync(static_cast<char*>(send_h[d])+bytes[d],
                      static_cast<char*>(send_d[d])+bytes[d], bytes[d], cudaMemcpyDeviceToHost, streams[1]);
#endif
#ifndef GPU_COMMS
      cudaStreamSynchronize(streams[0]);
#endif
      comm_start(mh_send_back[d]);
#ifndef GPU_COMMS
      cudaStreamSynchronize(streams[1]);
#endif
      comm_start(mh_send_fwd[d]);
      // forwards recv
      comm_wait(mh_send_back[d]);
      comm_wait(mh_recv_fwd[d]);
#ifndef GPU_COMMS
      cudaMemcpyAsync(static_cast<char*>(recv_d[d])+bytes[d],
                      static_cast<char*>(recv_h[d])+bytes[d], bytes[d], cudaMemcpyHostToDevice, streams[0]);
#endif
      // backwards recv
      comm_wait(mh_send_fwd[d]);
      comm_wait(mh_recv_back[d]);
#ifndef GPU_COMMS
      cudaMemcpyAsync(recv_d[d], recv_h[d], bytes[d], cudaMemcpyHostToDevice, streams[1]);
#endif
    } else { // if just doing a local exchange to fill halo then need to swap faces
      qudaMemcpy(static_cast<char*>(recv_d[d])+bytes[d], send_d[d], bytes[d], cudaMemcpyDeviceToDevice);
      qudaMemcpy(recv_d[d], static_cast<char*>(send_d[d])+bytes[d], bytes[d], cudaMemcpyDeviceToDevice);
    }
    // inject back into the gauge field
    extractExtendedGaugeGhost(*this, d, R, recv_d, false);
  }
#ifndef GPU_COMMS
  if (total_bytes > 0) pool_pinned_free(buffer);
#endif
  // release message handles and device staging buffers
  for (int d=0; d<nDim; d++) {
    if ( !(commDimPartitioned(d) || (no_comms_fill && R[d])) ) continue;
    if (commDimPartitioned(d)) {
      comm_free(mh_send_fwd[d]);
      comm_free(mh_send_back[d]);
      comm_free(mh_recv_back[d]);
      comm_free(mh_recv_fwd[d]);
    }
    pool_device_free(send_d[d]);
    pool_device_free(recv_d[d]);
  }
}
// Rebind the external data pointer.  Legal only for reference-create
// fields; owned allocations are managed by this object and must not be
// swapped out from under it.
void cudaGaugeField::setGauge(void *gauge_)
{
  if (create != QUDA_REFERENCE_FIELD_CREATE) {
    errorQuda("Setting gauge pointer is only allowed when create=QUDA_REFERENCE_FIELD_CREATE type\n");
  }
  gauge = gauge_;
}
// Allocate device scratch storage shaped like a gauge field of the given
// order: QDP order gets one allocation per geometry dimension (returned as
// a heap array of pointers), anything else a single contiguous allocation.
void *create_gauge_buffer(size_t bytes, QudaGaugeFieldOrder order, QudaFieldGeometry geometry) {
  if (order != QUDA_QDP_GAUGE_ORDER) return device_malloc(bytes);
  void **buffer = new void*[geometry];
  for (int d = 0; d < geometry; ++d) {
    buffer[d] = device_malloc(bytes / geometry);
  }
  return (void*)buffer;
}
// Allocate one device ghost buffer per dimension for non-native (order > 4)
// field orders; native layouts keep the ghost zone in the pad and need none.
void **create_ghost_buffer(size_t bytes[], QudaGaugeFieldOrder order) {
  if (order <= 4) return 0; // native layouts carry their ghosts in the pad
  void **buffer = new void*[4];
  for (int d = 0; d < 4; ++d) {
    buffer[d] = device_malloc(bytes[d]);
  }
  return buffer;
}
// Release a buffer obtained from create_gauge_buffer: for QDP order the
// buffer is an array of per-dimension device allocations, otherwise a
// single device allocation.
void free_gauge_buffer(void *buffer, QudaGaugeFieldOrder order, QudaFieldGeometry geometry) {
  if (order != QUDA_QDP_GAUGE_ORDER) {
    device_free(buffer);
    return;
  }
  void **qdp = (void**)buffer;
  for (int d = 0; d < geometry; ++d) device_free(qdp[d]);
  delete [] qdp;
}
// Release the per-dimension ghost buffers from create_ghost_buffer; nothing
// was allocated for native orders (order <= 4), so that case is a no-op.
void free_ghost_buffer(void **buffer, QudaGaugeFieldOrder order) {
  if (order <= 4) return;
  for (int d = 0; d < 4; ++d) device_free(buffer[d]);
  delete [] buffer;
}
// Copy src (device- or host-resident) into this field, including the ghost
// zone.  Fat-link normalization metadata is carried over; for CPU sources
// the layout reordering happens on the host or the device depending on
// reorder_location().
void cudaGaugeField::copy(const GaugeField &src) {
  if (this == &src) return; // self-copy is a no-op
  checkField(src);
  if (link_type == QUDA_ASQTAD_FAT_LINKS) {
    fat_link_max = src.LinkMax();
    // half precision needs the link maximum for fixed-point scaling
    if (precision == QUDA_HALF_PRECISION && fat_link_max == 0.0)
      errorQuda("fat_link_max has not been computed");
  } else {
    fat_link_max = 1.0;
  }
  if (typeid(src) == typeid(cudaGaugeField)) {
    // device-to-device: copy field and ghost zone into this field directly
    copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge,
                     static_cast<const cudaGaugeField&>(src).gauge);
  } else if (typeid(src) == typeid(cpuGaugeField)) {
    if (reorder_location() == QUDA_CPU_FIELD_LOCATION) { // do reorder on the CPU
      void *buffer = pool_pinned_malloc(bytes);
      // copy field and ghost zone into the pinned staging buffer
      copyGenericGauge(*this, src, QUDA_CPU_FIELD_LOCATION, buffer, static_cast<const cpuGaugeField&>(src).gauge);
      // this copies over both even and odd
      qudaMemcpy(gauge, buffer, bytes, cudaMemcpyHostToDevice);
      pool_pinned_free(buffer);
    } else { // else on the GPU
      // stage the host field (and its ghosts) in src's own layout on the
      // device, then reorder there
      void *buffer = create_gauge_buffer(src.Bytes(), src.Order(), src.Geometry());
      size_t ghost_bytes[4];
      int srcNinternal = src.Reconstruct() != QUDA_RECONSTRUCT_NO ? src.Reconstruct() : 2*nColor*nColor;
      for (int d=0; d<4; d++) ghost_bytes[d] = nFace * surface[d] * srcNinternal * src.Precision();
      void **ghost_buffer = (nFace > 0) ? create_ghost_buffer(ghost_bytes, src.Order()) : nullptr;
      if (src.Order() == QUDA_QDP_GAUGE_ORDER) {
        // QDP order keeps one allocation per geometry dimension
        for (int d=0; d<geometry; d++) {
          qudaMemcpy(((void**)buffer)[d], ((void**)src.Gauge_p())[d], src.Bytes()/geometry, cudaMemcpyHostToDevice);
        }
      } else {
        qudaMemcpy(buffer, src.Gauge_p(), src.Bytes(), cudaMemcpyHostToDevice);
      }
      // only upload ghosts when both fields use pad-style ghost exchange
      if (src.Order() > 4 && GhostExchange() == QUDA_GHOST_EXCHANGE_PAD &&
          src.GhostExchange() == QUDA_GHOST_EXCHANGE_PAD && nFace)
        for (int d=0; d<4; d++)
          qudaMemcpy(ghost_buffer[d], src.Ghost()[d], ghost_bytes[d], cudaMemcpyHostToDevice);
      copyGenericGauge(*this, src, QUDA_CUDA_FIELD_LOCATION, gauge, buffer, 0, ghost_buffer);
      free_gauge_buffer(buffer, src.Order(), src.Geometry());
      if (nFace > 0) free_ghost_buffer(ghost_buffer, src.Order());
    } // reorder_location
  } else {
    errorQuda("Invalid gauge field type");
  }
  // if we have copied from a source without a pad then we need to exchange
  if (ghostExchange == QUDA_GHOST_EXCHANGE_PAD && src.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD) exchangeGhost();
  staggeredPhaseApplied = src.StaggeredPhaseApplied();
  staggeredPhaseType = src.StaggeredPhase();
  checkCudaError();
}
void cudaGaugeField::loadCPUField(const cpuGaugeField &cpu) { copy(cpu); }
// Export this device field into the host field cpu, reordering either on
// the device (staging through device scratch buffers in cpu's layout) or
// on the host (staging through a pinned buffer), per reorder_location().
void cudaGaugeField::saveCPUField(cpuGaugeField &cpu) const
{
  QudaFieldLocation pack_location = reorder_location();
  if (pack_location == QUDA_CUDA_FIELD_LOCATION) {
    void *buffer = create_gauge_buffer(cpu.Bytes(), cpu.Order(), cpu.Geometry());
    // Allocate space for ghost zone if required
    size_t ghost_bytes[4];
    int cpuNinternal = cpu.Reconstruct() != QUDA_RECONSTRUCT_NO ? cpu.Reconstruct() : 2*nColor*nColor;
    for (int d=0; d<4; d++) ghost_bytes[d] = nFace * surface[d] * cpuNinternal * cpu.Precision();
    void **ghost_buffer = (nFace > 0) ? create_ghost_buffer(ghost_bytes, cpu.Order()) : nullptr;
    // reorder on the device into cpu's layout, then copy down to the host
    copyGenericGauge(cpu, *this, QUDA_CUDA_FIELD_LOCATION, buffer, gauge, ghost_buffer, 0);
    if (cpu.Order() == QUDA_QDP_GAUGE_ORDER) {
      // QDP order: one transfer per geometry dimension
      for (int d=0; d<geometry; d++) qudaMemcpy(((void**)cpu.gauge)[d], ((void**)buffer)[d], cpu.Bytes()/geometry, cudaMemcpyDeviceToHost);
    } else {
      qudaMemcpy(cpu.gauge, buffer, cpu.Bytes(), cudaMemcpyDeviceToHost);
    }
    // only download ghosts when both fields use pad-style ghost exchange
    if (cpu.Order() > 4 && GhostExchange() == QUDA_GHOST_EXCHANGE_PAD &&
        cpu.GhostExchange() == QUDA_GHOST_EXCHANGE_PAD && nFace)
      for (int d=0; d<4; d++)
        qudaMemcpy(cpu.Ghost()[d], ghost_buffer[d], ghost_bytes[d], cudaMemcpyDeviceToHost);
    free_gauge_buffer(buffer, cpu.Order(), cpu.Geometry());
    if (nFace > 0) free_ghost_buffer(ghost_buffer, cpu.Order());
  } else if (pack_location == QUDA_CPU_FIELD_LOCATION) { // do copy then host-side reorder
    void *buffer = pool_pinned_malloc(bytes);
    qudaMemcpy(buffer, gauge, bytes, cudaMemcpyDeviceToHost);
    copyGenericGauge(cpu, *this, QUDA_CPU_FIELD_LOCATION, cpu.gauge, buffer);
    pool_pinned_free(buffer);
  } else {
    errorQuda("Invalid pack location %d", pack_location);
  }
  // propagate staggered-phase metadata alongside the data
  cpu.staggeredPhaseApplied = staggeredPhaseApplied;
  cpu.staggeredPhaseType = staggeredPhaseType;
}
// Save the device gauge field into a freshly allocated host buffer
// (backup_h), to be released by restore().
// NOTE(review): uses raw cudaMemcpy rather than the qudaMemcpy wrapper used
// elsewhere in this file — confirm this is intentional.
void cudaGaugeField::backup() const {
  if (backed_up) errorQuda("Gauge field already backed up");
  backup_h = new char[bytes]; // freed by restore()
  cudaMemcpy(backup_h, gauge, bytes, cudaMemcpyDeviceToHost);
  checkCudaError();
  backed_up = true;
}
// Restore the device gauge field from the host backup made by backup(),
// then release the backup buffer.
void cudaGaugeField::restore() {
  if (!backed_up) errorQuda("Cannot restore since not backed up");
  cudaMemcpy(gauge, backup_h, bytes, cudaMemcpyHostToDevice);
  delete []backup_h; // allocated in backup()
  checkCudaError();
  backed_up = false;
}
// Zero-fill the entire device allocation (both parities and any pad).
void cudaGaugeField::zero() {
  cudaMemset(gauge, 0, bytes);
}
} // namespace quda
|
cc44d7de8d7ad7a6f401ea0f739536a1cc7ab52d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/DistanceUtils.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <thrust/host_vector.h>
namespace faiss { namespace gpu {
namespace {
/// Sort direction per each metric
// Returns the k-selection direction for a metric: true = keep the largest
// values (inner product / similarity), false = keep the smallest (L2).
inline bool metricToSortDirection(MetricType mt) {
  if (mt == MetricType::METRIC_INNER_PRODUCT) {
    return true;  // similarities: select the highest
  }
  if (mt == MetricType::METRIC_L2) {
    return false; // distances: select the lowest
  }
  // unhandled metric
  FAISS_ASSERT(false);
  return false;
}
}
// Number of warps we create per block of IVFFlatScan
constexpr int kIVFFlatScanWarps = 4;
// Works for any dimension size
//
// Device-side scan of one inverted list: each warp takes a contiguous chunk
// of the list's vectors, decodes them via Codec, optionally adds the
// per-probe residual base, accumulates the metric against the query across
// lanes, and lane 0 writes one distance per vector.
template <typename Codec, typename Metric>
struct IVFFlatScan {
  static __device__ void scan(float* query,
                              bool useResidual,
                              float* residualBaseSlice,
                              void* vecData,
                              const Codec& codec,
                              const Metric& metric,
                              int numVecs,
                              int dim,
                              float* distanceOut) {
    // How many separate loading points are there for the decoder?
    int limit = utils::divDown(dim, Codec::kDimPerIter);
    // Each warp handles a separate chunk of vectors
    int warpId = threadIdx.x / kWarpSize;
    // FIXME: why does getLaneId() not work when we write out below!?!?!
    int laneId = threadIdx.x % kWarpSize; // getLaneId();
    // Divide the set of vectors among the warps
    int vecsPerWarp = utils::divUp(numVecs, kIVFFlatScanWarps);
    int vecStart = vecsPerWarp * warpId;
    int vecEnd = min(vecsPerWarp * (warpId + 1), numVecs);
    // Walk the list of vectors for this warp
    for (int vec = vecStart; vec < vecEnd; ++vec) {
      Metric dist = metric.zero();
      // Scan the dimensions available that have whole units for the decoder,
      // as the decoder may handle more than one dimension at once (leaving the
      // remainder to be handled separately)
      for (int d = laneId; d < limit; d += kWarpSize) {
        int realDim = d * Codec::kDimPerIter;
        float vecVal[Codec::kDimPerIter];
        // Decode the kDimPerIter dimensions
        codec.decode(vecData, vec, d, vecVal);
#pragma unroll
        for (int j = 0; j < Codec::kDimPerIter; ++j) {
          vecVal[j] += useResidual ? residualBaseSlice[realDim + j] : 0.0f;
        }
#pragma unroll
        for (int j = 0; j < Codec::kDimPerIter; ++j) {
          dist.handle(query[realDim + j], vecVal[j]);
        }
      }
      // Handle remainder by a single thread, if any
      // Not needed if we decode 1 dim per time
      if (Codec::kDimPerIter > 1) {
        int realDim = limit * Codec::kDimPerIter;
        // Was there any remainder?
        if (realDim < dim) {
          // Let the first threads in the block sequentially perform it
          int remainderDim = realDim + laneId;
          if (remainderDim < dim) {
            float vecVal =
              codec.decodePartial(vecData, vec, limit, laneId);
            vecVal += useResidual ? residualBaseSlice[remainderDim] : 0.0f;
            dist.handle(query[remainderDim], vecVal);
          }
        }
      }
      // Reduce distance within warp
      auto warpDist = warpReduceAllSum(dist.reduce());
      if (laneId == 0) {
        distanceOut[vec] = warpDist;
      }
    }
  }
};
// Kernel entry: one block per (probe, query) pair — blockIdx.x selects the
// probe, blockIdx.y the query.  Looks up the probed list, then delegates to
// IVFFlatScan::scan, writing distances at this (query, probe)'s offset in
// the flat output array.  Dynamic shared memory is handed to the codec.
template <typename Codec, typename Metric>
__global__ void
ivfFlatScan(Tensor<float, 2, true> queries,
            bool useResidual,
            Tensor<float, 3, true> residualBase,
            Tensor<int, 2, true> listIds,
            void** allListData,
            int* listLengths,
            Codec codec,
            Metric metric,
            Tensor<int, 2, true> prefixSumOffsets,
            Tensor<float, 1, true> distance) {
  extern __shared__ float smem[];
  auto queryId = blockIdx.y;
  auto probeId = blockIdx.x;
  // This is where we start writing out data
  // We ensure that before the array (at offset -1), there is a 0 value
  int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);
  auto listId = listIds[queryId][probeId];
  // Safety guard in case NaNs in input cause no list ID to be generated
  if (listId == -1) {
    return;
  }
  auto query = queries[queryId].data();
  auto vecs = allListData[listId];
  auto numVecs = listLengths[listId];
  auto dim = queries.getSize(1);
  auto distanceOut = distance[outBase].data();
  auto residualBaseSlice = residualBase[queryId][probeId].data();
  codec.setSmem(smem, dim);
  IVFFlatScan<Codec, Metric>::scan(query,
                                   useResidual,
                                   residualBaseSlice,
                                   vecs,
                                   codec,
                                   metric,
                                   numVecs,
                                   dim,
                                   distanceOut);
}
// Scan one tile of queries: compute exact distances against every vector in
// each probed list (dispatching on the scalar-quantizer codec and metric),
// then k-select the results in two passes (per-probe-chunk, then final).
// Grid is (nprobe, numQueries); block is kIVFFlatScanWarps warps.
void
runIVFFlatScanTile(Tensor<float, 2, true>& queries,
                   Tensor<int, 2, true>& listIds,
                   thrust::device_vector<void*>& listData,
                   thrust::device_vector<void*>& listIndices,
                   IndicesOptions indicesOptions,
                   thrust::device_vector<int>& listLengths,
                   Tensor<char, 1, true>& thrustMem,
                   Tensor<int, 2, true>& prefixSumOffsets,
                   Tensor<float, 1, true>& allDistances,
                   Tensor<float, 3, true>& heapDistances,
                   Tensor<int, 3, true>& heapIndices,
                   int k,
                   faiss::MetricType metricType,
                   bool useResidual,
                   Tensor<float, 3, true>& residualBase,
                   GpuScalarQuantizer* scalarQ,
                   Tensor<float, 2, true>& outDistances,
                   Tensor<long, 2, true>& outIndices,
                   hipStream_t stream) {
  int dim = queries.getSize(1);
  // Check the amount of shared memory per block available based on our type is
  // sufficient
  if (scalarQ &&
      (scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_8bit ||
       scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_4bit)) {
    int maxDim = getMaxSharedMemPerBlockCurrentDevice() /
      (sizeof(float) * 2);
    FAISS_THROW_IF_NOT_FMT(dim < maxDim,
                           "Insufficient shared memory available on the GPU "
                           "for QT_8bit or QT_4bit with %d dimensions; "
                           "maximum dimensions possible is %d", dim, maxDim);
  }
  // Calculate offset lengths, so we know where to write out
  // intermediate results
  runCalcListOffsets(listIds, listLengths, prefixSumOffsets, thrustMem, stream);
  auto grid = dim3(listIds.getSize(1), listIds.getSize(0));
  auto block = dim3(kWarpSize * kIVFFlatScanWarps);
#define RUN_IVF_FLAT \
  do { \
    hipLaunchKernelGGL(( ivfFlatScan) \
      , dim3(grid), dim3(block), codec.getSmemSize(dim), stream, \
        queries, \
        useResidual, \
        residualBase, \
        listIds, \
        listData.data().get(), \
        listLengths.data().get(), \
        codec, \
        metric, \
        prefixSumOffsets, \
        allDistances); \
  } while (0)
#define HANDLE_METRICS \
  do { \
    if (metricType == MetricType::METRIC_L2) { \
      L2Distance metric; RUN_IVF_FLAT; \
    } else { \
      IPDistance metric; RUN_IVF_FLAT; \
    } \
  } while (0)
  // dispatch on the codec (no quantizer = raw float storage)
  if (!scalarQ) {
    CodecFloat codec(dim * sizeof(float));
    HANDLE_METRICS;
  } else {
    switch (scalarQ->qtype) {
      case ScalarQuantizer::QuantizerType::QT_8bit:
      {
        // FIXME: investigate 32 bit load perf issues
        // if (dim % 4 == 0) {
        if (false) {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit, 4>
            codec(scalarQ->code_size,
                  scalarQ->gpuTrained.data(),
                  scalarQ->gpuTrained.data() + dim);
          HANDLE_METRICS;
        } else {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit, 1>
            codec(scalarQ->code_size,
                  scalarQ->gpuTrained.data(),
                  scalarQ->gpuTrained.data() + dim);
          HANDLE_METRICS;
        }
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_8bit_uniform:
      {
        // FIXME: investigate 32 bit load perf issues
        if (false) {
          // if (dim % 4 == 0) {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 4>
            codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
          HANDLE_METRICS;
        } else {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 1>
            codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
          HANDLE_METRICS;
        }
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_fp16:
      {
        if (false) {
          // FIXME: investigate 32 bit load perf issues
          // if (dim % 2 == 0) {
          Codec<ScalarQuantizer::QuantizerType::QT_fp16, 2>
            codec(scalarQ->code_size);
          HANDLE_METRICS;
        } else {
          Codec<ScalarQuantizer::QuantizerType::QT_fp16, 1>
            codec(scalarQ->code_size);
          HANDLE_METRICS;
        }
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_8bit_direct:
      {
        Codec<ScalarQuantizer::QuantizerType::QT_8bit_direct, 1>
          codec(scalarQ->code_size);
        HANDLE_METRICS;
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_4bit:
      {
        Codec<ScalarQuantizer::QuantizerType::QT_4bit, 1>
          codec(scalarQ->code_size,
                scalarQ->gpuTrained.data(),
                scalarQ->gpuTrained.data() + dim);
        HANDLE_METRICS;
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_4bit_uniform:
      {
        Codec<ScalarQuantizer::QuantizerType::QT_4bit_uniform, 1>
          codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
        HANDLE_METRICS;
      }
      break;
      default:
        // unimplemented, should be handled at a higher level
        FAISS_ASSERT(false);
    }
  }
  CUDA_TEST_ERROR();
#undef HANDLE_METRICS
#undef RUN_IVF_FLAT
  // k-select the output in chunks, to increase parallelism
  runPass1SelectLists(prefixSumOffsets,
                      allDistances,
                      listIds.getSize(1),
                      k,
                      metricToSortDirection(metricType),
                      heapDistances,
                      heapIndices,
                      stream);
  // k-select final output
  auto flatHeapDistances = heapDistances.downcastInner<2>();
  auto flatHeapIndices = heapIndices.downcastInner<2>();
  runPass2SelectLists(flatHeapDistances,
                      flatHeapIndices,
                      listIndices,
                      indicesOptions,
                      prefixSumOffsets,
                      listIds,
                      k,
                      metricToSortDirection(metricType),
                      outDistances,
                      outIndices,
                      stream);
}
void
runIVFFlatScan(Tensor<float, 2, true>& queries,
Tensor<int, 2, true>& listIds,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
thrust::device_vector<int>& listLengths,
int maxListLength,
int k,
faiss::MetricType metric,
bool useResidual,
Tensor<float, 3, true>& residualBase,
GpuScalarQuantizer* scalarQ,
// output
Tensor<float, 2, true>& outDistances,
// output
Tensor<long, 2, true>& outIndices,
GpuResources* res) {
constexpr int kMinQueryTileSize = 8;
constexpr int kMaxQueryTileSize = 128;
constexpr int kThrustMemSize = 16384;
int nprobe = listIds.getSize(1);
auto& mem = res->getMemoryManagerCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// Make a reservation for Thrust to do its dirty work (global memory
// cross-block reduction space); hopefully this is large enough.
DeviceTensor<char, 1, true> thrustMem1(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true> thrustMem2(
mem, {kThrustMemSize}, stream);
DeviceTensor<char, 1, true>* thrustMem[2] =
{&thrustMem1, &thrustMem2};
// How much temporary storage is available?
// If possible, we'd like to fit within the space available.
size_t sizeAvailable = mem.getSizeAvailable();
// We run two passes of heap selection
// This is the size of the first-level heap passes
constexpr int kNProbeSplit = 8;
int pass2Chunks = ::min(nprobe, kNProbeSplit);
size_t sizeForFirstSelectPass =
pass2Chunks * k * (sizeof(float) + sizeof(int));
// How much temporary storage we need per each query
size_t sizePerQuery =
2 * // # streams
((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
nprobe * maxListLength * sizeof(float) + // allDistances
sizeForFirstSelectPass);
int queryTileSize = (int) (sizeAvailable / sizePerQuery);
if (queryTileSize < kMinQueryTileSize) {
queryTileSize = kMinQueryTileSize;
} else if (queryTileSize > kMaxQueryTileSize) {
queryTileSize = kMaxQueryTileSize;
}
// FIXME: we should adjust queryTileSize to deal with this, since
// indexing is in int32
FAISS_ASSERT(queryTileSize * nprobe * maxListLength <
std::numeric_limits<int>::max());
// Temporary memory buffers
// Make sure there is space prior to the start which will be 0, and
// will handle the boundary condition without branches
DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
mem, {queryTileSize * nprobe + 1}, stream);
DeviceTensor<int, 2, true> prefixSumOffsets1(
prefixSumOffsetSpace1[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true> prefixSumOffsets2(
prefixSumOffsetSpace2[1].data(),
{queryTileSize, nprobe});
DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
{&prefixSumOffsets1, &prefixSumOffsets2};
// Make sure the element before prefixSumOffsets is 0, since we
// depend upon simple, boundary-less indexing to get proper results
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace1.data(),
0,
sizeof(int),
stream));
CUDA_VERIFY(hipMemsetAsync(prefixSumOffsetSpace2.data(),
0,
sizeof(int),
stream));
DeviceTensor<float, 1, true> allDistances1(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true> allDistances2(
mem, {queryTileSize * nprobe * maxListLength}, stream);
DeviceTensor<float, 1, true>* allDistances[2] =
{&allDistances1, &allDistances2};
DeviceTensor<float, 3, true> heapDistances1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true> heapDistances2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<float, 3, true>* heapDistances[2] =
{&heapDistances1, &heapDistances2};
DeviceTensor<int, 3, true> heapIndices1(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true> heapIndices2(
mem, {queryTileSize, pass2Chunks, k}, stream);
DeviceTensor<int, 3, true>* heapIndices[2] =
{&heapIndices1, &heapIndices2};
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
int numQueriesInTile =
::min(queryTileSize, queries.getSize(0) - query);
auto prefixSumOffsetsView =
prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);
auto listIdsView =
listIds.narrowOutermost(query, numQueriesInTile);
auto queryView =
queries.narrowOutermost(query, numQueriesInTile);
auto residualBaseView =
residualBase.narrowOutermost(query, numQueriesInTile);
auto heapDistancesView =
heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
auto heapIndicesView =
heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);
auto outDistanceView =
outDistances.narrowOutermost(query, numQueriesInTile);
auto outIndicesView =
outIndices.narrowOutermost(query, numQueriesInTile);
runIVFFlatScanTile(queryView,
listIdsView,
listData,
listIndices,
indicesOptions,
listLengths,
*thrustMem[curStream],
prefixSumOffsetsView,
*allDistances[curStream],
heapDistancesView,
heapIndicesView,
k,
metric,
useResidual,
residualBaseView,
scalarQ,
outDistanceView,
outIndicesView,
streams[curStream]);
curStream = (curStream + 1) % 2;
}
streamWait({stream}, streams);
}
} } // namespace
| cc44d7de8d7ad7a6f401ea0f739536a1cc7ab52d.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/DistanceUtils.cuh>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/LoadStoreOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <faiss/gpu/utils/StaticUtils.h>
#include <thrust/host_vector.h>
namespace faiss { namespace gpu {
namespace {
/// Sort direction per each metric
inline bool metricToSortDirection(MetricType mt) {
  // Inner product: larger values are better, so k-select the highest.
  if (mt == MetricType::METRIC_INNER_PRODUCT) {
    return true;
  }

  // L2: smaller values are better, so k-select the lowest.
  if (mt == MetricType::METRIC_L2) {
    return false;
  }

  // Any other metric is unhandled here
  FAISS_ASSERT(false);
  return false;
}
}
// Number of warps we create per block of IVFFlatScan
// (the launch below uses blockDim.x = kWarpSize * kIVFFlatScanWarps)
constexpr int kIVFFlatScanWarps = 4;
// Works for any dimension size
//
// Computes the distance between one query and every vector of one IVF
// list. The block's kIVFFlatScanWarps warps each take a disjoint chunk
// of the list's vectors; within a warp, the 32 lanes split the
// dimensions of each vector and the partial distances are combined with
// a warp-wide reduction.
template <typename Codec, typename Metric>
struct IVFFlatScan {
  // query:             decoded query vector (length dim)
  // useResidual:       if true, residualBaseSlice is added to each
  //                    decoded list vector before the metric is applied
  // residualBaseSlice: per-(query, probe) base vector (length dim)
  // vecData:           encoded list vectors, interpreted by `codec`
  // numVecs:           number of vectors in the list
  // distanceOut:       one distance written per list vector
  static __device__ void scan(float* query,
                              bool useResidual,
                              float* residualBaseSlice,
                              void* vecData,
                              const Codec& codec,
                              const Metric& metric,
                              int numVecs,
                              int dim,
                              float* distanceOut) {
    // How many separate loading points are there for the decoder?
    // (the codec decodes kDimPerIter dimensions per call)
    int limit = utils::divDown(dim, Codec::kDimPerIter);

    // Each warp handles a separate chunk of vectors
    int warpId = threadIdx.x / kWarpSize;
    // FIXME: why does getLaneId() not work when we write out below!?!?!
    int laneId = threadIdx.x % kWarpSize; // getLaneId();

    // Divide the set of vectors among the warps
    int vecsPerWarp = utils::divUp(numVecs, kIVFFlatScanWarps);

    int vecStart = vecsPerWarp * warpId;
    int vecEnd = min(vecsPerWarp * (warpId + 1), numVecs);

    // Walk the list of vectors for this warp
    for (int vec = vecStart; vec < vecEnd; ++vec) {
      Metric dist = metric.zero();

      // Scan the dimensions available that have whole units for the decoder,
      // as the decoder may handle more than one dimension at once (leaving the
      // remainder to be handled separately)
      for (int d = laneId; d < limit; d += kWarpSize) {
        int realDim = d * Codec::kDimPerIter;
        float vecVal[Codec::kDimPerIter];

        // Decode the kDimPerIter dimensions
        codec.decode(vecData, vec, d, vecVal);

#pragma unroll
        for (int j = 0; j < Codec::kDimPerIter; ++j) {
          vecVal[j] += useResidual ? residualBaseSlice[realDim + j] : 0.0f;
        }

#pragma unroll
        for (int j = 0; j < Codec::kDimPerIter; ++j) {
          dist.handle(query[realDim + j], vecVal[j]);
        }
      }

      // Handle remainder by a single thread, if any
      // Not needed if we decode 1 dim per time
      if (Codec::kDimPerIter > 1) {
        int realDim = limit * Codec::kDimPerIter;

        // Was there any remainder?
        if (realDim < dim) {
          // Let the first threads in the block sequentially perform it
          int remainderDim = realDim + laneId;

          if (remainderDim < dim) {
            float vecVal =
              codec.decodePartial(vecData, vec, limit, laneId);
            vecVal += useResidual ? residualBaseSlice[remainderDim] : 0.0f;
            dist.handle(query[remainderDim], vecVal);
          }
        }
      }

      // Reduce distance within warp; every lane gets the sum, only lane 0
      // writes it out
      auto warpDist = warpReduceAllSum(dist.reduce());

      if (laneId == 0) {
        distanceOut[vec] = warpDist;
      }
    }
  }
};
// One thread block per (query, probe) pair; launched (see
// runIVFFlatScanTile) with grid = (nprobe, nqueries). Looks up the IVF
// list selected for this pair and scans it, writing distances into the
// flat `distance` array at the offset recorded in prefixSumOffsets.
// Dynamic shared memory is handed to the codec (e.g. for dequantization
// tables).
template <typename Codec, typename Metric>
__global__ void
ivfFlatScan(Tensor<float, 2, true> queries,
            bool useResidual,
            Tensor<float, 3, true> residualBase,
            Tensor<int, 2, true> listIds,
            void** allListData,
            int* listLengths,
            Codec codec,
            Metric metric,
            Tensor<int, 2, true> prefixSumOffsets,
            Tensor<float, 1, true> distance) {
  extern __shared__ float smem[];

  auto queryId = blockIdx.y;
  auto probeId = blockIdx.x;

  // This is where we start writing out data
  // We ensure that before the array (at offset -1), there is a 0 value
  int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1);

  auto listId = listIds[queryId][probeId];
  // Safety guard in case NaNs in input cause no list ID to be generated
  if (listId == -1) {
    return;
  }

  auto query = queries[queryId].data();
  auto vecs = allListData[listId];
  auto numVecs = listLengths[listId];
  auto dim = queries.getSize(1);
  auto distanceOut = distance[outBase].data();

  auto residualBaseSlice = residualBase[queryId][probeId].data();

  // Let the codec set up its shared-memory scratch (if any)
  codec.setSmem(smem, dim);

  IVFFlatScan<Codec, Metric>::scan(query,
                                   useResidual,
                                   residualBaseSlice,
                                   vecs,
                                   codec,
                                   metric,
                                   numVecs,
                                   dim,
                                   distanceOut);
}
// Runs one tile (batch) of queries through the IVF flat scan:
//  1) prefix-sums the selected list lengths so each (query, probe) pair
//     has an output offset into the flat distance buffer,
//  2) launches ivfFlatScan over all (query, probe) pairs, instantiated
//     for the active codec (float or scalar-quantized) and metric,
//  3) k-selects the results in two passes (per-chunk heaps, then final)
//     into outDistances / outIndices.
// All work is enqueued on `stream`; nothing is synchronized here.
void
runIVFFlatScanTile(Tensor<float, 2, true>& queries,
                   Tensor<int, 2, true>& listIds,
                   thrust::device_vector<void*>& listData,
                   thrust::device_vector<void*>& listIndices,
                   IndicesOptions indicesOptions,
                   thrust::device_vector<int>& listLengths,
                   Tensor<char, 1, true>& thrustMem,
                   Tensor<int, 2, true>& prefixSumOffsets,
                   Tensor<float, 1, true>& allDistances,
                   Tensor<float, 3, true>& heapDistances,
                   Tensor<int, 3, true>& heapIndices,
                   int k,
                   faiss::MetricType metricType,
                   bool useResidual,
                   Tensor<float, 3, true>& residualBase,
                   GpuScalarQuantizer* scalarQ,
                   Tensor<float, 2, true>& outDistances,
                   Tensor<long, 2, true>& outIndices,
                   cudaStream_t stream) {
  int dim = queries.getSize(1);

  // Check the amount of shared memory per block available based on our type is
  // sufficient
  if (scalarQ &&
      (scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_8bit ||
       scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_4bit)) {
    int maxDim = getMaxSharedMemPerBlockCurrentDevice() /
      (sizeof(float) * 2);

    FAISS_THROW_IF_NOT_FMT(dim < maxDim,
                           "Insufficient shared memory available on the GPU "
                           "for QT_8bit or QT_4bit with %d dimensions; "
                           "maximum dimensions possible is %d", dim, maxDim);
  }

  // Calculate offset lengths, so we know where to write out
  // intermediate results
  runCalcListOffsets(listIds, listLengths, prefixSumOffsets, thrustMem, stream);

  // One block per (query, probe) pair; kIVFFlatScanWarps warps per block
  auto grid = dim3(listIds.getSize(1), listIds.getSize(0));
  auto block = dim3(kWarpSize * kIVFFlatScanWarps);

// Launches ivfFlatScan for whatever `codec` and `metric` are in scope
#define RUN_IVF_FLAT                                                    \
  do {                                                                  \
    ivfFlatScan                                                         \
      <<<grid, block, codec.getSmemSize(dim), stream>>>(                \
        queries,                                                        \
        useResidual,                                                    \
        residualBase,                                                   \
        listIds,                                                        \
        listData.data().get(),                                          \
        listLengths.data().get(),                                       \
        codec,                                                          \
        metric,                                                         \
        prefixSumOffsets,                                               \
        allDistances);                                                  \
  } while (0)

// Instantiates the metric functor for the codec in scope and launches
#define HANDLE_METRICS                                \
  do {                                                \
    if (metricType == MetricType::METRIC_L2) {        \
      L2Distance metric; RUN_IVF_FLAT;                \
    } else {                                          \
      IPDistance metric; RUN_IVF_FLAT;                \
    }                                                 \
  } while (0)

  // Dispatch on the storage codec (raw float vs. scalar quantizer type)
  if (!scalarQ) {
    CodecFloat codec(dim * sizeof(float));
    HANDLE_METRICS;
  } else {
    switch (scalarQ->qtype) {
      case ScalarQuantizer::QuantizerType::QT_8bit:
      {
        // FIXME: investigate 32 bit load perf issues
        // if (dim % 4 == 0) {
        if (false) {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit, 4>
            codec(scalarQ->code_size,
                  scalarQ->gpuTrained.data(),
                  scalarQ->gpuTrained.data() + dim);
          HANDLE_METRICS;
        } else {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit, 1>
            codec(scalarQ->code_size,
                  scalarQ->gpuTrained.data(),
                  scalarQ->gpuTrained.data() + dim);
          HANDLE_METRICS;
        }
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_8bit_uniform:
      {
        // FIXME: investigate 32 bit load perf issues
        if (false) {
          // if (dim % 4 == 0) {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 4>
            codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
          HANDLE_METRICS;
        } else {
          Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 1>
            codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
          HANDLE_METRICS;
        }
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_fp16:
      {
        if (false) {
          // FIXME: investigate 32 bit load perf issues
          // if (dim % 2 == 0) {
          Codec<ScalarQuantizer::QuantizerType::QT_fp16, 2>
            codec(scalarQ->code_size);
          HANDLE_METRICS;
        } else {
          Codec<ScalarQuantizer::QuantizerType::QT_fp16, 1>
            codec(scalarQ->code_size);
          HANDLE_METRICS;
        }
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_8bit_direct:
      {
        Codec<ScalarQuantizer::QuantizerType::QT_8bit_direct, 1>
          codec(scalarQ->code_size);
        HANDLE_METRICS;
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_4bit:
      {
        Codec<ScalarQuantizer::QuantizerType::QT_4bit, 1>
          codec(scalarQ->code_size,
                scalarQ->gpuTrained.data(),
                scalarQ->gpuTrained.data() + dim);
        HANDLE_METRICS;
      }
      break;
      case ScalarQuantizer::QuantizerType::QT_4bit_uniform:
      {
        Codec<ScalarQuantizer::QuantizerType::QT_4bit_uniform, 1>
          codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
        HANDLE_METRICS;
      }
      break;
      default:
        // unimplemented, should be handled at a higher level
        FAISS_ASSERT(false);
    }
  }

  CUDA_TEST_ERROR();

#undef HANDLE_METRICS
#undef RUN_IVF_FLAT

  // k-select the output in chunks, to increase parallelism
  runPass1SelectLists(prefixSumOffsets,
                      allDistances,
                      listIds.getSize(1),
                      k,
                      metricToSortDirection(metricType),
                      heapDistances,
                      heapIndices,
                      stream);

  // k-select final output
  auto flatHeapDistances = heapDistances.downcastInner<2>();
  auto flatHeapIndices = heapIndices.downcastInner<2>();

  runPass2SelectLists(flatHeapDistances,
                      flatHeapIndices,
                      listIndices,
                      indicesOptions,
                      prefixSumOffsets,
                      listIds,
                      k,
                      metricToSortDirection(metricType),
                      outDistances,
                      outIndices,
                      stream);
}
// Top-level IVF flat scan over all queries.
//
// Splits the queries into tiles sized to fit the temporary GPU memory
// available from `res`, allocates double-buffered scratch (prefix-sum
// offsets, flat distances, per-chunk heaps), and round-robins the tiles
// over two alternate streams so consecutive tiles can overlap. Results
// for all queries land in outDistances / outIndices.
void
runIVFFlatScan(Tensor<float, 2, true>& queries,
               Tensor<int, 2, true>& listIds,
               thrust::device_vector<void*>& listData,
               thrust::device_vector<void*>& listIndices,
               IndicesOptions indicesOptions,
               thrust::device_vector<int>& listLengths,
               int maxListLength,
               int k,
               faiss::MetricType metric,
               bool useResidual,
               Tensor<float, 3, true>& residualBase,
               GpuScalarQuantizer* scalarQ,
               // output
               Tensor<float, 2, true>& outDistances,
               // output
               Tensor<long, 2, true>& outIndices,
               GpuResources* res) {
  // Bounds on how many queries we process per tile
  constexpr int kMinQueryTileSize = 8;
  constexpr int kMaxQueryTileSize = 128;
  // Scratch handed to Thrust for its internal reductions
  constexpr int kThrustMemSize = 16384;

  int nprobe = listIds.getSize(1);

  auto& mem = res->getMemoryManagerCurrentDevice();
  auto stream = res->getDefaultStreamCurrentDevice();

  // Make a reservation for Thrust to do its dirty work (global memory
  // cross-block reduction space); hopefully this is large enough.
  DeviceTensor<char, 1, true> thrustMem1(
    mem, {kThrustMemSize}, stream);
  DeviceTensor<char, 1, true> thrustMem2(
    mem, {kThrustMemSize}, stream);
  DeviceTensor<char, 1, true>* thrustMem[2] =
    {&thrustMem1, &thrustMem2};

  // How much temporary storage is available?
  // If possible, we'd like to fit within the space available.
  size_t sizeAvailable = mem.getSizeAvailable();

  // We run two passes of heap selection
  // This is the size of the first-level heap passes
  constexpr int kNProbeSplit = 8;
  int pass2Chunks = std::min(nprobe, kNProbeSplit);

  size_t sizeForFirstSelectPass =
    pass2Chunks * k * (sizeof(float) + sizeof(int));

  // How much temporary storage we need per each query
  size_t sizePerQuery =
    2 * // # streams
    ((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets
     nprobe * maxListLength * sizeof(float) + // allDistances
     sizeForFirstSelectPass);

  int queryTileSize = (int) (sizeAvailable / sizePerQuery);

  if (queryTileSize < kMinQueryTileSize) {
    queryTileSize = kMinQueryTileSize;
  } else if (queryTileSize > kMaxQueryTileSize) {
    queryTileSize = kMaxQueryTileSize;
  }

  // FIXME: we should adjust queryTileSize to deal with this, since
  // indexing is in int32
  // Note: perform the product in size_t so the overflow check itself
  // cannot overflow (int * int * int would be UB on overflow)
  FAISS_ASSERT((size_t) queryTileSize * nprobe * maxListLength <
               (size_t) std::numeric_limits<int>::max());

  // Temporary memory buffers
  // Make sure there is space prior to the start which will be 0, and
  // will handle the boundary condition without branches
  DeviceTensor<int, 1, true> prefixSumOffsetSpace1(
    mem, {queryTileSize * nprobe + 1}, stream);
  DeviceTensor<int, 1, true> prefixSumOffsetSpace2(
    mem, {queryTileSize * nprobe + 1}, stream);

  DeviceTensor<int, 2, true> prefixSumOffsets1(
    prefixSumOffsetSpace1[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true> prefixSumOffsets2(
    prefixSumOffsetSpace2[1].data(),
    {queryTileSize, nprobe});
  DeviceTensor<int, 2, true>* prefixSumOffsets[2] =
    {&prefixSumOffsets1, &prefixSumOffsets2};

  // Make sure the element before prefixSumOffsets is 0, since we
  // depend upon simple, boundary-less indexing to get proper results
  CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace1.data(),
                              0,
                              sizeof(int),
                              stream));
  CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace2.data(),
                              0,
                              sizeof(int),
                              stream));

  DeviceTensor<float, 1, true> allDistances1(
    mem, {queryTileSize * nprobe * maxListLength}, stream);
  DeviceTensor<float, 1, true> allDistances2(
    mem, {queryTileSize * nprobe * maxListLength}, stream);
  DeviceTensor<float, 1, true>* allDistances[2] =
    {&allDistances1, &allDistances2};

  DeviceTensor<float, 3, true> heapDistances1(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<float, 3, true> heapDistances2(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<float, 3, true>* heapDistances[2] =
    {&heapDistances1, &heapDistances2};

  DeviceTensor<int, 3, true> heapIndices1(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<int, 3, true> heapIndices2(
    mem, {queryTileSize, pass2Chunks, k}, stream);
  DeviceTensor<int, 3, true>* heapIndices[2] =
    {&heapIndices1, &heapIndices2};

  auto streams = res->getAlternateStreamsCurrentDevice();
  // The alternate streams must wait for work already enqueued on the
  // default stream (allocations, memsets) before tiles run on them
  streamWait(streams, {stream});

  int curStream = 0;

  for (int query = 0; query < queries.getSize(0); query += queryTileSize) {
    int numQueriesInTile =
      std::min(queryTileSize, queries.getSize(0) - query);

    auto prefixSumOffsetsView =
      prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile);

    auto listIdsView =
      listIds.narrowOutermost(query, numQueriesInTile);
    auto queryView =
      queries.narrowOutermost(query, numQueriesInTile);
    auto residualBaseView =
      residualBase.narrowOutermost(query, numQueriesInTile);

    auto heapDistancesView =
      heapDistances[curStream]->narrowOutermost(0, numQueriesInTile);
    auto heapIndicesView =
      heapIndices[curStream]->narrowOutermost(0, numQueriesInTile);

    auto outDistanceView =
      outDistances.narrowOutermost(query, numQueriesInTile);
    auto outIndicesView =
      outIndices.narrowOutermost(query, numQueriesInTile);

    runIVFFlatScanTile(queryView,
                       listIdsView,
                       listData,
                       listIndices,
                       indicesOptions,
                       listLengths,
                       *thrustMem[curStream],
                       prefixSumOffsetsView,
                       *allDistances[curStream],
                       heapDistancesView,
                       heapIndicesView,
                       k,
                       metric,
                       useResidual,
                       residualBaseView,
                       scalarQ,
                       outDistanceView,
                       outIndicesView,
                       streams[curStream]);

    curStream = (curStream + 1) % 2;
  }

  // The default stream must wait for all tile work before the caller
  // may consume the outputs
  streamWait({stream}, streams);
}
} } // namespace
|
10224a1ff10201ae90575ef888c31b19e1b6e676.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2016, The Bifrost Authors. All rights reserved.
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of The Bifrost Authors nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <bifrost/fdmt.h>
#include "assert.hpp"
#include "utils.hpp"
#include "workspace.hpp"
#include "cuda.hpp"
//#include <limits>
#include <math_constants.h> // For CUDART_NAN_F
#include <thrust/device_vector.h>
#include <vector>
#include <map>
#include <string>
// HACK TESTING
#include <iostream>
using std::cout;
using std::endl;
// Note: Can be tuned over block shape
//
// FDMT initialization: for each channel c, computes running partial sums
// over the first d time samples (d = 0..ndelay-1 for that channel),
// normalized by 1/(d+1), writing one output row per (channel, delay).
// Grid-stride loops: threads cover time along x and channels along y.
// Unused (t < d) elements are filled with NaN.
template<typename InType, typename OutType>
__global__
void fdmt_init_kernel(int ntime,
                      int nchan,
                      bool reverse_band,
                      bool reverse_time,
                      int const* __restrict__ d_offsets,
                      InType /*const* __restrict__*/ d_in,
                      int istride,
                      OutType* __restrict__ d_out,
                      int ostride) {
	int t0 = threadIdx.x + blockIdx.x*blockDim.x;
	int c0 = threadIdx.y + blockIdx.y*blockDim.y;
	//int b0 = blockIdx.z;
	//for( int b=b0; b<nbatch; b+=gridDim.z ) {
	for( int c=c0; c<nchan; c+=blockDim.y*gridDim.y ) {
		// d_offsets is an exclusive scan of per-channel delay counts
		int offset = d_offsets[c];
		int ndelay = d_offsets[c+1] - offset;
		for( int t=t0; t<ntime; t+=blockDim.x*gridDim.x ) {
			OutType tmp(0);
			for( int d=0; d<ndelay; ++d ) {
				// Note: This fills the unused elements with NaNs
				// 0x7fffffff is the exact bit pattern CUDA's CUDART_NAN_F
				// expands to; spelled via __int_as_float so this hipified
				// file compiles under both CUDA and HIP without
				// math_constants.h
				OutType outval(__int_as_float(0x7fffffff));//std::numeric_limits<OutType>::quiet_NaN());
				if( t >= d ) {
					int c_ = reverse_band ? nchan-1 - c : c;
					int t_ = reverse_time ? ntime-1 - t : t;
					tmp += d_in[(t_-d) + istride*c_];// + ibstride*b];
					// TODO: Check effect of not-/using sqrt
					//         The final paper has no sqrt (i.e., computation is just the mean)
					//outval = tmp * rsqrtf(d+1);
					outval = tmp * (1.f/(d+1));
				}
				d_out[t + ostride*(offset+d)] = outval;
				//d_out[t + ostride*(offset+d) + obstride*b] = outval;
			}
		}
	}
	//}
}
// Note: Can be tuned over block shape
//
// One FDMT merge step: each output row r combines two source rows from
// the previous step (srcrow0 as-is, srcrow1 shifted by d_delays[r]).
// Non-existent source rows are signified by -1 and contribute 0.
// Grid-stride loops: threads cover time along x and rows along y.
// On the final step, elements with t < r are skipped (diagonal
// reindexing leaves them unused) and time may be reversed on output.
template<typename DType>
__global__
void fdmt_exec_kernel(int ntime,
                      int nrow,
                      bool is_final_step,
                      bool reverse_time,
                      int const* __restrict__ d_delays,
                      int2 const* __restrict__ d_srcrows,
                      DType const* __restrict__ d_in,
                      int istride,
                      DType* __restrict__ d_out,
                      int ostride) {
	int t0 = threadIdx.x + blockIdx.x*blockDim.x;
	int r0 = threadIdx.y + blockIdx.y*blockDim.y;
	for( int r=r0; r<nrow; r+=blockDim.y*gridDim.y ) {
		int delay = d_delays[r];
		int srcrow0 = d_srcrows[r].x;
		int srcrow1 = d_srcrows[r].y;
		for( int t=t0; t<ntime; t+=blockDim.x*gridDim.x ) {
			// Avoid elements that go unused due to diagonal reindexing
			if( is_final_step && t < r ) {
				//int ostride_ = ostride - reverse_time;
				//d_out[t + ostride_*r] = CUDART_NAN_F;
				continue;
			}
			// HACK TESTING
			////if( ostride < ntime && t >= ntime-1 - r ) {
			//if( ostride != ntime && t < r ) {
			//	int ostride_ = ostride - (ostride > ntime);
			//	d_out[t + ostride_*r] = CUDART_NAN_F;
			//	continue;
			//}// else if( ostride > ntime && t >= ntime - r ) {
			//	//d_out[t - (ntime-1) + ostride*r] = CUDART_NAN_F;
			//	continue;
			//}
			// Note: Non-existent rows are signified by -1
			//if( t == 0 && r == 0 ) {
			//	printf("t,srcrow0,srcrow1,istride = %i, %i, %i, %i\n", t, srcrow0, srcrow1, istride);
			//}
			//if( threadIdx.x == 63 && blockIdx.y == 4 ) {
			//printf("istride = %i, srcrow0 = %i, srcrow1 = %i, d_in = %p\n", istride, srcrow0, srcrow1, d_in);
			//}
			//if( t == 0 ) {// && r == 1 ) {
			//	printf("istride = %i, srcrow0 = %i, srcrow1 = %i, d_in = %p\n", istride, srcrow0, srcrow1, d_in);
			//}
			DType outval = (srcrow0 != -1) ? d_in[ t + istride*srcrow0] : 0;
			if( t >= delay ) {
				outval += (srcrow1 != -1) ? d_in[(t-delay) + istride*srcrow1] : 0;
			}
			// Optionally reverse the time axis on the very last step
			int t_ = (is_final_step && reverse_time) ? ntime-1 - t : t;
			d_out[t_ + ostride*r] = outval;
		}
	}
}
// Host-side wrapper that enqueues fdmt_init_kernel on `stream`.
// Grid covers (ntime, nchan) with 256x1 blocks, capped at the 65535
// per-dimension grid limit (the kernel's grid-stride loops cover the
// rest).
template<typename InType, typename OutType>
inline
void launch_fdmt_init_kernel(int ntime,
                             int nchan,
                             bool reverse_band,
                             bool reverse_time,
                             //int const* d_ndelays,
                             int const* d_offsets,
                             InType /*const**/ d_in,
                             int istride,
                             OutType* d_out,
                             int ostride,
                             hipStream_t stream=0) {
	dim3 block(256, 1); // TODO: Tune this
	dim3 grid(std::min((ntime-1)/block.x+1, 65535u),
	          std::min((nchan-1)/block.y+1, 65535u));
	//fdmt_init_kernel<<<grid,block,0,stream>>>(ntime,nchan,
	//                                          //d_ndelays,
	//                                          d_offsets,
	//                                          d_in,istride,
	//                                          d_out,ostride);
	void* args[] = {&ntime,
	                &nchan,
	                &reverse_band,
	                &reverse_time,
	                &d_offsets,
	                &d_in,
	                &istride,
	                &d_out,
	                &ostride};
	// Note: hipLaunchKernel, not cudaLaunchKernel — hipify does not
	// translate launches made through the runtime launch API, and the
	// CUDA symbol does not exist under HIP. Same signature/argument
	// order as cudaLaunchKernel.
	hipLaunchKernel((void*)fdmt_init_kernel<InType,OutType>,
	                grid, block,
	                &args[0], 0, stream);
}
// Host-side wrapper that enqueues fdmt_exec_kernel on `stream`.
// Grid covers (ntime, nrow) with 256x1 blocks, capped at the 65535
// per-dimension grid limit (the kernel's grid-stride loops cover the
// rest).
template<typename DType>
inline
void launch_fdmt_exec_kernel(int ntime,
                             int nrow,
                             bool is_final_step,
                             bool reverse_time,
                             int const* d_delays,
                             int2 const* d_srcrows,
                             DType const* d_in,
                             int istride,
                             DType* d_out,
                             int ostride,
                             hipStream_t stream=0) {
	//cout << "LAUNCH " << d_in << ", " << d_out << endl;
	dim3 block(256, 1); // TODO: Tune this
	dim3 grid(std::min((ntime-1)/block.x+1, 65535u),
	          std::min((nrow -1)/block.y+1, 65535u));
	//fdmt_exec_kernel<<<grid,block,0,stream>>>(ntime,nrow,
	//                                          d_delays,d_srcrows,
	//                                          d_in,istride,
	//                                          d_out,ostride);
	void* args[] = {&ntime,
	                &nrow,
	                &is_final_step,
	                &reverse_time,
	                &d_delays,
	                &d_srcrows,
	                &d_in,
	                &istride,
	                &d_out,
	                &ostride};
	// Note: hipLaunchKernel, not cudaLaunchKernel — hipify does not
	// translate launches made through the runtime launch API, and the
	// CUDA symbol does not exist under HIP. Same signature/argument
	// order as cudaLaunchKernel.
	//hipLaunchKernel((void*)static_cast<void(*)(int, int, const int*, const int2*, const DType*, int, DType*, int)>(fdmt_exec_kernel<DType>),
	hipLaunchKernel((void*)fdmt_exec_kernel<DType>,
	                grid, block,
	                &args[0], 0, stream);
}
/*
**** 4096
**** 4096
**** 2048
**** 1066
**** 650
**** 475
**** 381
**** 337
**** 316
**** 302
**** 299
**** 295
**** 293
SB 3
delay 135
Step 10 prev: 58, 78
srcs: 57, 78
NROW_MAX = 4096
STEP 1
STEP 2
STEP 3
STEP 4
STEP 5
STEP 6
STEP 7
STEP 8
STEP 9
STEP 10
STEP 11
*/
class BFfdmt_impl {
typedef int IType;
typedef double FType;
typedef int2 IndexPair;
public: // HACK WAR for what looks like a bug in the CUDA 7.0 compiler
typedef float DType;
private:
IType _nchan;
IType _max_delay;
FType _f0;
FType _df;
FType _exponent;
IType _nrow_max;
IType _plan_stride;
IType _buffer_stride;
std::vector<IType> _offsets;
std::vector<std::vector<IndexPair> > _step_srcrows;
std::vector<std::vector<IType> > _step_delays;
IType* _d_offsets;
IndexPair* _d_step_srcrows;
IType* _d_step_delays;
DType* _d_buffer_a;
DType* _d_buffer_b;
Workspace _plan_storage;
Workspace _exec_storage;
// TODO: Use something other than Thrust
thrust::device_vector<char> _dv_plan_storage;
thrust::device_vector<char> _dv_exec_storage;
hipStream_t _stream;
bool _reverse_band;
	// Center frequency of channel `chan`: channel 0 lies at _f0 and
	// channels are spaced by _df.
	FType cfreq(IType chan) {
		return _f0 + _df*chan;
	}
FType rel_delay(FType flo, FType fhi, FType fmin, FType fmax) {
FType g = _exponent;
FType eps = std::numeric_limits<FType>::epsilon();
FType denom = ::pow(fmin,g) - ::pow(fmax,g);
if( ::abs(denom) < eps ) {
denom = ::copysign(eps, denom);
}
return (::pow(flo,g) - ::pow(fhi,g)) / denom;
}
	// Convenience overload: fractional delay of [flo, fhi] relative to
	// the full configured band, spanning channel 0 to channel _nchan-1.
	FType rel_delay(FType flo, FType fhi) {
		FType fmin = cfreq(0);
		FType fmax = cfreq(_nchan-1);
		//std::swap(fmin, fmax);
		//FType fmax = cfreq(_nchan); // HACK TESTING
		return rel_delay(flo, fhi, fmin, fmax);
	}
IType subband_ndelay(FType f0, FType df) {
FType fracdelay = rel_delay(f0, f0+df);
FType fmaxdelay = fracdelay*(_max_delay-1);
IType ndelay = IType(::ceil(fmaxdelay)) + 1;
return ndelay;
}
public:
	// Default-construct an empty (uninitialized) plan bound to the
	// library-global stream; init() must be called before use.
	BFfdmt_impl() : _nchan(0), _max_delay(0), _f0(0), _df(0), _exponent(0),
	                _stream(g_cuda_stream) {}
	// Number of frequency channels the current plan was built for
	inline IType nchan() const { return _nchan; }
	// Maximum dispersion delay (in time samples) the plan covers
	inline IType max_delay() const { return _max_delay; }
	// Build (host-side) the FDMT merge plan for the given band.
	// Computes, for every step of the tree, which pair of previous-step
	// rows each output row combines (_step_srcrows) and the time shift
	// applied to the second source (_step_delays), plus the per-channel
	// row offsets used by the initialization step (_offsets) and the
	// maximum row count of any step (_nrow_max).
	// No-op if called again with identical parameters.
	void init(IType nchan,
	          IType max_delay,
	          FType f0,
	          FType df,
	          FType exponent) {
		// Normalize to ascending frequency; remember to flip the band at
		// kernel time if the caller gave a descending channel order
		if( df < 0. ) {
			_reverse_band = true;
			f0 += (nchan-1)*df;
			df *= -1;
		} else {
			_reverse_band = false;
		}
		// Already planned for these exact parameters: nothing to do
		if( nchan     == _nchan     &&
		    max_delay == _max_delay &&
		    f0        == _f0        &&
		    df        == _df        &&
		    exponent  == _exponent ) {
			return;
		}
		_f0        = f0;
		_df        = df;
		_nchan     = nchan;
		_max_delay = max_delay;
		_exponent  = exponent;
		// Build the sub-band merge tree: at each step, pairs of adjacent
		// sub-bands are merged until only one remains.
		// Note: Initialized with 1 entry as dummy for initialization step
		std::vector<std::vector<IndexPair> > step_subband_parents(1);
		IType nsubband = _nchan;
		while( nsubband > 1 ) {
			IType step = step_subband_parents.size();
			step_subband_parents.push_back(std::vector<IndexPair>());
			for( IType sb=0; sb<nsubband; sb+=2 ) {
				IType parent0 = sb;
				IType parent1 = sb+1;
				if( nsubband % 2 ) {
					// Note: Alternating left/right-biased merging scheme
					if( (step-1) % 2 ) {
						parent0 -= 1; // Note: First entry becomes -1 => non-existent
						parent1 -= 1;
					} else {
						// Note: Last entry becomes -1 => non-existent
						if( parent1 == nsubband ) parent1 = -1;
					}
				}
				//cout << step << ": " << parent0 << ", " << parent1 << endl;
				IndexPair parents = make_int2(parent0, parent1);
				step_subband_parents[step].push_back(parents);
			}
			nsubband = step_subband_parents[step].size();
		}
		// Note: Includes initialization step
		IType nstep = step_subband_parents.size();
		// Channel counts per sub-band at each step (-1 parents contribute 0)
		std::vector<std::vector<IType> > step_subband_nchans(nstep);
		step_subband_nchans[0].assign(_nchan, 1);
		for( IType step=1; step<nstep; ++step ) {
			IType nsubband = step_subband_parents[step].size();
			step_subband_nchans[step].resize(nsubband);
			for( IType sb=0; sb<nsubband; ++sb ) {
				IndexPair parents = step_subband_parents[step][sb];
				IType p0 = parents.x;//first;
				IType p1 = parents.y;//second;
				IType parent0_nchan = (p0!=-1) ? step_subband_nchans[step-1][p0] : 0;
				IType parent1_nchan = (p1!=-1) ? step_subband_nchans[step-1][p1] : 0;
				IType child_nchan = parent0_nchan + parent1_nchan;
				step_subband_nchans[step][sb] = child_nchan;
			}
		}
		// Exclusive scans of channel counts and delay (row) counts per step
		std::vector<std::vector<IType> > step_subband_chan_offsets(nstep);
		std::vector<std::vector<IType> > step_subband_row_offsets(nstep);
		IType nrow_max = 0;
		for( IType step=0; step<nstep; ++step ) {
			IType nsubband = step_subband_nchans[step].size();
			// Note: +1 to store the total in the last element
			//         (The array will hold a complete exclusive scan)
			step_subband_chan_offsets[step].resize(nsubband+1);
			step_subband_row_offsets[step].resize(nsubband+1);
			IType chan0 = 0;
			IType row_offset = 0;
			for( IType sb=0; sb<nsubband; ++sb ) {
				IType nchan = step_subband_nchans[step][sb];
				FType f0 = cfreq(chan0) - (step == 0 ? 0.5*_df : 0.);
				//FType f0 = cfreq(chan0); // HACK TESTING
				FType df = _df * (step == 0 ? 1 : nchan-1);
				//FType df = _df * nchan; // HACK TESTING
				//cout << "df = " << df << endl;
				IType ndelay = subband_ndelay(f0, df);
				//cout << "NDELAY = " << ndelay << endl;
				step_subband_chan_offsets[step][sb] = chan0;
				step_subband_row_offsets[step][sb] = row_offset;
				chan0      += nchan;
				row_offset += ndelay;
			}
			step_subband_chan_offsets[step][nsubband] = chan0;
			step_subband_row_offsets[step][nsubband] = row_offset;
			nrow_max = std::max(nrow_max, row_offset);
			//*cout << "**** Nrow: " << row_offset << endl;
		}
		// Save for use during initialization
		//plan->_init_subband_row_offsets = step_subband_row_offsets[0];
		_offsets = step_subband_row_offsets[0];
		_nrow_max = nrow_max;
		//cout << "**** " << _nrow_max << endl;
		// For every output row of every step, record the two source rows
		// (or -1 for non-existent) and the shift applied to the second.
		// Note: First entry in these remains empty
		std::vector<std::vector<IndexPair> > step_srcrows(nstep);
		std::vector<std::vector<IType> >     step_delays(nstep);
		for( IType step=1; step<nstep; ++step ) {
			IType nsubband = step_subband_nchans[step].size();
			IType nrow = step_subband_row_offsets[step][nsubband];
			//*cout << "nrow " << nrow << endl;
			step_srcrows[step].resize(nrow);
			step_delays[step].resize(nrow);
			for( IType sb=0; sb<nsubband; ++sb ) {
				IndexPair parents = step_subband_parents[step][sb];
				IType p0 = parents.x;//first;
				IType p1 = parents.y;//second;
				// TODO: Setting these to 1 instead of 0 in the exceptional case fixed some indexing
				//         issues, but should double-check that the results are good.
				IType p0_nchan = (p0!=-1) ? step_subband_nchans[step-1][p0] : 1;
				IType p1_nchan = (p1!=-1) ? step_subband_nchans[step-1][p1] : 1;
				// Note: If first parent doesn't exist, then it effectively starts where the second parent starts
				//       If second parent doesn't exist, then it effectively starts where the first parent ends
				IType p0_chan0 = step_subband_chan_offsets[step-1][(p0!=-1) ? p0 : p1];
				IType p1_chan0 = step_subband_chan_offsets[step-1][(p1!=-1) ? p1 : p0];
				if( p1 == -1 ) {
					p1_chan0 += (p0_nchan-1);
				}
				// Band-edge frequencies of the two parents and their
				// fractional delays relative to the merged band
				FType flo    = cfreq(p0_chan0);
				FType fmidlo = cfreq(p0_chan0 + (p0_nchan-1));
				FType fmidhi = cfreq(p1_chan0);
				FType fhi    = cfreq(p1_chan0 + (p1_nchan-1));
				FType cmidlo = rel_delay(flo, fmidlo, flo, fhi);
				FType cmidhi = rel_delay(flo, fmidhi, flo, fhi);
				/*
				// HACK TESTING
				FType flo    = cfreq(p0_chan0) - 0.5*_df;
				FType fmidlo = flo + (p0_nchan-1)*_df;
				FType fmidhi = flo + p0_nchan*_df;
				FType fhi    = flo + (p0_nchan + p1_nchan - 1)*_df;
				FType cmidlo = rel_delay(fmidlo, flo, fhi, flo);
				FType cmidhi = rel_delay(fmidhi, flo, fhi, flo);
				*/
				//cout << p0 << ", " << p1 << endl;
				//cout << p0_chan0 << ", " << p0_nchan << "; " << p1_chan0 << ", " << p1_nchan << endl;
				//cout << cmidlo << ", " << cmidhi << endl;
				// TODO: See if should use same approach with these as in fdmt.py
				IType beg = step_subband_row_offsets[step][sb];
				IType end = step_subband_row_offsets[step][sb+1];
				IType ndelay = end - beg;
				for( IType delay=0; delay<ndelay; ++delay ) {
					IType dmidlo = (IType)::round(delay*cmidlo);
					IType dmidhi = (IType)::round(delay*cmidhi);
					IType drest = delay - dmidhi;
					assert( dmidlo <= delay );
					assert( dmidhi <= delay );
					IType prev_beg  = (p0!=-1) ? step_subband_row_offsets[step-1][p0]   : -1;
					IType prev_mid0 = (p0!=-1) ? step_subband_row_offsets[step-1][p0+1] : -1;
					IType prev_mid1 = (p1!=-1) ? step_subband_row_offsets[step-1][p1]   : -1;
					IType prev_end  = (p1!=-1) ? step_subband_row_offsets[step-1][p1+1] : -1;
					// HACK WAR for strange indexing error observed only when nchan=4096
					if( p1 != -1 && drest >= prev_end - prev_mid1 ) {
						drest -= 1;
					}
					// Diagnostic dump before the asserts below would fire
					if( (p0 != -1 && dmidlo >= prev_mid0 - prev_beg) ||
					    (p1 != -1 && drest  >= prev_end - prev_mid1) ) {
						cout << "FDMT DEBUGGING INFO" << endl;
						cout << "SB " << sb << endl;
						cout << "delay " << delay << endl;
						cout << "Step " << step << " prev: " << prev_mid0 - prev_beg << ", " << prev_end - prev_mid1 << endl;
						cout << "       srcs: " << dmidlo << ", " << drest << endl;
					}
					assert( p0 == -1 || dmidlo < prev_mid0 - prev_beg );
					assert( p1 == -1 || drest  < prev_end  - prev_mid1 );
					IType dst_row  = step_subband_row_offsets[step  ][sb] + delay;
					IType src_row0 = (p0!=-1) ? step_subband_row_offsets[step-1][p0] + dmidlo : -1;
					IType src_row1 = (p1!=-1) ? step_subband_row_offsets[step-1][p1] + drest  : -1;
					step_srcrows[step][dst_row].x = src_row0;//first = src_row0;
					//cout << "step " << step << ", dst_row = " << dst_row << ", delay = " << dmidhi << ", src_row0 = " << src_row0 << ", src_row1 = " << src_row1 << endl;
					step_srcrows[step][dst_row].y = src_row1;//second = src_row1;
					step_delays[step][dst_row] = dmidhi;
					//IType prev_nsubband = step_subband_nchans[step-1].size();
					//IType prev_nrow = step_subband_row_offsets[step-1][prev_nsubband];
				}
			}
		}
		// Save for use during execution
		_step_srcrows = step_srcrows;
		_step_delays  = step_delays;
	}
bool init_plan_storage(void* storage_ptr, BFsize* storage_size) {
enum {
ALIGNMENT_BYTES = 512,
ALIGNMENT_ELMTS = ALIGNMENT_BYTES / sizeof(int)
};
Workspace workspace(ALIGNMENT_BYTES);
_plan_stride = round_up(_nrow_max, ALIGNMENT_ELMTS);
//int nstep_execute = _step_delays.size() - 1;
int nstep = _step_delays.size();
workspace.reserve(_nchan+1, &_d_offsets);
workspace.reserve(nstep*_plan_stride, &_d_step_srcrows);
workspace.reserve(nstep*_plan_stride, &_d_step_delays);
if( storage_size ) {
if( !storage_ptr ) {
// Return required storage size
*storage_size = workspace.size();
return false;
} else {
BF_ASSERT_EXCEPTION(*storage_size >= workspace.size(),
BF_STATUS_INSUFFICIENT_STORAGE);
}
} else {
// Auto-allocate storage
BF_ASSERT_EXCEPTION(!storage_ptr, BF_STATUS_INVALID_ARGUMENT);
_dv_plan_storage.resize(workspace.size());
storage_ptr = thrust::raw_pointer_cast(&_dv_plan_storage[0]);
}
//std::cout << "workspace.size() = " << workspace.size() << std::endl;
//_d_offsets = (IType*)0x123;
//std::cout << "_d_offsets = " << _d_offsets << std::endl;
//std::cout << "storage_ptr = " << storage_ptr << std::endl;
workspace.commit(storage_ptr);
//std::cout << "_d_offsets = " << _d_offsets << std::endl;
BF_CHECK_CUDA_EXCEPTION( hipMemcpyAsync(_d_offsets,
&_offsets[0],
sizeof(int )*_offsets.size(),
hipMemcpyHostToDevice,
_stream),
BF_STATUS_MEM_OP_FAILED );
for( int step=0; step<nstep; ++step ) {
BF_CHECK_CUDA_EXCEPTION( hipMemcpyAsync(_d_step_srcrows + step*_plan_stride,
&_step_srcrows[step][0],
sizeof(int2)*_step_srcrows[step].size(),
hipMemcpyHostToDevice,
_stream),
BF_STATUS_MEM_OP_FAILED );
BF_CHECK_CUDA_EXCEPTION( hipMemcpyAsync(_d_step_delays + step*_plan_stride,
&_step_delays[step][0],
sizeof(int)*_step_delays[step].size(),
hipMemcpyHostToDevice,
_stream),
BF_STATUS_MEM_OP_FAILED );
}
BF_CHECK_CUDA_EXCEPTION( hipStreamSynchronize(_stream),
BF_STATUS_DEVICE_ERROR );
return true;
}
bool init_exec_storage(void* storage_ptr, BFsize* storage_size, size_t ntime) {
enum {
ALIGNMENT_BYTES = 512,
ALIGNMENT_ELMTS = ALIGNMENT_BYTES / sizeof(DType)
};
Workspace workspace(ALIGNMENT_BYTES);
//std::cout << "ntime = " << ntime << std::endl;
//std::cout << "_nrow_max = " << _nrow_max << std::endl;
_buffer_stride = round_up(ntime, ALIGNMENT_ELMTS);
//std::cout << "_buffer_stride = " << _buffer_stride << std::endl;
// TODO: Check if truly safe to allocate smaller buffer_b
workspace.reserve(_nrow_max*_buffer_stride, &_d_buffer_a);
workspace.reserve(_nrow_max*_buffer_stride, &_d_buffer_b);
if( storage_size ) {
if( !storage_ptr ) {
//cout << "++++ returning storage size" << endl;
// Return required storage size
*storage_size = workspace.size();
return false;
} else {
//cout << "++++ using user storage" << endl;
BF_ASSERT_EXCEPTION(*storage_size >= workspace.size(),
BF_STATUS_INSUFFICIENT_STORAGE);
}
} else {
//cout << "++++ auto-allocating storage" << endl;
// Auto-allocate storage
BF_ASSERT_EXCEPTION(!storage_ptr, BF_STATUS_INVALID_ARGUMENT);
_dv_exec_storage.resize(workspace.size());
storage_ptr = thrust::raw_pointer_cast(&_dv_exec_storage[0]);
//std::cout << "*** exec storage_ptr = " << storage_ptr << std::endl;
}
//cout << "++++ committing" << endl;
workspace.commit(storage_ptr);
return true;
}
void execute(BFarray const* in,
BFarray const* out,
size_t ntime,
bool negative_delays) {
//cout << "out dtype = " << out->dtype << endl;
BF_ASSERT_EXCEPTION(out->dtype == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE);
BF_ASSERT_EXCEPTION( out->strides[in->ndim-1] == 4, BF_STATUS_UNSUPPORTED_STRIDE);
DType* d_ibuf = _d_buffer_b;
DType* d_obuf = _d_buffer_a;
//std::cout << "_d_buffer_a = " << _d_buffer_a << std::endl;
//std::cout << "_d_buffer_b = " << _d_buffer_b << std::endl;
//BF_ASSERT_EXCEPTION(/*abs*/(in->strides[in->ndim-1]) == 1, BF_STATUS_UNSUPPORTED_STRIDE);
BF_ASSERT_EXCEPTION( in->strides[in->ndim-2] > 0, BF_STATUS_UNSUPPORTED_STRIDE);
BF_ASSERT_EXCEPTION(out->strides[in->ndim-2] > 0, BF_STATUS_UNSUPPORTED_STRIDE);
//bool reverse_time = (in->strides[in->ndim-1] < 0);
bool reverse_time = negative_delays;
BF_CHECK_CUDA_EXCEPTION(hipGetLastError(), BF_STATUS_INTERNAL_ERROR);
#define LAUNCH_FDMT_INIT_KERNEL(IterType) \
BF_ASSERT_EXCEPTION(/*abs*/(in->strides[in->ndim-1]) == sizeof(value_type<IterType>::type), BF_STATUS_UNSUPPORTED_STRIDE); \
launch_fdmt_init_kernel(ntime, _nchan, _reverse_band, reverse_time, \
_d_offsets, \
(IterType)in->data, \
in->strides[in->ndim-2]/sizeof(value_type<IterType>::type), /* TODO: Check this*/ \
d_obuf, _buffer_stride, \
_stream)
switch( in->dtype ) {
// HACK testing disabled
// TODO: Get NbitReader working
//case BF_DTYPE_I1: LAUNCH_FDMT_INIT_KERNEL(NbitReader<1>); break;
//case BF_DTYPE_I2: LAUNCH_FDMT_INIT_KERNEL(NbitReader<2>); break;
//case BF_DTYPE_I4: LAUNCH_FDMT_INIT_KERNEL(NbitReader<4>); break;
case BF_DTYPE_I8: LAUNCH_FDMT_INIT_KERNEL(int8_t*); break;
case BF_DTYPE_I16: LAUNCH_FDMT_INIT_KERNEL(int16_t*); break;
case BF_DTYPE_I32: LAUNCH_FDMT_INIT_KERNEL(int32_t*); break;
case BF_DTYPE_U8: LAUNCH_FDMT_INIT_KERNEL(uint8_t*); break;
case BF_DTYPE_U16: LAUNCH_FDMT_INIT_KERNEL(uint16_t*); break;
case BF_DTYPE_U32: LAUNCH_FDMT_INIT_KERNEL(uint32_t*); break;
case BF_DTYPE_F32: LAUNCH_FDMT_INIT_KERNEL(float*); break;
default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE);
}
#undef LAUNCH_FDMT_INIT_KERNEL
BF_CHECK_CUDA_EXCEPTION(hipGetLastError(), BF_STATUS_INTERNAL_ERROR);
std::swap(d_ibuf, d_obuf);
size_t ostride = _buffer_stride;
IType nstep = _step_delays.size();
for( int step=1; step<nstep; ++step ) {
//cout << "STEP " << step << endl;
IType nrow = _step_srcrows[step].size();
//cout << "nrow " << nrow << endl;
if( step == nstep-1 ) {
d_obuf = (DType*)out->data;
ostride = out->strides[out->ndim-2]/sizeof(DType); // TODO: Check this
// HACK TESTING diagonal reindexing to align output with TOA at highest freq
ostride += reverse_time ? +1 : -1;
}
//hipDeviceSynchronize(); // HACK TESTING
launch_fdmt_exec_kernel(ntime, nrow, (step==nstep-1), reverse_time,
_d_step_delays + step*_plan_stride,
_d_step_srcrows + step*_plan_stride,
d_ibuf, _buffer_stride,
d_obuf, ostride,
_stream);
//hipDeviceSynchronize(); // HACK TESTING
//BF_CHECK_CUDA_EXCEPTION(hipGetLastError(), BF_STATUS_INTERNAL_ERROR);
std::swap(d_ibuf, d_obuf);
}
BF_CHECK_CUDA_EXCEPTION(hipGetLastError(), BF_STATUS_INTERNAL_ERROR);
//hipDeviceSynchronize(); // HACK TESTING
}
void set_stream(hipStream_t stream) {
_stream = stream;
}
};
BFstatus bfFdmtCreate(BFfdmt* plan_ptr) {
BF_ASSERT(plan_ptr, BF_STATUS_INVALID_POINTER);
BF_TRY_RETURN_ELSE(*plan_ptr = new BFfdmt_impl(),
*plan_ptr = 0);
}
// **TODO: Passing 'BFarray const* in' here could replace nchan, f0, df and space if BFarray included dimension scales
// Also, could potentially set the output dimension scales (dm0, ddm)
// OR, could just leave these to higher-level wrappers (e.g., Python)
// This might be for the best in the short term
BFstatus bfFdmtInit(BFfdmt plan,
BFsize nchan,
BFsize max_delay,
double f0,
double df,
double exponent,
BFspace space,
void* plan_storage,
BFsize* plan_storage_size) {
BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
BF_ASSERT(space_accessible_from(space, BF_SPACE_CUDA),
BF_STATUS_UNSUPPORTED_SPACE);
BF_TRY(plan->init(nchan, max_delay, f0, df, exponent));
BF_TRY_RETURN(plan->init_plan_storage(plan_storage, plan_storage_size));
}
BFstatus bfFdmtSetStream(BFfdmt plan,
void const* stream) {
BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
BF_ASSERT(stream, BF_STATUS_INVALID_POINTER);
BF_TRY_RETURN(plan->set_stream(*(hipStream_t*)stream));
}
BFstatus bfFdmtExecute(BFfdmt plan,
BFarray const* in,
BFarray const* out,
BFbool negative_delays,
void* exec_storage,
BFsize* exec_storage_size) {
BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
BF_ASSERT(in, BF_STATUS_INVALID_POINTER);
BF_ASSERT(out, BF_STATUS_INVALID_POINTER);
BF_ASSERT( in->shape[ in->ndim-2] == plan->nchan(), BF_STATUS_INVALID_SHAPE);
BF_ASSERT(out->shape[out->ndim-2] == plan->max_delay(), BF_STATUS_INVALID_SHAPE);
BF_ASSERT( in->shape[in->ndim-1] == out->shape[out->ndim-1], BF_STATUS_INVALID_SHAPE);
// TODO: BF_ASSERT(...);
size_t ntime = in->shape[in->ndim-1];
bool ready;
BF_TRY(ready = plan->init_exec_storage(exec_storage, exec_storage_size, ntime));
if( !ready ) {
// Just requesting exec_storage_size, not ready to execute yet
return BF_STATUS_SUCCESS;
}
BF_ASSERT(space_accessible_from( in->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE);
BF_ASSERT(space_accessible_from(out->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE);
BF_TRY_RETURN(plan->execute(in, out, ntime, negative_delays));
}
BFstatus bfFdmtDestroy(BFfdmt plan) {
BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
delete plan;
return BF_STATUS_SUCCESS;
}
| 10224a1ff10201ae90575ef888c31b19e1b6e676.cu | /*
* Copyright (c) 2016, The Bifrost Authors. All rights reserved.
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of The Bifrost Authors nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <bifrost/fdmt.h>
#include "assert.hpp"
#include "utils.hpp"
#include "workspace.hpp"
#include "cuda.hpp"
//#include <limits>
#include <math_constants.h> // For CUDART_NAN_F
#include <thrust/device_vector.h>
#include <vector>
#include <map>
#include <string>
// HACK TESTING
#include <iostream>
using std::cout;
using std::endl;
// Note: Can be tuned over block shape
template<typename InType, typename OutType>
__global__
void fdmt_init_kernel(int ntime,
int nchan,
bool reverse_band,
bool reverse_time,
int const* __restrict__ d_offsets,
InType /*const* __restrict__*/ d_in,
int istride,
OutType* __restrict__ d_out,
int ostride) {
int t0 = threadIdx.x + blockIdx.x*blockDim.x;
int c0 = threadIdx.y + blockIdx.y*blockDim.y;
//int b0 = blockIdx.z;
//for( int b=b0; b<nbatch; b+=gridDim.z ) {
for( int c=c0; c<nchan; c+=blockDim.y*gridDim.y ) {
int offset = d_offsets[c];
int ndelay = d_offsets[c+1] - offset;
for( int t=t0; t<ntime; t+=blockDim.x*gridDim.x ) {
OutType tmp(0);
for( int d=0; d<ndelay; ++d ) {
// Note: This fills the unused elements with NaNs
OutType outval(CUDART_NAN_F);//std::numeric_limits<OutType>::quiet_NaN());
if( t >= d ) {
int c_ = reverse_band ? nchan-1 - c : c;
int t_ = reverse_time ? ntime-1 - t : t;
tmp += d_in[(t_-d) + istride*c_];// + ibstride*b];
// TODO: Check effect of not-/using sqrt
// The final paper has no sqrt (i.e., computation is just the mean)
//outval = tmp * rsqrtf(d+1);
outval = tmp * (1.f/(d+1));
}
d_out[t + ostride*(offset+d)] = outval;
//d_out[t + ostride*(offset+d) + obstride*b] = outval;
}
}
}
//}
}
// Note: Can be tuned over block shape
template<typename DType>
__global__
void fdmt_exec_kernel(int ntime,
int nrow,
bool is_final_step,
bool reverse_time,
int const* __restrict__ d_delays,
int2 const* __restrict__ d_srcrows,
DType const* __restrict__ d_in,
int istride,
DType* __restrict__ d_out,
int ostride) {
int t0 = threadIdx.x + blockIdx.x*blockDim.x;
int r0 = threadIdx.y + blockIdx.y*blockDim.y;
for( int r=r0; r<nrow; r+=blockDim.y*gridDim.y ) {
int delay = d_delays[r];
int srcrow0 = d_srcrows[r].x;
int srcrow1 = d_srcrows[r].y;
for( int t=t0; t<ntime; t+=blockDim.x*gridDim.x ) {
// Avoid elements that go unused due to diagonal reindexing
if( is_final_step && t < r ) {
//int ostride_ = ostride - reverse_time;
//d_out[t + ostride_*r] = CUDART_NAN_F;
continue;
}
// HACK TESTING
////if( ostride < ntime && t >= ntime-1 - r ) {
//if( ostride != ntime && t < r ) {
// int ostride_ = ostride - (ostride > ntime);
// d_out[t + ostride_*r] = CUDART_NAN_F;
// continue;
//}// else if( ostride > ntime && t >= ntime - r ) {
// //d_out[t - (ntime-1) + ostride*r] = CUDART_NAN_F;
// continue;
//}
// Note: Non-existent rows are signified by -1
//if( t == 0 && r == 0 ) {
// printf("t,srcrow0,srcrow1,istride = %i, %i, %i, %i\n", t, srcrow0, srcrow1, istride);
//}
//if( threadIdx.x == 63 && blockIdx.y == 4 ) {
//printf("istride = %i, srcrow0 = %i, srcrow1 = %i, d_in = %p\n", istride, srcrow0, srcrow1, d_in);
//}
//if( t == 0 ) {// && r == 1 ) {
// printf("istride = %i, srcrow0 = %i, srcrow1 = %i, d_in = %p\n", istride, srcrow0, srcrow1, d_in);
//}
DType outval = (srcrow0 != -1) ? d_in[ t + istride*srcrow0] : 0;
if( t >= delay ) {
outval += (srcrow1 != -1) ? d_in[(t-delay) + istride*srcrow1] : 0;
}
int t_ = (is_final_step && reverse_time) ? ntime-1 - t : t;
d_out[t_ + ostride*r] = outval;
}
}
}
template<typename InType, typename OutType>
inline
void launch_fdmt_init_kernel(int ntime,
int nchan,
bool reverse_band,
bool reverse_time,
//int const* d_ndelays,
int const* d_offsets,
InType /*const**/ d_in,
int istride,
OutType* d_out,
int ostride,
cudaStream_t stream=0) {
dim3 block(256, 1); // TODO: Tune this
dim3 grid(std::min((ntime-1)/block.x+1, 65535u),
std::min((nchan-1)/block.y+1, 65535u));
//fdmt_init_kernel<<<grid,block,0,stream>>>(ntime,nchan,
// //d_ndelays,
// d_offsets,
// d_in,istride,
// d_out,ostride);
void* args[] = {&ntime,
&nchan,
&reverse_band,
&reverse_time,
&d_offsets,
&d_in,
&istride,
&d_out,
&ostride};
cudaLaunchKernel((void*)fdmt_init_kernel<InType,OutType>,
grid, block,
&args[0], 0, stream);
}
template<typename DType>
inline
void launch_fdmt_exec_kernel(int ntime,
int nrow,
bool is_final_step,
bool reverse_time,
int const* d_delays,
int2 const* d_srcrows,
DType const* d_in,
int istride,
DType* d_out,
int ostride,
cudaStream_t stream=0) {
//cout << "LAUNCH " << d_in << ", " << d_out << endl;
dim3 block(256, 1); // TODO: Tune this
dim3 grid(std::min((ntime-1)/block.x+1, 65535u),
std::min((nrow -1)/block.y+1, 65535u));
//fdmt_exec_kernel<<<grid,block,0,stream>>>(ntime,nrow,
// d_delays,d_srcrows,
// d_in,istride,
// d_out,ostride);
void* args[] = {&ntime,
&nrow,
&is_final_step,
&reverse_time,
&d_delays,
&d_srcrows,
&d_in,
&istride,
&d_out,
&ostride};
//cudaLaunchKernel((void*)static_cast<void(*)(int, int, const int*, const int2*, const DType*, int, DType*, int)>(fdmt_exec_kernel<DType>),
cudaLaunchKernel((void*)fdmt_exec_kernel<DType>,
grid, block,
&args[0], 0, stream);
}
/*
**** 4096
**** 4096
**** 2048
**** 1066
**** 650
**** 475
**** 381
**** 337
**** 316
**** 302
**** 299
**** 295
**** 293
SB 3
delay 135
Step 10 prev: 58, 78
srcs: 57, 78
NROW_MAX = 4096
STEP 1
STEP 2
STEP 3
STEP 4
STEP 5
STEP 6
STEP 7
STEP 8
STEP 9
STEP 10
STEP 11
*/
class BFfdmt_impl {
typedef int IType;
typedef double FType;
typedef int2 IndexPair;
public: // HACK WAR for what looks like a bug in the CUDA 7.0 compiler
typedef float DType;
private:
IType _nchan;
IType _max_delay;
FType _f0;
FType _df;
FType _exponent;
IType _nrow_max;
IType _plan_stride;
IType _buffer_stride;
std::vector<IType> _offsets;
std::vector<std::vector<IndexPair> > _step_srcrows;
std::vector<std::vector<IType> > _step_delays;
IType* _d_offsets;
IndexPair* _d_step_srcrows;
IType* _d_step_delays;
DType* _d_buffer_a;
DType* _d_buffer_b;
Workspace _plan_storage;
Workspace _exec_storage;
// TODO: Use something other than Thrust
thrust::device_vector<char> _dv_plan_storage;
thrust::device_vector<char> _dv_exec_storage;
cudaStream_t _stream;
bool _reverse_band;
FType cfreq(IType chan) {
return _f0 + _df*chan;
}
FType rel_delay(FType flo, FType fhi, FType fmin, FType fmax) {
FType g = _exponent;
FType eps = std::numeric_limits<FType>::epsilon();
FType denom = ::pow(fmin,g) - ::pow(fmax,g);
if( ::abs(denom) < eps ) {
denom = ::copysign(eps, denom);
}
return (::pow(flo,g) - ::pow(fhi,g)) / denom;
}
FType rel_delay(FType flo, FType fhi) {
FType fmin = cfreq(0);
FType fmax = cfreq(_nchan-1);
//std::swap(fmin, fmax);
//FType fmax = cfreq(_nchan); // HACK TESTING
return rel_delay(flo, fhi, fmin, fmax);
}
IType subband_ndelay(FType f0, FType df) {
FType fracdelay = rel_delay(f0, f0+df);
FType fmaxdelay = fracdelay*(_max_delay-1);
IType ndelay = IType(::ceil(fmaxdelay)) + 1;
return ndelay;
}
public:
BFfdmt_impl() : _nchan(0), _max_delay(0), _f0(0), _df(0), _exponent(0),
_stream(g_cuda_stream) {}
inline IType nchan() const { return _nchan; }
inline IType max_delay() const { return _max_delay; }
void init(IType nchan,
IType max_delay,
FType f0,
FType df,
FType exponent) {
if( df < 0. ) {
_reverse_band = true;
f0 += (nchan-1)*df;
df *= -1;
} else {
_reverse_band = false;
}
if( nchan == _nchan &&
max_delay == _max_delay &&
f0 == _f0 &&
df == _df &&
exponent == _exponent ) {
return;
}
_f0 = f0;
_df = df;
_nchan = nchan;
_max_delay = max_delay;
_exponent = exponent;
// Note: Initialized with 1 entry as dummy for initialization step
std::vector<std::vector<IndexPair> > step_subband_parents(1);
IType nsubband = _nchan;
while( nsubband > 1 ) {
IType step = step_subband_parents.size();
step_subband_parents.push_back(std::vector<IndexPair>());
for( IType sb=0; sb<nsubband; sb+=2 ) {
IType parent0 = sb;
IType parent1 = sb+1;
if( nsubband % 2 ) {
// Note: Alternating left/right-biased merging scheme
if( (step-1) % 2 ) {
parent0 -= 1; // Note: First entry becomes -1 => non-existent
parent1 -= 1;
} else {
// Note: Last entry becomes -1 => non-existent
if( parent1 == nsubband ) parent1 = -1;
}
}
//cout << step << ": " << parent0 << ", " << parent1 << endl;
IndexPair parents = make_int2(parent0, parent1);
step_subband_parents[step].push_back(parents);
}
nsubband = step_subband_parents[step].size();
}
// Note: Includes initialization step
IType nstep = step_subband_parents.size();
std::vector<std::vector<IType> > step_subband_nchans(nstep);
step_subband_nchans[0].assign(_nchan, 1);
for( IType step=1; step<nstep; ++step ) {
IType nsubband = step_subband_parents[step].size();
step_subband_nchans[step].resize(nsubband);
for( IType sb=0; sb<nsubband; ++sb ) {
IndexPair parents = step_subband_parents[step][sb];
IType p0 = parents.x;//first;
IType p1 = parents.y;//second;
IType parent0_nchan = (p0!=-1) ? step_subband_nchans[step-1][p0] : 0;
IType parent1_nchan = (p1!=-1) ? step_subband_nchans[step-1][p1] : 0;
IType child_nchan = parent0_nchan + parent1_nchan;
step_subband_nchans[step][sb] = child_nchan;
}
}
std::vector<std::vector<IType> > step_subband_chan_offsets(nstep);
std::vector<std::vector<IType> > step_subband_row_offsets(nstep);
IType nrow_max = 0;
for( IType step=0; step<nstep; ++step ) {
IType nsubband = step_subband_nchans[step].size();
// Note: +1 to store the total in the last element
// (The array will hold a complete exclusive scan)
step_subband_chan_offsets[step].resize(nsubband+1);
step_subband_row_offsets[step].resize(nsubband+1);
IType chan0 = 0;
IType row_offset = 0;
for( IType sb=0; sb<nsubband; ++sb ) {
IType nchan = step_subband_nchans[step][sb];
FType f0 = cfreq(chan0) - (step == 0 ? 0.5*_df : 0.);
//FType f0 = cfreq(chan0); // HACK TESTING
FType df = _df * (step == 0 ? 1 : nchan-1);
//FType df = _df * nchan; // HACK TESTING
//cout << "df = " << df << endl;
IType ndelay = subband_ndelay(f0, df);
//cout << "NDELAY = " << ndelay << endl;
step_subband_chan_offsets[step][sb] = chan0;
step_subband_row_offsets[step][sb] = row_offset;
chan0 += nchan;
row_offset += ndelay;
}
step_subband_chan_offsets[step][nsubband] = chan0;
step_subband_row_offsets[step][nsubband] = row_offset;
nrow_max = std::max(nrow_max, row_offset);
//*cout << "**** Nrow: " << row_offset << endl;
}
// Save for use during initialization
//plan->_init_subband_row_offsets = step_subband_row_offsets[0];
_offsets = step_subband_row_offsets[0];
_nrow_max = nrow_max;
//cout << "**** " << _nrow_max << endl;
// Note: First entry in these remains empty
std::vector<std::vector<IndexPair> > step_srcrows(nstep);
std::vector<std::vector<IType> > step_delays(nstep);
for( IType step=1; step<nstep; ++step ) {
IType nsubband = step_subband_nchans[step].size();
IType nrow = step_subband_row_offsets[step][nsubband];
//*cout << "nrow " << nrow << endl;
step_srcrows[step].resize(nrow);
step_delays[step].resize(nrow);
for( IType sb=0; sb<nsubband; ++sb ) {
IndexPair parents = step_subband_parents[step][sb];
IType p0 = parents.x;//first;
IType p1 = parents.y;//second;
// TODO: Setting these to 1 instead of 0 in the exceptional case fixed some indexing
// issues, but should double-check that the results are good.
IType p0_nchan = (p0!=-1) ? step_subband_nchans[step-1][p0] : 1;
IType p1_nchan = (p1!=-1) ? step_subband_nchans[step-1][p1] : 1;
// Note: If first parent doesn't exist, then it effectively starts where the second parent starts
// If second parent doesn't exist, then it effectively starts where the first parent ends
IType p0_chan0 = step_subband_chan_offsets[step-1][(p0!=-1) ? p0 : p1];
IType p1_chan0 = step_subband_chan_offsets[step-1][(p1!=-1) ? p1 : p0];
if( p1 == -1 ) {
p1_chan0 += (p0_nchan-1);
}
FType flo = cfreq(p0_chan0);
FType fmidlo = cfreq(p0_chan0 + (p0_nchan-1));
FType fmidhi = cfreq(p1_chan0);
FType fhi = cfreq(p1_chan0 + (p1_nchan-1));
FType cmidlo = rel_delay(flo, fmidlo, flo, fhi);
FType cmidhi = rel_delay(flo, fmidhi, flo, fhi);
/*
// HACK TESTING
FType flo = cfreq(p0_chan0) - 0.5*_df;
FType fmidlo = flo + (p0_nchan-1)*_df;
FType fmidhi = flo + p0_nchan*_df;
FType fhi = flo + (p0_nchan + p1_nchan - 1)*_df;
FType cmidlo = rel_delay(fmidlo, flo, fhi, flo);
FType cmidhi = rel_delay(fmidhi, flo, fhi, flo);
*/
//cout << p0 << ", " << p1 << endl;
//cout << p0_chan0 << ", " << p0_nchan << "; " << p1_chan0 << ", " << p1_nchan << endl;
//cout << cmidlo << ", " << cmidhi << endl;
// TODO: See if should use same approach with these as in fdmt.py
IType beg = step_subband_row_offsets[step][sb];
IType end = step_subband_row_offsets[step][sb+1];
IType ndelay = end - beg;
for( IType delay=0; delay<ndelay; ++delay ) {
IType dmidlo = (IType)::round(delay*cmidlo);
IType dmidhi = (IType)::round(delay*cmidhi);
IType drest = delay - dmidhi;
assert( dmidlo <= delay );
assert( dmidhi <= delay );
IType prev_beg = (p0!=-1) ? step_subband_row_offsets[step-1][p0] : -1;
IType prev_mid0 = (p0!=-1) ? step_subband_row_offsets[step-1][p0+1] : -1;
IType prev_mid1 = (p1!=-1) ? step_subband_row_offsets[step-1][p1] : -1;
IType prev_end = (p1!=-1) ? step_subband_row_offsets[step-1][p1+1] : -1;
// HACK WAR for strange indexing error observed only when nchan=4096
if( p1 != -1 && drest >= prev_end - prev_mid1 ) {
drest -= 1;
}
if( (p0 != -1 && dmidlo >= prev_mid0 - prev_beg) ||
(p1 != -1 && drest >= prev_end - prev_mid1) ) {
cout << "FDMT DEBUGGING INFO" << endl;
cout << "SB " << sb << endl;
cout << "delay " << delay << endl;
cout << "Step " << step << " prev: " << prev_mid0 - prev_beg << ", " << prev_end - prev_mid1 << endl;
cout << " srcs: " << dmidlo << ", " << drest << endl;
}
assert( p0 == -1 || dmidlo < prev_mid0 - prev_beg );
assert( p1 == -1 || drest < prev_end - prev_mid1 );
IType dst_row = step_subband_row_offsets[step ][sb] + delay;
IType src_row0 = (p0!=-1) ? step_subband_row_offsets[step-1][p0] + dmidlo : -1;
IType src_row1 = (p1!=-1) ? step_subband_row_offsets[step-1][p1] + drest : -1;
step_srcrows[step][dst_row].x = src_row0;//first = src_row0;
//cout << "step " << step << ", dst_row = " << dst_row << ", delay = " << dmidhi << ", src_row0 = " << src_row0 << ", src_row1 = " << src_row1 << endl;
step_srcrows[step][dst_row].y = src_row1;//second = src_row1;
step_delays[step][dst_row] = dmidhi;
//IType prev_nsubband = step_subband_nchans[step-1].size();
//IType prev_nrow = step_subband_row_offsets[step-1][prev_nsubband];
}
}
}
// Save for use during execution
_step_srcrows = step_srcrows;
_step_delays = step_delays;
}
bool init_plan_storage(void* storage_ptr, BFsize* storage_size) {
enum {
ALIGNMENT_BYTES = 512,
ALIGNMENT_ELMTS = ALIGNMENT_BYTES / sizeof(int)
};
Workspace workspace(ALIGNMENT_BYTES);
_plan_stride = round_up(_nrow_max, ALIGNMENT_ELMTS);
//int nstep_execute = _step_delays.size() - 1;
int nstep = _step_delays.size();
workspace.reserve(_nchan+1, &_d_offsets);
workspace.reserve(nstep*_plan_stride, &_d_step_srcrows);
workspace.reserve(nstep*_plan_stride, &_d_step_delays);
if( storage_size ) {
if( !storage_ptr ) {
// Return required storage size
*storage_size = workspace.size();
return false;
} else {
BF_ASSERT_EXCEPTION(*storage_size >= workspace.size(),
BF_STATUS_INSUFFICIENT_STORAGE);
}
} else {
// Auto-allocate storage
BF_ASSERT_EXCEPTION(!storage_ptr, BF_STATUS_INVALID_ARGUMENT);
_dv_plan_storage.resize(workspace.size());
storage_ptr = thrust::raw_pointer_cast(&_dv_plan_storage[0]);
}
//std::cout << "workspace.size() = " << workspace.size() << std::endl;
//_d_offsets = (IType*)0x123;
//std::cout << "_d_offsets = " << _d_offsets << std::endl;
//std::cout << "storage_ptr = " << storage_ptr << std::endl;
workspace.commit(storage_ptr);
//std::cout << "_d_offsets = " << _d_offsets << std::endl;
BF_CHECK_CUDA_EXCEPTION( cudaMemcpyAsync(_d_offsets,
&_offsets[0],
sizeof(int )*_offsets.size(),
cudaMemcpyHostToDevice,
_stream),
BF_STATUS_MEM_OP_FAILED );
for( int step=0; step<nstep; ++step ) {
BF_CHECK_CUDA_EXCEPTION( cudaMemcpyAsync(_d_step_srcrows + step*_plan_stride,
&_step_srcrows[step][0],
sizeof(int2)*_step_srcrows[step].size(),
cudaMemcpyHostToDevice,
_stream),
BF_STATUS_MEM_OP_FAILED );
BF_CHECK_CUDA_EXCEPTION( cudaMemcpyAsync(_d_step_delays + step*_plan_stride,
&_step_delays[step][0],
sizeof(int)*_step_delays[step].size(),
cudaMemcpyHostToDevice,
_stream),
BF_STATUS_MEM_OP_FAILED );
}
BF_CHECK_CUDA_EXCEPTION( cudaStreamSynchronize(_stream),
BF_STATUS_DEVICE_ERROR );
return true;
}
bool init_exec_storage(void* storage_ptr, BFsize* storage_size, size_t ntime) {
enum {
ALIGNMENT_BYTES = 512,
ALIGNMENT_ELMTS = ALIGNMENT_BYTES / sizeof(DType)
};
Workspace workspace(ALIGNMENT_BYTES);
//std::cout << "ntime = " << ntime << std::endl;
//std::cout << "_nrow_max = " << _nrow_max << std::endl;
_buffer_stride = round_up(ntime, ALIGNMENT_ELMTS);
//std::cout << "_buffer_stride = " << _buffer_stride << std::endl;
// TODO: Check if truly safe to allocate smaller buffer_b
workspace.reserve(_nrow_max*_buffer_stride, &_d_buffer_a);
workspace.reserve(_nrow_max*_buffer_stride, &_d_buffer_b);
if( storage_size ) {
if( !storage_ptr ) {
//cout << "++++ returning storage size" << endl;
// Return required storage size
*storage_size = workspace.size();
return false;
} else {
//cout << "++++ using user storage" << endl;
BF_ASSERT_EXCEPTION(*storage_size >= workspace.size(),
BF_STATUS_INSUFFICIENT_STORAGE);
}
} else {
//cout << "++++ auto-allocating storage" << endl;
// Auto-allocate storage
BF_ASSERT_EXCEPTION(!storage_ptr, BF_STATUS_INVALID_ARGUMENT);
_dv_exec_storage.resize(workspace.size());
storage_ptr = thrust::raw_pointer_cast(&_dv_exec_storage[0]);
//std::cout << "*** exec storage_ptr = " << storage_ptr << std::endl;
}
//cout << "++++ committing" << endl;
workspace.commit(storage_ptr);
return true;
}
void execute(BFarray const* in,
BFarray const* out,
size_t ntime,
bool negative_delays) {
//cout << "out dtype = " << out->dtype << endl;
BF_ASSERT_EXCEPTION(out->dtype == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE);
BF_ASSERT_EXCEPTION( out->strides[in->ndim-1] == 4, BF_STATUS_UNSUPPORTED_STRIDE);
DType* d_ibuf = _d_buffer_b;
DType* d_obuf = _d_buffer_a;
//std::cout << "_d_buffer_a = " << _d_buffer_a << std::endl;
//std::cout << "_d_buffer_b = " << _d_buffer_b << std::endl;
//BF_ASSERT_EXCEPTION(/*abs*/(in->strides[in->ndim-1]) == 1, BF_STATUS_UNSUPPORTED_STRIDE);
BF_ASSERT_EXCEPTION( in->strides[in->ndim-2] > 0, BF_STATUS_UNSUPPORTED_STRIDE);
BF_ASSERT_EXCEPTION(out->strides[in->ndim-2] > 0, BF_STATUS_UNSUPPORTED_STRIDE);
//bool reverse_time = (in->strides[in->ndim-1] < 0);
bool reverse_time = negative_delays;
BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR);
#define LAUNCH_FDMT_INIT_KERNEL(IterType) \
BF_ASSERT_EXCEPTION(/*abs*/(in->strides[in->ndim-1]) == sizeof(value_type<IterType>::type), BF_STATUS_UNSUPPORTED_STRIDE); \
launch_fdmt_init_kernel(ntime, _nchan, _reverse_band, reverse_time, \
_d_offsets, \
(IterType)in->data, \
in->strides[in->ndim-2]/sizeof(value_type<IterType>::type), /* TODO: Check this*/ \
d_obuf, _buffer_stride, \
_stream)
switch( in->dtype ) {
// HACK testing disabled
// TODO: Get NbitReader working
//case BF_DTYPE_I1: LAUNCH_FDMT_INIT_KERNEL(NbitReader<1>); break;
//case BF_DTYPE_I2: LAUNCH_FDMT_INIT_KERNEL(NbitReader<2>); break;
//case BF_DTYPE_I4: LAUNCH_FDMT_INIT_KERNEL(NbitReader<4>); break;
case BF_DTYPE_I8: LAUNCH_FDMT_INIT_KERNEL(int8_t*); break;
case BF_DTYPE_I16: LAUNCH_FDMT_INIT_KERNEL(int16_t*); break;
case BF_DTYPE_I32: LAUNCH_FDMT_INIT_KERNEL(int32_t*); break;
case BF_DTYPE_U8: LAUNCH_FDMT_INIT_KERNEL(uint8_t*); break;
case BF_DTYPE_U16: LAUNCH_FDMT_INIT_KERNEL(uint16_t*); break;
case BF_DTYPE_U32: LAUNCH_FDMT_INIT_KERNEL(uint32_t*); break;
case BF_DTYPE_F32: LAUNCH_FDMT_INIT_KERNEL(float*); break;
default: BF_ASSERT_EXCEPTION(false, BF_STATUS_UNSUPPORTED_DTYPE);
}
#undef LAUNCH_FDMT_INIT_KERNEL
BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR);
std::swap(d_ibuf, d_obuf);
size_t ostride = _buffer_stride;
IType nstep = _step_delays.size();
for( int step=1; step<nstep; ++step ) {
//cout << "STEP " << step << endl;
IType nrow = _step_srcrows[step].size();
//cout << "nrow " << nrow << endl;
if( step == nstep-1 ) {
d_obuf = (DType*)out->data;
ostride = out->strides[out->ndim-2]/sizeof(DType); // TODO: Check this
// HACK TESTING diagonal reindexing to align output with TOA at highest freq
ostride += reverse_time ? +1 : -1;
}
//cudaDeviceSynchronize(); // HACK TESTING
launch_fdmt_exec_kernel(ntime, nrow, (step==nstep-1), reverse_time,
_d_step_delays + step*_plan_stride,
_d_step_srcrows + step*_plan_stride,
d_ibuf, _buffer_stride,
d_obuf, ostride,
_stream);
//cudaDeviceSynchronize(); // HACK TESTING
//BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR);
std::swap(d_ibuf, d_obuf);
}
BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR);
//cudaDeviceSynchronize(); // HACK TESTING
}
void set_stream(cudaStream_t stream) {
_stream = stream;
}
};
BFstatus bfFdmtCreate(BFfdmt* plan_ptr) {
BF_ASSERT(plan_ptr, BF_STATUS_INVALID_POINTER);
BF_TRY_RETURN_ELSE(*plan_ptr = new BFfdmt_impl(),
*plan_ptr = 0);
}
// **TODO: Passing 'BFarray const* in' here could replace nchan, f0, df and space if BFarray included dimension scales
// Also, could potentially set the output dimension scales (dm0, ddm)
// OR, could just leave these to higher-level wrappers (e.g., Python)
// This might be for the best in the short term
// Initialize an FDMT plan for nchan channels (frequency scale f0, df and the
// given dispersion exponent), then set up the plan storage in 'space'.
// NOTE(review): presumably a NULL plan_storage with non-NULL plan_storage_size
// only queries the required size -- confirm against init_plan_storage.
BFstatus bfFdmtInit(BFfdmt  plan,
                    BFsize  nchan,
                    BFsize  max_delay,
                    double  f0,
                    double  df,
                    double  exponent,
                    BFspace space,
                    void*   plan_storage,
                    BFsize* plan_storage_size) {
    BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
    // Plan storage must live in CUDA-accessible memory.
    BF_ASSERT(space_accessible_from(space, BF_SPACE_CUDA),
              BF_STATUS_UNSUPPORTED_SPACE);
    BF_TRY(plan->init(nchan, max_delay, f0, df, exponent));
    BF_TRY_RETURN(plan->init_plan_storage(plan_storage, plan_storage_size));
}
// Set the plan's CUDA stream; 'stream' must point to a valid cudaStream_t.
BFstatus bfFdmtSetStream(BFfdmt      plan,
                         void const* stream) {
    BF_ASSERT(plan,   BF_STATUS_INVALID_HANDLE);
    BF_ASSERT(stream, BF_STATUS_INVALID_POINTER);
    BF_TRY_RETURN(plan->set_stream(*(cudaStream_t*)stream));
}
// Execute the FDMT transform.  'in' has shape (..., nchan, ntime) and 'out'
// has shape (..., max_delay, ntime); both must be CUDA-accessible.
// If exec storage is only being size-queried, returns success without running.
BFstatus bfFdmtExecute(BFfdmt         plan,
                       BFarray const* in,
                       BFarray const* out,
                       BFbool         negative_delays,
                       void*          exec_storage,
                       BFsize*        exec_storage_size) {
    BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
    BF_ASSERT(in,   BF_STATUS_INVALID_POINTER);
    BF_ASSERT(out,  BF_STATUS_INVALID_POINTER);
    // Second-to-last axes must match the plan's channel/delay extents.
    BF_ASSERT( in->shape[ in->ndim-2] == plan->nchan(),     BF_STATUS_INVALID_SHAPE);
    BF_ASSERT(out->shape[out->ndim-2] == plan->max_delay(), BF_STATUS_INVALID_SHAPE);
    // Time axes (last dimension) must agree between input and output.
    BF_ASSERT( in->shape[in->ndim-1] == out->shape[out->ndim-1], BF_STATUS_INVALID_SHAPE);
    // TODO: BF_ASSERT(...);
    size_t ntime = in->shape[in->ndim-1];
    bool ready;
    BF_TRY(ready = plan->init_exec_storage(exec_storage, exec_storage_size, ntime));
    if( !ready ) {
        // Just requesting exec_storage_size, not ready to execute yet
        return BF_STATUS_SUCCESS;
    }
    BF_ASSERT(space_accessible_from( in->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE);
    BF_ASSERT(space_accessible_from(out->space, BF_SPACE_CUDA), BF_STATUS_INVALID_SPACE);
    BF_TRY_RETURN(plan->execute(in, out, ntime, negative_delays));
}
// Destroy a plan created by bfFdmtCreate, releasing all of its resources.
BFstatus bfFdmtDestroy(BFfdmt plan) {
    BF_ASSERT(plan, BF_STATUS_INVALID_HANDLE);
    delete plan;
    return BF_STATUS_SUCCESS;
}
|
321fe835e2d4cae8009cf495e767243e1f3e6e51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "timer.h"
#define BLOCK_SIZE 32
#define COVERAGE 4
// Score one Needleman-Wunsch cell (mat_row, mat_col) of the N x N matrix.
// Neighbor values come from the shared tile matrix_s when the neighbor lies
// inside it, otherwise from global 'matrix'; first-row/column cells use the
// gap-penalty recurrences directly.  DELETION/INSERTION/MATCH/MISMATCH are
// defined in common.h -- TODO confirm their values.  The result is written
// both to the shared tile (at pos_y, pos_x) and to global memory.
__device__ void private_nw_function_3(unsigned char* reference, unsigned char* query, int* matrix, unsigned int N, int matrix_s[BLOCK_SIZE][BLOCK_SIZE], int pos_x, int pos_y, int mat_row, int mat_col) {
    // Calculate value left, top, and top-left neighbors.
    int top =
        (mat_row == 0) ?
        ((mat_col + 1)*DELETION) : (pos_y == 0) ?
        matrix[ (mat_row - 1)*N + mat_col ] : matrix_s[pos_y - 1][pos_x ];
    int left =
        (mat_col == 0) ?
        ((mat_row + 1)*INSERTION) : (pos_x == 0) ?
        matrix[ mat_row*N + (mat_col - 1) ] : matrix_s[pos_y ][pos_x - 1];
    int topleft =
        (mat_row == 0) ?
        (mat_col*DELETION) : (mat_col == 0) ?
        (mat_row*INSERTION) : (pos_y == 0 || pos_x == 0) ?
        matrix[ (mat_row - 1)*N + mat_col - 1 ] : matrix_s[pos_y - 1][pos_x - 1];
    // Determine scores of the three possible outcomes: insertion, deletion, and match.
    int insertion = top + INSERTION;
    int deletion  = left + DELETION;
    // Get the characters to verify if there is a match.
    char ref_char   = reference[mat_col];
    char query_char = query[mat_row];
    int match = topleft + ( (ref_char == query_char) ? MATCH : MISMATCH );
    // Select the maximum between the three.
    int max = (insertion > deletion) ? insertion : deletion;
    max = (match > max) ? match : max;
    // Update the matrix at the correct position
    matrix_s[ pos_y ][ pos_x ] = max;
    matrix[ mat_row * N + mat_col ] = max;
}
// Computes the Needleman-Wunsch scores for one (BLOCK_SIZE*COVERAGE)^2 tile of
// the N x N scoring matrix.  Launched once per tile anti-diagonal with a
// (BLOCK_SIZE x BLOCK_SIZE) thread block; iteration_number identifies the
// anti-diagonal being processed by this launch.
__global__ void nw_kernel3(unsigned char* reference, unsigned char* query, int* matrix, unsigned int N, int iteration_number) {
    // Transform 1D Grid Coordinates into 2D Diagonal Coordinates.
    int diagonal_block_row = blockIdx.x;
    int diagonal_block_col = iteration_number - diagonal_block_row;
    // Past the main anti-diagonal the grid shrinks, so remap block indices.
    if( iteration_number > gridDim.x) {
        diagonal_block_row = ( (N + BLOCK_SIZE*COVERAGE - 1)/(BLOCK_SIZE*COVERAGE) ) - blockIdx.x - 1;
        diagonal_block_col = iteration_number - diagonal_block_row;
    }
    // Top-left element coordinates of this block's tile in the full matrix.
    int block_row = diagonal_block_row * BLOCK_SIZE * COVERAGE;
    int block_col = diagonal_block_col * BLOCK_SIZE * COVERAGE;
    // Initialise Shared Memory
    __shared__ int matrix_s[BLOCK_SIZE][BLOCK_SIZE];
    // First BLOCK_SIZE x BLOCK_SIZE sub-block is computed wavefront-by-
    // wavefront by the threadIdx.y == 0 row only.
    // NOTE(review): there is no __syncwarp()/__syncthreads() between diagonal
    // iterations; this appears to rely on implicit warp-synchronous execution
    // (BLOCK_SIZE == 32), which is unsafe on GPUs with independent thread
    // scheduling -- confirm.
    if( threadIdx.y == 0 ) {
        for( int diagonal=0; diagonal < BLOCK_SIZE; diagonal++ ) {
            // Get the position of the thread inside the block.
            int pos_x = threadIdx.x;
            int pos_y = diagonal - pos_x;
            // Calculate the positions of the thread inside the matrix.
            int mat_row = block_row + pos_y;
            int mat_col = block_col + pos_x;
            if( mat_row < N && mat_col < N && pos_x < BLOCK_SIZE && pos_y < BLOCK_SIZE && pos_x >= 0 && pos_y >= 0) {
                private_nw_function_3(reference, query, matrix, N, matrix_s, pos_x, pos_y, mat_row, mat_col);
            }
        }
    }
    __syncthreads();
    // Checkerboard activation mask: half the threads compute in each phase.
    bool isThreadActive = (threadIdx.x + threadIdx.y) % 2 == 0;
    // (row, col) walk the COVERAGE x COVERAGE grid of sub-blocks in
    // anti-diagonal order; diag is the index of the current sub-block diagonal.
    int row = 0;
    int col = 0;
    int diag = 0;
    for( int subBlock = 1; subBlock < COVERAGE * COVERAGE; subBlock++ ) {
        row++;
        col--;
        // AFTER GREATEST DIAGONAL
        if( diag >= COVERAGE ) {
            diag++;
            row = diag - COVERAGE + 1;
            col = COVERAGE;
        }
        // RESET
        if(col < 0) {
            diag++;
            row = 0;
            col = diag;
        }
        if ( diag >= COVERAGE && col < diag - COVERAGE + 1 ) {
            diag++;
            row = diag - COVERAGE + 1;
            col = COVERAGE;
        }
        __syncthreads();
        // LOAD NEXT SUB-BLOCK
        if( threadIdx.x == 0 && threadIdx.y == 0 ) {
            // Calculate the positions of the thread inside the matrix.
            int mat_row = block_row + row * BLOCK_SIZE;
            int mat_col = block_col + col * BLOCK_SIZE;
            if ( mat_row < N && mat_col < N ) {
                private_nw_function_3(reference, query, matrix, N, matrix_s, 0, 0, mat_row, mat_col);
            }
        }
        __syncthreads();
        int pos_x = threadIdx.x;
        int pos_y = threadIdx.y;
        // Map each thread onto the sub-block whose wavefront it belongs to.
        int current_diagonal = (pos_x + pos_y) / 2;
        int subBlock_row = row - current_diagonal;
        int subBlock_col = diag - subBlock_row;
        // Wrap threads that fell off the top of the sub-block diagonal
        // (upper-triangular phase, diag < COVERAGE).
        if( subBlock_row < 0 && diag < COVERAGE ) {
            int num_jumps = (-1 * subBlock_row) - 1;
            int diag_jumps = 1;
            subBlock_row = diag - diag_jumps;
            subBlock_col = 0;
            while( num_jumps > 0 ) {
                subBlock_row--;
                subBlock_col++;
                if( subBlock_row < 0 ) {
                    diag_jumps++;
                    subBlock_row = diag - diag_jumps;
                    subBlock_col = 0;
                }
                num_jumps--;
            }
            if( subBlock == 9 ) {
                printf("num_jumps: %d, x: %d, y: %d, row: %d, col: %d\n", num_jumps, threadIdx.x, threadIdx.y, subBlock_row, subBlock_col);
            }
            if ( subBlock_row < 0 ) {
                subBlock_row = 0;
                subBlock_col = 0;
            }
        }
        // Same wrapping for the lower-triangular phase (diag >= COVERAGE).
        if( subBlock_row < diag - COVERAGE + 1 && diag >= COVERAGE ) {
            int num_jumps = (diag - COVERAGE + 1) - subBlock_row - 1;
            int diag_jumps = 1;
            subBlock_row = COVERAGE;
            subBlock_col = (diag - diag_jumps) - COVERAGE + 1;
            while( num_jumps > 0 ) {
                subBlock_row--;
                subBlock_col++;
                if( subBlock_row < diag - COVERAGE + 1 ) {
                    diag_jumps++;
                    subBlock_row = COVERAGE;
                    subBlock_col = (diag - diag_jumps) - COVERAGE + 1;
                }
                if( diag_jumps <= 0 ) {
                    subBlock_row = 0;
                    subBlock_col = 0;
                    break;
                }
                num_jumps--;
            }
            if ( subBlock_row < 0 ) {
                subBlock_row = 0;
                subBlock_col = 0;
            }
        }
        __syncthreads();
        int mat_row = block_row + subBlock_row * BLOCK_SIZE + pos_y;
        int mat_col = block_col + subBlock_col * BLOCK_SIZE + pos_x;
        // First Activation
        // NOTE(review): the condition repeats 'threadIdx.x != 0'; the second
        // term was probably intended to be 'threadIdx.y != 0' -- confirm.
        if( isThreadActive && threadIdx.x != 0 && threadIdx.x != 0 ) {
            // if( subBlock == 2 ) {
            // printf( "mat_col: %d\n", mat_col);
            // }
            if( mat_row < N && mat_col < N ) {
                private_nw_function_3(reference, query, matrix, N, matrix_s, pos_x, pos_y, mat_row, mat_col);
            }
        }
        __syncthreads();
        isThreadActive = !(isThreadActive);
        // SECOND ACTIVATION
        if( isThreadActive ) {
            if( mat_row < N && mat_col < N ) {
                private_nw_function_3(reference, query, matrix, N, matrix_s, pos_x, pos_y, mat_row, mat_col);
            }
        }
        isThreadActive = !(isThreadActive);
        __syncthreads();
        // if( subBlock == 11 && threadIdx.x == 0 && threadIdx.y == 0 ) {
        // printf("%d, %d", mat_row, mat_col);
        // printf("\n");
        // for( int i=32*0; i<32*1; i++ ) {
        // for( int j=32*1; j<32*2; j++ ) {
        // printf( "%d, ", matrix[i*N + j]);
        // }
        // printf("\n");
        // }
        // printf("\n");
        // for( int i=0; i<32; i++ ) {
        // for( int j=0; j<32; j++ ) {
        // printf( "%d, ", matrix_s[i][j]);
        // }
        // printf("\n");
        // }
        // }
        __syncthreads();
    }
    // Debug dump of the top-left 32x32 corner of the global matrix.
    if( threadIdx.x == 0 && threadIdx.y == 0 ) {
        printf("\nResult:\n\n");
        for( int i=32*0; i<32*1; i++ ) {
            for( int j=32*0; j<32*1; j++ ) {
                printf( "%d, ", matrix[i*N + j]);
            }
            printf("\n");
        }
    }
}
// Host driver: sweeps the tile anti-diagonals of the N x N scoring matrix,
// launching nw_kernel3 once per diagonal with one block per active tile.
void nw_gpu3(unsigned char* reference_d, unsigned char* query_d, int* matrix_d, unsigned int N) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    // Number of (BLOCK_SIZE*COVERAGE)-wide tiles per matrix side (ceil division).
    const unsigned int tilesPerSide = (N + BLOCK_SIZE * COVERAGE - 1) / (BLOCK_SIZE * COVERAGE);
    const int totalDiagonals = 2 * (int)tilesPerSide - 1;
    for (int iter = 0; iter < totalDiagonals; ++iter) {
        // The diagonal grows until the main anti-diagonal, then shrinks.
        unsigned int numBlocks = (iter < (int)tilesPerSide)
                                     ? (unsigned int)(iter + 1)
                                     : (2 * tilesPerSide - iter - 1);
        printf("iteration: %d, blocks: %d\n", iter, numBlocks);
        hipLaunchKernelGGL(nw_kernel3, dim3(numBlocks), threads, 0, 0,
                           reference_d, query_d, matrix_d, N, iter);
        hipDeviceSynchronize();
    }
}
| 321fe835e2d4cae8009cf495e767243e1f3e6e51.cu |
#include "common.h"
#include "timer.h"
#define BLOCK_SIZE 32
#define COVERAGE 4
// Score one Needleman-Wunsch cell (mat_row, mat_col) of the N x N matrix.
// Neighbor values come from the shared tile matrix_s when the neighbor lies
// inside it, otherwise from global 'matrix'; first-row/column cells use the
// gap-penalty recurrences directly.  DELETION/INSERTION/MATCH/MISMATCH are
// defined in common.h -- TODO confirm their values.  The result is written
// both to the shared tile (at pos_y, pos_x) and to global memory.
__device__ void private_nw_function_3(unsigned char* reference, unsigned char* query, int* matrix, unsigned int N, int matrix_s[BLOCK_SIZE][BLOCK_SIZE], int pos_x, int pos_y, int mat_row, int mat_col) {
    // Calculate value left, top, and top-left neighbors.
    int top =
        (mat_row == 0) ?
        ((mat_col + 1)*DELETION) : (pos_y == 0) ?
        matrix[ (mat_row - 1)*N + mat_col ] : matrix_s[pos_y - 1][pos_x ];
    int left =
        (mat_col == 0) ?
        ((mat_row + 1)*INSERTION) : (pos_x == 0) ?
        matrix[ mat_row*N + (mat_col - 1) ] : matrix_s[pos_y ][pos_x - 1];
    int topleft =
        (mat_row == 0) ?
        (mat_col*DELETION) : (mat_col == 0) ?
        (mat_row*INSERTION) : (pos_y == 0 || pos_x == 0) ?
        matrix[ (mat_row - 1)*N + mat_col - 1 ] : matrix_s[pos_y - 1][pos_x - 1];
    // Determine scores of the three possible outcomes: insertion, deletion, and match.
    int insertion = top + INSERTION;
    int deletion  = left + DELETION;
    // Get the characters to verify if there is a match.
    char ref_char   = reference[mat_col];
    char query_char = query[mat_row];
    int match = topleft + ( (ref_char == query_char) ? MATCH : MISMATCH );
    // Select the maximum between the three.
    int max = (insertion > deletion) ? insertion : deletion;
    max = (match > max) ? match : max;
    // Update the matrix at the correct position
    matrix_s[ pos_y ][ pos_x ] = max;
    matrix[ mat_row * N + mat_col ] = max;
}
// Computes the Needleman-Wunsch scores for one (BLOCK_SIZE*COVERAGE)^2 tile of
// the N x N scoring matrix.  Launched once per tile anti-diagonal with a
// (BLOCK_SIZE x BLOCK_SIZE) thread block; iteration_number identifies the
// anti-diagonal being processed by this launch.
__global__ void nw_kernel3(unsigned char* reference, unsigned char* query, int* matrix, unsigned int N, int iteration_number) {
    // Transform 1D Grid Coordinates into 2D Diagonal Coordinates.
    int diagonal_block_row = blockIdx.x;
    int diagonal_block_col = iteration_number - diagonal_block_row;
    // Past the main anti-diagonal the grid shrinks, so remap block indices.
    if( iteration_number > gridDim.x) {
        diagonal_block_row = ( (N + BLOCK_SIZE*COVERAGE - 1)/(BLOCK_SIZE*COVERAGE) ) - blockIdx.x - 1;
        diagonal_block_col = iteration_number - diagonal_block_row;
    }
    // Top-left element coordinates of this block's tile in the full matrix.
    int block_row = diagonal_block_row * BLOCK_SIZE * COVERAGE;
    int block_col = diagonal_block_col * BLOCK_SIZE * COVERAGE;
    // Initialise Shared Memory
    __shared__ int matrix_s[BLOCK_SIZE][BLOCK_SIZE];
    // First BLOCK_SIZE x BLOCK_SIZE sub-block is computed wavefront-by-
    // wavefront by the threadIdx.y == 0 row only.
    // NOTE(review): there is no __syncwarp()/__syncthreads() between diagonal
    // iterations; this appears to rely on implicit warp-synchronous execution
    // (BLOCK_SIZE == 32), which is unsafe on GPUs with independent thread
    // scheduling -- confirm.
    if( threadIdx.y == 0 ) {
        for( int diagonal=0; diagonal < BLOCK_SIZE; diagonal++ ) {
            // Get the position of the thread inside the block.
            int pos_x = threadIdx.x;
            int pos_y = diagonal - pos_x;
            // Calculate the positions of the thread inside the matrix.
            int mat_row = block_row + pos_y;
            int mat_col = block_col + pos_x;
            if( mat_row < N && mat_col < N && pos_x < BLOCK_SIZE && pos_y < BLOCK_SIZE && pos_x >= 0 && pos_y >= 0) {
                private_nw_function_3(reference, query, matrix, N, matrix_s, pos_x, pos_y, mat_row, mat_col);
            }
        }
    }
    __syncthreads();
    // Checkerboard activation mask: half the threads compute in each phase.
    bool isThreadActive = (threadIdx.x + threadIdx.y) % 2 == 0;
    // (row, col) walk the COVERAGE x COVERAGE grid of sub-blocks in
    // anti-diagonal order; diag is the index of the current sub-block diagonal.
    int row = 0;
    int col = 0;
    int diag = 0;
    for( int subBlock = 1; subBlock < COVERAGE * COVERAGE; subBlock++ ) {
        row++;
        col--;
        // AFTER GREATEST DIAGONAL
        if( diag >= COVERAGE ) {
            diag++;
            row = diag - COVERAGE + 1;
            col = COVERAGE;
        }
        // RESET
        if(col < 0) {
            diag++;
            row = 0;
            col = diag;
        }
        if ( diag >= COVERAGE && col < diag - COVERAGE + 1 ) {
            diag++;
            row = diag - COVERAGE + 1;
            col = COVERAGE;
        }
        __syncthreads();
        // LOAD NEXT SUB-BLOCK
        if( threadIdx.x == 0 && threadIdx.y == 0 ) {
            // Calculate the positions of the thread inside the matrix.
            int mat_row = block_row + row * BLOCK_SIZE;
            int mat_col = block_col + col * BLOCK_SIZE;
            if ( mat_row < N && mat_col < N ) {
                private_nw_function_3(reference, query, matrix, N, matrix_s, 0, 0, mat_row, mat_col);
            }
        }
        __syncthreads();
        int pos_x = threadIdx.x;
        int pos_y = threadIdx.y;
        // Map each thread onto the sub-block whose wavefront it belongs to.
        int current_diagonal = (pos_x + pos_y) / 2;
        int subBlock_row = row - current_diagonal;
        int subBlock_col = diag - subBlock_row;
        // Wrap threads that fell off the top of the sub-block diagonal
        // (upper-triangular phase, diag < COVERAGE).
        if( subBlock_row < 0 && diag < COVERAGE ) {
            int num_jumps = (-1 * subBlock_row) - 1;
            int diag_jumps = 1;
            subBlock_row = diag - diag_jumps;
            subBlock_col = 0;
            while( num_jumps > 0 ) {
                subBlock_row--;
                subBlock_col++;
                if( subBlock_row < 0 ) {
                    diag_jumps++;
                    subBlock_row = diag - diag_jumps;
                    subBlock_col = 0;
                }
                num_jumps--;
            }
            if( subBlock == 9 ) {
                printf("num_jumps: %d, x: %d, y: %d, row: %d, col: %d\n", num_jumps, threadIdx.x, threadIdx.y, subBlock_row, subBlock_col);
            }
            if ( subBlock_row < 0 ) {
                subBlock_row = 0;
                subBlock_col = 0;
            }
        }
        // Same wrapping for the lower-triangular phase (diag >= COVERAGE).
        if( subBlock_row < diag - COVERAGE + 1 && diag >= COVERAGE ) {
            int num_jumps = (diag - COVERAGE + 1) - subBlock_row - 1;
            int diag_jumps = 1;
            subBlock_row = COVERAGE;
            subBlock_col = (diag - diag_jumps) - COVERAGE + 1;
            while( num_jumps > 0 ) {
                subBlock_row--;
                subBlock_col++;
                if( subBlock_row < diag - COVERAGE + 1 ) {
                    diag_jumps++;
                    subBlock_row = COVERAGE;
                    subBlock_col = (diag - diag_jumps) - COVERAGE + 1;
                }
                if( diag_jumps <= 0 ) {
                    subBlock_row = 0;
                    subBlock_col = 0;
                    break;
                }
                num_jumps--;
            }
            if ( subBlock_row < 0 ) {
                subBlock_row = 0;
                subBlock_col = 0;
            }
        }
        __syncthreads();
        int mat_row = block_row + subBlock_row * BLOCK_SIZE + pos_y;
        int mat_col = block_col + subBlock_col * BLOCK_SIZE + pos_x;
        // First Activation
        // NOTE(review): the condition repeats 'threadIdx.x != 0'; the second
        // term was probably intended to be 'threadIdx.y != 0' -- confirm.
        if( isThreadActive && threadIdx.x != 0 && threadIdx.x != 0 ) {
            // if( subBlock == 2 ) {
            // printf( "mat_col: %d\n", mat_col);
            // }
            if( mat_row < N && mat_col < N ) {
                private_nw_function_3(reference, query, matrix, N, matrix_s, pos_x, pos_y, mat_row, mat_col);
            }
        }
        __syncthreads();
        isThreadActive = !(isThreadActive);
        // SECOND ACTIVATION
        if( isThreadActive ) {
            if( mat_row < N && mat_col < N ) {
                private_nw_function_3(reference, query, matrix, N, matrix_s, pos_x, pos_y, mat_row, mat_col);
            }
        }
        isThreadActive = !(isThreadActive);
        __syncthreads();
        // if( subBlock == 11 && threadIdx.x == 0 && threadIdx.y == 0 ) {
        // printf("%d, %d", mat_row, mat_col);
        // printf("\n");
        // for( int i=32*0; i<32*1; i++ ) {
        // for( int j=32*1; j<32*2; j++ ) {
        // printf( "%d, ", matrix[i*N + j]);
        // }
        // printf("\n");
        // }
        // printf("\n");
        // for( int i=0; i<32; i++ ) {
        // for( int j=0; j<32; j++ ) {
        // printf( "%d, ", matrix_s[i][j]);
        // }
        // printf("\n");
        // }
        // }
        __syncthreads();
    }
    // Debug dump of the top-left 32x32 corner of the global matrix.
    if( threadIdx.x == 0 && threadIdx.y == 0 ) {
        printf("\nResult:\n\n");
        for( int i=32*0; i<32*1; i++ ) {
            for( int j=32*0; j<32*1; j++ ) {
                printf( "%d, ", matrix[i*N + j]);
            }
            printf("\n");
        }
    }
}
// Host driver: sweeps the tile anti-diagonals of the N x N scoring matrix,
// launching nw_kernel3 once per diagonal with one block per active tile.
void nw_gpu3(unsigned char* reference_d, unsigned char* query_d, int* matrix_d, unsigned int N) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    // Number of (BLOCK_SIZE*COVERAGE)-wide tiles per matrix side (ceil division).
    const unsigned int tilesPerSide = (N + BLOCK_SIZE * COVERAGE - 1) / (BLOCK_SIZE * COVERAGE);
    const int totalDiagonals = 2 * (int)tilesPerSide - 1;
    for (int iter = 0; iter < totalDiagonals; ++iter) {
        // The diagonal grows until the main anti-diagonal, then shrinks.
        unsigned int numBlocks = (iter < (int)tilesPerSide)
                                     ? (unsigned int)(iter + 1)
                                     : (2 * tilesPerSide - iter - 1);
        printf("iteration: %d, blocks: %d\n", iter, numBlocks);
        nw_kernel3<<<numBlocks, threads>>>(reference_d, query_d, matrix_d, N, iter);
        cudaDeviceSynchronize();
    }
}
|
ac86ba831d93d3127beb1acc14233badd5b0fdd9.hip | // !!! This is a file automatically generated by hipify!!!
//======================================
//
//
// GPU
//======================================
#include"stdafx.h"
#include"MergeAverage_DATA.hpp"
#include"MergeAverage_FUNC.hpp"
#include"MergeAverage_Base.h"
#include"MergeAverage_GPU.cuh"
#include"MergeAverage_LayerData_GPU.cuh"
using namespace Gravisbell;
using namespace Gravisbell::Layer::NeuralNetwork;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
/** Constructor: caches the layer data and creates the BLAS handle used by
    the forward/backward SAXPY accumulations. */
MergeAverage_GPU::MergeAverage_GPU(Gravisbell::GUID guid, MergeAverage_LayerData_GPU& i_layerData, const std::vector<IODataStruct>& i_lpInputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
    : MergeAverage_Base (guid, i_lpInputDataStruct, i_layerData.GetOutputDataStruct(&i_lpInputDataStruct[0], (U32)i_lpInputDataStruct.size()))
    , layerData (i_layerData)     /**< layer data */
    , outputBufferCount (0)       /**< output buffer element count */
{
    hipblasCreate(&cublasHandle);
}
/** Destructor: releases the BLAS handle. */
MergeAverage_GPU::~MergeAverage_GPU()
{
    hipblasDestroy(cublasHandle);
}
//================================
//
//================================
/** Get the layer kind flags: the base kind with the GPU bit set. */
U32 MergeAverage_GPU::GetLayerKind()const
{
    return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase();
}
/** Initialize the layer data.
    @return 0 (ERROR_CODE_NONE) on success. */
ErrorCode MergeAverage_GPU::Initialize(void)
{
    return this->layerData.Initialize();
}
//===========================
//
//===========================
/** Get the layer data (mutable and const accessors). */
MergeAverage_LayerData_Base& MergeAverage_GPU::GetLayerData()
{
    return this->layerData;
}
const MergeAverage_LayerData_Base& MergeAverage_GPU::GetLayerData()const
{
    return this->layerData;
}
//================================
//
//================================
/** Pre-processing before learning.  Must be called once after the network is
    built and before any learning pass; simply delegates to
    PreProcessCalculate().  On failure, later learning calls are invalid. */
ErrorCode MergeAverage_GPU::PreProcessLearn()
{
    ErrorCode errorCode = this->PreProcessCalculate();
    if(errorCode != ErrorCode::ERROR_CODE_NONE)
        return errorCode;
    return ErrorCode::ERROR_CODE_NONE;
}
/** Pre-processing before calculation.  Caches the per-input and output buffer
    element counts; must be called once before Calculate is used.
    @return ERROR_CODE_NONE on success, or a fraud-count error when any
            buffer count is zero. */
ErrorCode MergeAverage_GPU::PreProcessCalculate()
{
    // Cache the element count of every input buffer.
    this->lpInputBufferCount.resize(this->GetInputDataCount());
    for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++)
    {
        this->lpInputBufferCount[inputNum] = this->GetInputBufferCount(inputNum);
        if(this->lpInputBufferCount[inputNum] == 0)
            return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT;
    }
    // Cache the output buffer element count.
    this->outputBufferCount = this->GetOutputBufferCount();
    if(this->outputBufferCount == 0)
        return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT;
    return ErrorCode::ERROR_CODE_NONE;
}
/** Loop initialization, called before each pass over a data set.  No-op. */
ErrorCode MergeAverage_GPU::PreProcessLoop()
{
    return ErrorCode::ERROR_CODE_NONE;
}
//================================
//
//================================
/** Forward pass on the device: output = (1/numInputs) * sum(inputs),
    i.e. the element-wise average of all input buffers.  Inputs shorter than
    the output contribute only to their leading elements.
    @param i_lppInputBuffer  device input buffers, one per input connection
    @param o_lppOutputBuffer device output buffer [batch][outputBufferCount]
    @return ERROR_CODE_NONE on success, ERROR_CODE_CUDA_CALCULATE on a BLAS
            failure. */
ErrorCode MergeAverage_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppOutputBuffer)
{
    // Zero-initialize the output, then accumulate scaled inputs into it.
    hipMemset(&o_lppOutputBuffer[0], 0, sizeof(F32)*this->outputBufferCount*this->GetBatchSize());
    F32 alpha = 1.0f / this->GetInputDataCount();
    for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
    {
        for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++)
        {
            // out[batch] += alpha * in[inputNum][batch]  (element-wise)
            hipblasStatus_t err = hipblasSaxpy(
                this->cublasHandle,
                min(this->lpInputBufferCount[inputNum], outputBufferCount),
                &alpha,
                &i_lppInputBuffer[inputNum][batchNum * this->lpInputBufferCount[inputNum]],
                1,
                &o_lppOutputBuffer[batchNum*this->outputBufferCount],
                1);
            if(err != 0)
                return ErrorCode::ERROR_CODE_CUDA_CALCULATE;
        }
    }
    hipDeviceSynchronize();
    return ErrorCode::ERROR_CODE_NONE;
}
//================================
//
//================================
/** Backward pass on the device (no learning happens here): distributes the
    scaled output gradient to every input gradient buffer,
    dIn[k] = (1/numInputs) * dOut, which is the derivative of the average.
    Skipped entirely when o_lppDInputBuffer is NULL.
    @param o_lppDInputBuffer  input-gradient buffers [input][batch][inCount]
    @param i_lppDOutputBuffer output gradient [batch][outputBufferCount]
    @return ERROR_CODE_NONE on success, ERROR_CODE_CUDA_CALCULATE on a BLAS
            failure. */
ErrorCode MergeAverage_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppDInputBuffer[], CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
    if(o_lppDInputBuffer)
    {
        // Zero-initialize every input-gradient buffer.
        for(U32 inputNum=0; inputNum<this->GetInputDataCount(); inputNum++)
        {
            hipMemset(o_lppDInputBuffer[inputNum], 0, sizeof(F32)*this->lpInputBufferCount[inputNum]*this->GetBatchSize());
        }
        F32 alpha = 1.0f / this->GetInputDataCount();
        for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
        {
            for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++)
            {
                // dIn[inputNum][batch] += alpha * dOut[batch]
                hipblasStatus_t err = hipblasSaxpy(
                    this->cublasHandle,
                    min(this->lpInputBufferCount[inputNum], outputBufferCount),
                    &alpha,
                    &i_lppDOutputBuffer[batchNum*this->outputBufferCount],
                    1,
                    &o_lppDInputBuffer[inputNum][batchNum * this->lpInputBufferCount[inputNum]],
                    1);
                if(err != 0)
                    return ErrorCode::ERROR_CODE_CUDA_CALCULATE;
            }
        }
        hipDeviceSynchronize();
    }
#ifdef _DEBUG
    // Debug-only host mirrors of all device buffers for inspection.
    std::vector<std::vector<float>> lpTmpInputBuffer(this->GetInputDataCount());
    for(int i=0; i<lpTmpInputBuffer.size(); i++)
    {
        lpTmpInputBuffer[i].resize(this->GetBatchSize() * this->lpInputBufferCount[i]);
        hipMemcpy(&lpTmpInputBuffer[i][0], i_lppInputBuffer[i], sizeof(float)*lpTmpInputBuffer[i].size(), hipMemcpyDeviceToHost);
    }
    std::vector<float> lpTmpOutputBuffer(this->GetBatchSize() * this->outputBufferCount);
    hipMemcpy(&lpTmpOutputBuffer[0], i_lppOutputBuffer, sizeof(float)*lpTmpOutputBuffer.size(), hipMemcpyDeviceToHost);
    std::vector<float> lpTmpDOutputBuffer(this->GetBatchSize() * this->outputBufferCount);
    hipMemcpy(&lpTmpDOutputBuffer[0], i_lppDOutputBuffer, sizeof(float)*lpTmpDOutputBuffer.size(), hipMemcpyDeviceToHost);
    std::vector<std::vector<float>> lpTmpDInputBuffer(this->GetInputDataCount());
    for(int i=0; i<lpTmpInputBuffer.size(); i++)
    {
        lpTmpDInputBuffer[i].resize(this->GetBatchSize() * this->lpInputBufferCount[i]);
        hipMemcpy(&lpTmpDInputBuffer[i][0], o_lppDInputBuffer[i], sizeof(float)*lpTmpDInputBuffer[i].size(), hipMemcpyDeviceToHost);
    }
#endif
    return ErrorCode::ERROR_CODE_NONE;
}
/** Training pass: averaging has no learnable parameters, so this just
    propagates the error to the inputs via CalculateDInput_device. */
ErrorCode MergeAverage_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppDInputBuffer[], CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
    return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer);
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
| ac86ba831d93d3127beb1acc14233badd5b0fdd9.cu | //======================================
// Merge layer of the feed-forward neural network (element-wise average)
// combination / activation
// GPU implementation
//======================================
#include"stdafx.h"
#include"MergeAverage_DATA.hpp"
#include"MergeAverage_FUNC.hpp"
#include"MergeAverage_Base.h"
#include"MergeAverage_GPU.cuh"
#include"MergeAverage_LayerData_GPU.cuh"
using namespace Gravisbell;
using namespace Gravisbell::Layer::NeuralNetwork;
namespace Gravisbell {
namespace Layer {
namespace NeuralNetwork {
/** Constructor: caches the layer data and creates the cuBLAS handle used by
    the forward/backward SAXPY accumulations. */
MergeAverage_GPU::MergeAverage_GPU(Gravisbell::GUID guid, MergeAverage_LayerData_GPU& i_layerData, const std::vector<IODataStruct>& i_lpInputDataStruct, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager)
    : MergeAverage_Base (guid, i_lpInputDataStruct, i_layerData.GetOutputDataStruct(&i_lpInputDataStruct[0], (U32)i_lpInputDataStruct.size()))
    , layerData (i_layerData)     /**< layer data */
    , outputBufferCount (0)       /**< output buffer element count */
{
    cublasCreate(&cublasHandle);
}
/** Destructor: releases the cuBLAS handle. */
MergeAverage_GPU::~MergeAverage_GPU()
{
    cublasDestroy(cublasHandle);
}
//================================
// 基本処理
//================================
/** Get the layer kind flags: the base kind with the GPU bit set. */
U32 MergeAverage_GPU::GetLayerKind()const
{
    return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase();
}
/** Initialize the layer data (original doc: initialize each neuron randomly).
    @return 0 (ERROR_CODE_NONE) on success. */
ErrorCode MergeAverage_GPU::Initialize(void)
{
    return this->layerData.Initialize();
}
//===========================
// レイヤーデータ関連
//===========================
/** Get the layer data (mutable and const accessors). */
MergeAverage_LayerData_Base& MergeAverage_GPU::GetLayerData()
{
    return this->layerData;
}
const MergeAverage_LayerData_Base& MergeAverage_GPU::GetLayerData()const
{
    return this->layerData;
}
//================================
// 演算処理
//================================
/** Pre-processing before learning.  Must be called once after the network is
    built and before any learning pass (not per data item); delegates to
    PreProcessCalculate().  On failure, later learning calls are invalid. */
ErrorCode MergeAverage_GPU::PreProcessLearn()
{
    ErrorCode errorCode = this->PreProcessCalculate();
    if(errorCode != ErrorCode::ERROR_CODE_NONE)
        return errorCode;
    return ErrorCode::ERROR_CODE_NONE;
}
/** Pre-processing before calculation.  Caches the per-input and output buffer
    element counts; must be called once before Calculate is used.
    @return ERROR_CODE_NONE on success, or a fraud-count error when any
            buffer count is zero. */
ErrorCode MergeAverage_GPU::PreProcessCalculate()
{
    // Cache the element count of every input buffer.
    this->lpInputBufferCount.resize(this->GetInputDataCount());
    for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++)
    {
        this->lpInputBufferCount[inputNum] = this->GetInputBufferCount(inputNum);
        if(this->lpInputBufferCount[inputNum] == 0)
            return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT;
    }
    // Cache the output buffer element count.
    this->outputBufferCount = this->GetOutputBufferCount();
    if(this->outputBufferCount == 0)
        return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT;
    return ErrorCode::ERROR_CODE_NONE;
}
/** Loop initialization, called before each pass over a data set.  No-op. */
ErrorCode MergeAverage_GPU::PreProcessLoop()
{
    return ErrorCode::ERROR_CODE_NONE;
}
//================================
// 演算処理
//================================
/** Forward pass on the device: output = (1/numInputs) * sum(inputs),
    i.e. the element-wise average of all input buffers.  Inputs shorter than
    the output contribute only to their leading elements.
    @param i_lppInputBuffer  device input buffers, one per input connection
    @param o_lppOutputBuffer device output buffer [batch][outputBufferCount]
    @return ERROR_CODE_NONE on success, ERROR_CODE_CUDA_CALCULATE on a
            cuBLAS failure. */
ErrorCode MergeAverage_GPU::Calculate_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppOutputBuffer)
{
    // Zero-initialize the output, then accumulate scaled inputs into it.
    cudaMemset(&o_lppOutputBuffer[0], 0, sizeof(F32)*this->outputBufferCount*this->GetBatchSize());
    F32 alpha = 1.0f / this->GetInputDataCount();
    for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
    {
        for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++)
        {
            // out[batch] += alpha * in[inputNum][batch]  (element-wise)
            cublasStatus_t err = cublasSaxpy_v2(
                this->cublasHandle,
                min(this->lpInputBufferCount[inputNum], outputBufferCount),
                &alpha,
                &i_lppInputBuffer[inputNum][batchNum * this->lpInputBufferCount[inputNum]],
                1,
                &o_lppOutputBuffer[batchNum*this->outputBufferCount],
                1);
            if(err != 0)
                return ErrorCode::ERROR_CODE_CUDA_CALCULATE;
        }
    }
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // documented, behavior-identical replacement.
    cudaDeviceSynchronize();
    return ErrorCode::ERROR_CODE_NONE;
}
//================================
// 学習処理
//================================
/** Backward pass on the device (no learning happens here): distributes the
    scaled output gradient to every input gradient buffer,
    dIn[k] = (1/numInputs) * dOut, the derivative of the average.
    Skipped entirely when o_lppDInputBuffer is NULL.
    @param o_lppDInputBuffer  input-gradient buffers [input][batch][inCount]
    @param i_lppDOutputBuffer output gradient [batch][outputBufferCount]
    @return ERROR_CODE_NONE on success, ERROR_CODE_CUDA_CALCULATE on a
            cuBLAS failure. */
ErrorCode MergeAverage_GPU::CalculateDInput_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppDInputBuffer[], CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
    if(o_lppDInputBuffer)
    {
        // Zero-initialize every input-gradient buffer.
        for(U32 inputNum=0; inputNum<this->GetInputDataCount(); inputNum++)
        {
            cudaMemset(o_lppDInputBuffer[inputNum], 0, sizeof(F32)*this->lpInputBufferCount[inputNum]*this->GetBatchSize());
        }
        F32 alpha = 1.0f / this->GetInputDataCount();
        for(U32 batchNum=0; batchNum<this->GetBatchSize(); batchNum++)
        {
            for(U32 inputNum=0; inputNum<this->lpInputBufferCount.size(); inputNum++)
            {
                // dIn[inputNum][batch] += alpha * dOut[batch]
                cublasStatus_t err = cublasSaxpy_v2(
                    this->cublasHandle,
                    min(this->lpInputBufferCount[inputNum], outputBufferCount),
                    &alpha,
                    &i_lppDOutputBuffer[batchNum*this->outputBufferCount],
                    1,
                    &o_lppDInputBuffer[inputNum][batchNum * this->lpInputBufferCount[inputNum]],
                    1);
                if(err != 0)
                    return ErrorCode::ERROR_CODE_CUDA_CALCULATE;
            }
        }
        // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
        // the documented, behavior-identical replacement.
        cudaDeviceSynchronize();
    }
#ifdef _DEBUG
    // Debug-only host mirrors of all device buffers for inspection.
    std::vector<std::vector<float>> lpTmpInputBuffer(this->GetInputDataCount());
    for(int i=0; i<lpTmpInputBuffer.size(); i++)
    {
        lpTmpInputBuffer[i].resize(this->GetBatchSize() * this->lpInputBufferCount[i]);
        cudaMemcpy(&lpTmpInputBuffer[i][0], i_lppInputBuffer[i], sizeof(float)*lpTmpInputBuffer[i].size(), cudaMemcpyDeviceToHost);
    }
    std::vector<float> lpTmpOutputBuffer(this->GetBatchSize() * this->outputBufferCount);
    cudaMemcpy(&lpTmpOutputBuffer[0], i_lppOutputBuffer, sizeof(float)*lpTmpOutputBuffer.size(), cudaMemcpyDeviceToHost);
    std::vector<float> lpTmpDOutputBuffer(this->GetBatchSize() * this->outputBufferCount);
    cudaMemcpy(&lpTmpDOutputBuffer[0], i_lppDOutputBuffer, sizeof(float)*lpTmpDOutputBuffer.size(), cudaMemcpyDeviceToHost);
    std::vector<std::vector<float>> lpTmpDInputBuffer(this->GetInputDataCount());
    for(int i=0; i<lpTmpInputBuffer.size(); i++)
    {
        lpTmpDInputBuffer[i].resize(this->GetBatchSize() * this->lpInputBufferCount[i]);
        cudaMemcpy(&lpTmpDInputBuffer[i][0], o_lppDInputBuffer[i], sizeof(float)*lpTmpDInputBuffer[i].size(), cudaMemcpyDeviceToHost);
    }
#endif
    return ErrorCode::ERROR_CODE_NONE;
}
/** Training pass: averaging has no learnable parameters, so this just
    propagates the error to the inputs via CalculateDInput_device. */
ErrorCode MergeAverage_GPU::Training_device(CONST_BATCH_BUFFER_POINTER i_lppInputBuffer[], BATCH_BUFFER_POINTER o_lppDInputBuffer[], CONST_BATCH_BUFFER_POINTER i_lppOutputBuffer, CONST_BATCH_BUFFER_POINTER i_lppDOutputBuffer)
{
    return this->CalculateDInput_device(i_lppInputBuffer, o_lppDInputBuffer, i_lppOutputBuffer, i_lppDOutputBuffer);
}
} // Gravisbell;
} // Layer;
} // NeuralNetwork;
|
f257bb8533f80ff6938643677f1382553b4f5cbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<hip/hip_runtime_api.h>
#include<time.h>
#include<stdlib.h>
// SAFE_CALL: wraps a HIP runtime API call; on error prints the stringized
// call and error text, then throws a const char* to abort execution.
#define SAFE_CALL(CallInstruction){ \
hipError_t cuerr=CallInstruction; \
if(cuerr!=hipSuccess){ \
printf("CUDA error:%s at call \"" #CallInstruction"\"\n",hipGetErrorString(cuerr));\
throw "error in CUDA API function,aborting...";\
} \
}
// SAFE_KERNEL_CALL: wraps a kernel launch; checks the launch status via
// hipGetLastError and the asynchronous execution status after a full
// hipDeviceSynchronize, throwing on either failure.
#define SAFE_KERNEL_CALL(KernelCallInstruction){\
KernelCallInstruction; \
hipError_t cuerr=hipGetLastError();\
if(cuerr!=hipSuccess){\
printf("CUDA error in kernel launch:%s at kernel \"" #KernelCallInstruction "\"\n",hipGetErrorString(cuerr)); \
throw "error in CUDA kernel launch,aborting...";\
}\
cuerr=hipDeviceSynchronize();\
if(cuerr!=hipSuccess){\
printf("CUDA error in kernel execution:%s at kernel\"" #KernelCallInstruction "\"\n",hipGetErrorString(cuerr));\
throw "error in CUDA kernel execution,aborting...";\
}\
}
#define size_mb 1048576
// Gather kernel for the random-access bandwidth benchmark: each thread does
// one dependent load, result[i] = data[ptrs[i]].
__global__ void ram(int *ptrs, int *result, int *data, int num)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num)
        return;                       // guard the grid tail
    result[tid] = data[ptrs[tid]];
}
// Random-access read-bandwidth benchmark: a 1 GiB index array drives gather
// loads from a small (n MiB) data array; timing uses HIP events and the
// result is appended to bandwidth.dat.
int main(int argc, char **argv)
{
    //int n=atoi(argv[1]);
    int n = 1;                                      // cached array size in MiB
    long int cached_array_size = size_mb * n;       // bytes
    long int large_array_size  = size_mb * 1024;    // bytes (1 GiB)
    long int cached_elems = cached_array_size / sizeof(int);
    long int large_elems  = large_array_size / sizeof(int);
    // Host data.
    int *h_ptrs, *h_result, *h_data;
    h_ptrs   = (int*)malloc(large_array_size);
    h_result = (int*)malloc(large_array_size);
    h_data   = (int*)malloc(cached_array_size);
    if (!h_ptrs || !h_result || !h_data) {
        printf("host allocation failed\n");
        return 1;
    }
    for (long int i = 0; i < cached_elems; i++)
    {
        h_data[i] = i;
    }
    for (long int i = 0; i < large_elems; i++)
    {
        h_result[i] = 0;
        // BUGFIX: indices must be element indices into h_data; the original
        // took rand() modulo the BYTE size, producing out-of-bounds reads
        // (up to 4x past the end of d_data) in the kernel.
        h_ptrs[i] = rand() % cached_elems;
    }
    // Device data.
    int *d_ptrs, *d_result, *d_data;
    SAFE_CALL(hipMalloc(&d_ptrs, large_array_size));
    SAFE_CALL(hipMalloc(&d_result, large_array_size));
    SAFE_CALL(hipMalloc(&d_data, cached_array_size));
    SAFE_CALL(hipMemcpy(d_ptrs, h_ptrs, large_array_size, hipMemcpyHostToDevice));
    SAFE_CALL(hipMemcpy(d_result, h_result, large_array_size, hipMemcpyHostToDevice));
    SAFE_CALL(hipMemcpy(d_data, h_data, cached_array_size, hipMemcpyHostToDevice));
    int blocksize = 1024;
    int gridsize = ((large_elems - 1) / blocksize) + 1;
    hipEvent_t start, finish;
    SAFE_CALL(hipEventCreate(&start));
    SAFE_CALL(hipEventCreate(&finish));
    SAFE_CALL(hipDeviceSynchronize());
    SAFE_CALL(hipEventRecord(start));
    // BUGFIX: the hipify pass had mangled this into
    // hipLaunchKernelGGL((SAFE_KERNEL_CALL((ram), ...))), which is not a
    // valid launch; wrap the real launch in the checking macro instead.
    SAFE_KERNEL_CALL((hipLaunchKernelGGL(ram, dim3(gridsize), dim3(blocksize), 0, 0,
                                         d_ptrs, d_result, d_data, (int)large_elems)));
    SAFE_CALL(hipEventRecord(finish));
    SAFE_CALL(hipMemcpy(h_result, d_result, large_array_size, hipMemcpyDeviceToHost));
    SAFE_CALL(hipEventSynchronize(finish));
    float milliseconds = 0;
    SAFE_CALL(hipEventElapsedTime(&milliseconds, start, finish));
    // BUGFIX: the original format strings ended in a literal 'n' (broken \n).
    printf("Used time is(s):%f\n", milliseconds / 1000);
    printf("Effective Bandwidth(GB/s):%f\n", large_array_size * 3 / milliseconds / 1e6);
    FILE *bandwidth = fopen("bandwidth.dat", "a");
    if (bandwidth) {
        fprintf(bandwidth, "%d %f\n", n, large_array_size * 3 / milliseconds / 1e6);
        fclose(bandwidth);   // BUGFIX: the stream was leaked
    }
    // BUGFIX: events were never destroyed.
    SAFE_CALL(hipEventDestroy(start));
    SAFE_CALL(hipEventDestroy(finish));
    SAFE_CALL(hipFree(d_ptrs));
    SAFE_CALL(hipFree(d_result));
    SAFE_CALL(hipFree(d_data));
    free(h_ptrs);
    free(h_result);
    free(h_data);
    return 0;
}
| f257bb8533f80ff6938643677f1382553b4f5cbb.cu | #include<iostream>
#include<cuda_runtime_api.h>
#include<time.h>
#include<stdlib.h>
#define SAFE_CALL(CallInstruction){ \
cudaError_t cuerr=CallInstruction; \
if(cuerr!=cudaSuccess){ \
printf("CUDA error:%s at call \"" #CallInstruction"\"\n",cudaGetErrorString(cuerr));\
throw "error in CUDA API function,aborting...";\
} \
}
#define SAFE_KERNEL_CALL(KernelCallInstruction){\
KernelCallInstruction; \
cudaError_t cuerr=cudaGetLastError();\
if(cuerr!=cudaSuccess){\
printf("CUDA error in kernel launch:%s at kernel \"" #KernelCallInstruction "\"\n",cudaGetErrorString(cuerr)); \
throw "error in CUDA kernel launch,aborting...";\
}\
cuerr=cudaDeviceSynchronize();\
if(cuerr!=cudaSuccess){\
printf("CUDA error in kernel execution:%s at kernel\"" #KernelCallInstruction "\"\n",cudaGetErrorString(cuerr));\
throw "error in CUDA kernel execution,aborting...";\
}\
}
#define size_mb 1048576
//ядро
__global__ void ram(int *ptrs,int *result,int *data,int num)
{
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if(idx<num)
{
result[idx]=data[ptrs[idx]];
}
}
int main(int argc,char **argv)
{
//int n=atoi(argv[1]);
int n=1;
long int cached_array_size=size_mb*n;
long int large_array_size=size_mb*1024;
//host data
int *h_ptrs,*h_result,*h_data;
h_ptrs=(int*)malloc(large_array_size);
h_result=(int*)malloc(large_array_size);
h_data=(int*)malloc(cached_array_size);
for(int i=0;i<(cached_array_size/sizeof(int));i++)
{
h_data[i]=i;
}
for(int i=0;i<(large_array_size/sizeof(int));i++)
{
h_result[i]=0;
h_ptrs[i]=rand()%cached_array_size;
}
//device data
int *d_ptrs,*d_result,*d_data;
SAFE_CALL(cudaMalloc(&d_ptrs,large_array_size));
SAFE_CALL(cudaMalloc(&d_result,large_array_size));
SAFE_CALL(cudaMalloc(&d_data,cached_array_size));
SAFE_CALL(cudaMemcpy(d_ptrs,h_ptrs,large_array_size,cudaMemcpyHostToDevice));
SAFE_CALL(cudaMemcpy(d_result,h_result,large_array_size,cudaMemcpyHostToDevice));
SAFE_CALL(cudaMemcpy(d_data,h_data,cached_array_size,cudaMemcpyHostToDevice));
int blocksize=1024;
int gridsize=(((large_array_size/sizeof(int))-1)/blocksize)+1;
cudaEvent_t start,finish;
SAFE_CALL(cudaEventCreate(&start));
SAFE_CALL(cudaEventCreate(&finish));
SAFE_CALL(cudaDeviceSynchronize());
SAFE_CALL(cudaEventRecord(start));
SAFE_KERNEL_CALL((ram<<<gridsize,blocksize>>>(d_ptrs,d_result,d_data,large_array_size/sizeof(int))));
SAFE_CALL(cudaEventRecord(finish));
SAFE_CALL(cudaMemcpy(h_result,d_result,large_array_size,cudaMemcpyDeviceToHost));
SAFE_CALL(cudaEventSynchronize(finish));
float milliseconds=0;
SAFE_CALL(cudaEventElapsedTime(&milliseconds,start,finish));
printf("Used time is(s):%f",milliseconds/1000);
printf("Effective Bandwidth(GB/s):%fn",large_array_size*3/milliseconds/1e6);
FILE *bandwidth=fopen("bandwidth.dat","a");
fprintf(bandwidth,"%d %fn",n,large_array_size*3/milliseconds/1e6);
SAFE_CALL(cudaFree(d_ptrs));
SAFE_CALL(cudaFree(d_result));
SAFE_CALL(cudaFree(d_data));
free(h_ptrs);
free(h_result);
free(h_data);
return 0;
}
|
54630791930cfe4c42fff355309a2dfb69fa58d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void addKernel(int *c, const int *a, const int *b)
{
int g_tId = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int warpId = threadIdx.x / warpSize;
if ( g_tId < vectorSize) {
c[g_tId] = a[g_tId] + b[g_tId];
printf("thread id %d , warp Id %d , block id %d\n", g_tId, warpId,blockIdx.x);
}
} | 54630791930cfe4c42fff355309a2dfb69fa58d9.cu | #include "includes.h"
__global__ void addKernel(int *c, const int *a, const int *b)
{
int g_tId = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int warpId = threadIdx.x / warpSize;
if ( g_tId < vectorSize) {
c[g_tId] = a[g_tId] + b[g_tId];
printf("thread id %d , warp Id %d , block id %d\n", g_tId, warpId,blockIdx.x);
}
} |
045d547ad6644e5436df176b06123d5ec142dd68.hip | // !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <iostream>
#include <string>
#include <hip/hip_runtime.h>
// Charge une matrice disponible dans les repertoires exemples
bool load_matrix(char * filename, double * &matrix, int &nx, int &ny){
std::string line;
std::ifstream infile(filename);
if (!infile.is_open()) {
std::cout << "Fichier introuvable: "<< filename << std::endl;
return 0;
}
// Charge la taile de la matrice
infile >> nx >> ny;
// Alloue le tableau correspondant
matrix = new double[nx*ny];
// Charge la matrice
for (int i=0; i< nx*ny; i++){
infile >> matrix[i];
}
infile.close();
return 1;
}
// Calcul C = A * B
__global__ void matrixMultiply(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
/// Insrer le code
}
int main(int argc, char ** argv) {
float * hostA;
float * hostB;
float * hostC;
float * deviceA;
float * deviceB;
float * deviceC;
int numARows;
int numAColumns;
int numBRows;
int numBColumns;
int numCRows;
int numCColumns;
/// Charger le fichier d'entree
/// Initialiser numCRows et numCColumns
numCRows = 0;
numCColumns = 0;
/// Allouer hostC
/// Afficher les informations sur la matrice
/// Allouer la memoire sur GPU
/// Copier la memoire sur le GPU
/// Initialise la grille et les dimensions de chaque bloc
/// Execute le kernel
hipDeviceSynchronize();
/// Charge le resultat en memoire CPU
/// Libere la memoire
free(hostA);
free(hostB);
free(hostC);
return 0;
}
| 045d547ad6644e5436df176b06123d5ec142dd68.cu | #include <fstream>
#include <iostream>
#include <string>
#include <cuda.h>
// Charge une matrice disponible dans les repertoires exemples
bool load_matrix(char * filename, double * &matrix, int &nx, int &ny){
std::string line;
std::ifstream infile(filename);
if (!infile.is_open()) {
std::cout << "Fichier introuvable: "<< filename << std::endl;
return 0;
}
// Charge la taile de la matrice
infile >> nx >> ny;
// Alloue le tableau correspondant
matrix = new double[nx*ny];
// Charge la matrice
for (int i=0; i< nx*ny; i++){
infile >> matrix[i];
}
infile.close();
return 1;
}
// Calcul C = A * B
__global__ void matrixMultiply(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
/// Insérer le code
}
int main(int argc, char ** argv) {
float * hostA;
float * hostB;
float * hostC;
float * deviceA;
float * deviceB;
float * deviceC;
int numARows;
int numAColumns;
int numBRows;
int numBColumns;
int numCRows;
int numCColumns;
/// Charger le fichier d'entree
/// Initialiser numCRows et numCColumns
numCRows = 0;
numCColumns = 0;
/// Allouer hostC
/// Afficher les informations sur la matrice
/// Allouer la memoire sur GPU
/// Copier la memoire sur le GPU
/// Initialise la grille et les dimensions de chaque bloc
/// Execute le kernel
cudaThreadSynchronize();
/// Charge le resultat en memoire CPU
/// Libere la memoire
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
69beb0846c3d29ce8f20135931579132e6327d21.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Simple example of writing and reading data
* through ADIOS2 BP engine with multiple simulations steps
* for every IO step.
*/
#include <ios>
#include <iostream>
#include <vector>
#include <adios2.h>
#include <hip/hip_runtime.h>
__global__ void update_array(float *vect, int val) { vect[blockIdx.x] += val; }
int BPWrite(const std::string fname, const size_t N, int nSteps)
{
// Initialize the simulation data
float *gpuSimData;
hipMalloc(&gpuSimData, N * sizeof(float));
hipMemset(gpuSimData, 0, N);
// Set up the ADIOS structures
adios2::ADIOS adios;
adios2::IO io = adios.DeclareIO("WriteIO");
io.SetEngine("BPFile");
// Declare an array for the ADIOS data of size (NumOfProcesses * N)
const adios2::Dims shape{static_cast<size_t>(N)};
const adios2::Dims start{static_cast<size_t>(0)};
const adios2::Dims count{N};
auto data = io.DefineVariable<float>("data", shape, start, count);
adios2::Engine bpWriter = io.Open(fname, adios2::Mode::Write);
// Simulation steps
for (size_t step = 0; step < nSteps; ++step)
{
// Make a 1D selection to describe the local dimensions of the
// variable we write and its offsets in the global spaces
adios2::Box<adios2::Dims> sel({0}, {N});
data.SetSelection(sel);
// Start IO step every write step
bpWriter.BeginStep();
data.SetMemorySpace(adios2::MemorySpace::CUDA);
bpWriter.Put(data, gpuSimData);
bpWriter.EndStep();
// Update values in the simulation data
hipLaunchKernelGGL(( update_array), dim3(N), dim3(1), 0, 0, gpuSimData, 10);
}
bpWriter.Close();
return 0;
}
int BPRead(const std::string fname, const size_t N, int nSteps)
{
// Create ADIOS structures
adios2::ADIOS adios;
adios2::IO io = adios.DeclareIO("ReadIO");
io.SetEngine("BPFile");
adios2::Engine bpReader = io.Open(fname, adios2::Mode::Read);
auto data = io.InquireVariable<float>("data");
std::cout << "Steps expected by the reader: " << bpReader.Steps()
<< std::endl;
std::cout << "Expecting data per step: " << data.Shape()[0];
std::cout << " elements" << std::endl;
int write_step = bpReader.Steps();
// Create the local buffer and initialize the access point in the ADIOS file
std::vector<float> simData(N); // set size to N
const adios2::Dims start{0};
const adios2::Dims count{N};
const adios2::Box<adios2::Dims> sel(start, count);
data.SetSelection(sel);
float *gpuSimData;
hipMalloc(&gpuSimData, N * sizeof(float));
hipMemset(gpuSimData, 0, N);
// Read the data in each of the ADIOS steps
for (size_t step = 0; step < write_step; step++)
{
data.SetStepSelection({step, 1});
data.SetMemorySpace(adios2::MemorySpace::CUDA);
bpReader.Get(data, gpuSimData, adios2::Mode::Deferred);
bpReader.PerformGets();
hipMemcpy(simData.data(), gpuSimData, N * sizeof(float),
hipMemcpyDeviceToHost);
std::cout << "Simualation step " << step << " : ";
std::cout << simData.size() << " elements: " << simData[1] << std::endl;
}
bpReader.Close();
return 0;
}
int main(int argc, char **argv)
{
const std::string fname("CudaBp4wr.bp");
const int device_id = 1;
hipSetDevice(device_id);
const size_t N = 6000;
int nSteps = 10, ret = 0;
ret += BPWrite(fname, N, nSteps);
ret += BPRead(fname, N, nSteps);
return ret;
}
| 69beb0846c3d29ce8f20135931579132e6327d21.cu | /*
* Simple example of writing and reading data
* through ADIOS2 BP engine with multiple simulations steps
* for every IO step.
*/
#include <ios>
#include <iostream>
#include <vector>
#include <adios2.h>
#include <cuda_runtime.h>
__global__ void update_array(float *vect, int val) { vect[blockIdx.x] += val; }
int BPWrite(const std::string fname, const size_t N, int nSteps)
{
// Initialize the simulation data
float *gpuSimData;
cudaMalloc(&gpuSimData, N * sizeof(float));
cudaMemset(gpuSimData, 0, N);
// Set up the ADIOS structures
adios2::ADIOS adios;
adios2::IO io = adios.DeclareIO("WriteIO");
io.SetEngine("BPFile");
// Declare an array for the ADIOS data of size (NumOfProcesses * N)
const adios2::Dims shape{static_cast<size_t>(N)};
const adios2::Dims start{static_cast<size_t>(0)};
const adios2::Dims count{N};
auto data = io.DefineVariable<float>("data", shape, start, count);
adios2::Engine bpWriter = io.Open(fname, adios2::Mode::Write);
// Simulation steps
for (size_t step = 0; step < nSteps; ++step)
{
// Make a 1D selection to describe the local dimensions of the
// variable we write and its offsets in the global spaces
adios2::Box<adios2::Dims> sel({0}, {N});
data.SetSelection(sel);
// Start IO step every write step
bpWriter.BeginStep();
data.SetMemorySpace(adios2::MemorySpace::CUDA);
bpWriter.Put(data, gpuSimData);
bpWriter.EndStep();
// Update values in the simulation data
update_array<<<N, 1>>>(gpuSimData, 10);
}
bpWriter.Close();
return 0;
}
int BPRead(const std::string fname, const size_t N, int nSteps)
{
// Create ADIOS structures
adios2::ADIOS adios;
adios2::IO io = adios.DeclareIO("ReadIO");
io.SetEngine("BPFile");
adios2::Engine bpReader = io.Open(fname, adios2::Mode::Read);
auto data = io.InquireVariable<float>("data");
std::cout << "Steps expected by the reader: " << bpReader.Steps()
<< std::endl;
std::cout << "Expecting data per step: " << data.Shape()[0];
std::cout << " elements" << std::endl;
int write_step = bpReader.Steps();
// Create the local buffer and initialize the access point in the ADIOS file
std::vector<float> simData(N); // set size to N
const adios2::Dims start{0};
const adios2::Dims count{N};
const adios2::Box<adios2::Dims> sel(start, count);
data.SetSelection(sel);
float *gpuSimData;
cudaMalloc(&gpuSimData, N * sizeof(float));
cudaMemset(gpuSimData, 0, N);
// Read the data in each of the ADIOS steps
for (size_t step = 0; step < write_step; step++)
{
data.SetStepSelection({step, 1});
data.SetMemorySpace(adios2::MemorySpace::CUDA);
bpReader.Get(data, gpuSimData, adios2::Mode::Deferred);
bpReader.PerformGets();
cudaMemcpy(simData.data(), gpuSimData, N * sizeof(float),
cudaMemcpyDeviceToHost);
std::cout << "Simualation step " << step << " : ";
std::cout << simData.size() << " elements: " << simData[1] << std::endl;
}
bpReader.Close();
return 0;
}
int main(int argc, char **argv)
{
const std::string fname("CudaBp4wr.bp");
const int device_id = 1;
cudaSetDevice(device_id);
const size_t N = 6000;
int nSteps = 10, ret = 0;
ret += BPWrite(fname, N, nSteps);
ret += BPRead(fname, N, nSteps);
return ret;
}
|
6c7fffab6d04780138dcc6434a591ccf98a71ffa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "Plot/GoogleChart.hpp"
#include "timer.hpp"
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/xml_parser.hpp>
#include "util/performance/performance_util.hpp"
#include "Point_test.hpp"
#include "util/stat/common_statistics.hpp"
extern const char * test_dir;
typedef Point_test<float> P;
constexpr int N_STAT = 32;
BOOST_AUTO_TEST_SUITE( performance )
#define NADD 128*128*128
#define NADD_GPU 256*256*256
// Property tree
struct report_vector_func_tests
{
boost::property_tree::ptree graphs;
};
report_vector_func_tests report_vector_funcs;
BOOST_AUTO_TEST_SUITE( vector_performance )
BOOST_AUTO_TEST_CASE(vector_performance)
{
report_vector_funcs.graphs.put("performance.vector(0).funcs.nele",NADD);
report_vector_funcs.graphs.put("performance.vector(0).funcs.name","add");
report_vector_funcs.graphs.put("performance.vector(1).funcs.nele",NADD);
report_vector_funcs.graphs.put("performance.vector(1).funcs.name","get");
std::vector<double> times(N_STAT + 1);
std::vector<double> times_g(N_STAT + 1);
// get test
double tot_accu = 0.0;
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
timer t;
t.start();
// create a vector
openfpm::vector<Point_test<float>> v1;
// Point
Point_test<float> p;
p.setx(1.0);
p.sety(2.0);
p.setz(3.0);
p.sets(4.0);
p.get<P::v>()[0] = 1.0;
p.get<P::v>()[1] = 2.0;
p.get<P::v>()[2] = 7.0;
p.get<P::t>()[0][0] = 10.0;
p.get<P::t>()[0][1] = 13.0;
p.get<P::t>()[0][2] = 8.0;
p.get<P::t>()[1][0] = 19.0;
p.get<P::t>()[1][1] = 23.0;
p.get<P::t>()[1][2] = 5.0;
p.get<P::t>()[2][0] = 4.0;
p.get<P::t>()[2][1] = 3.0;
p.get<P::t>()[2][2] = 11.0;
// Add test
for (size_t j = 0 ; j < NADD ; j++)
{
v1.add(p);
}
t.stop();
times[i] = t.getwct();
timer tg;
tg.start();
for (size_t j = 0 ; j < NADD ; j++)
{
double accu1 = v1.template get<P::x>(j);
double accu2 = v1.template get<P::y>(j);
double accu3 = v1.template get<P::z>(j);
double accu4 = v1.template get<P::s>(j);
double accu5 = v1.template get<P::v>(j)[0];
double accu6 = v1.template get<P::v>(j)[1];
double accu7 = v1.template get<P::v>(j)[2];
double accu8 = v1.template get<P::t>(j)[0][0];
double accu9 = v1.template get<P::t>(j)[0][1];
double accu10 = v1.template get<P::t>(j)[0][2];
double accu11 = v1.template get<P::t>(j)[1][0];
double accu12 = v1.template get<P::t>(j)[1][1];
double accu13 = v1.template get<P::t>(j)[1][2];
double accu14 = v1.template get<P::t>(j)[2][0];
double accu15 = v1.template get<P::t>(j)[2][1];
double accu16 = v1.template get<P::t>(j)[2][2];
tot_accu += accu1 + accu2 + accu3 + accu4 + accu5 + accu6 + accu7 + accu8 + accu9 + accu10 + accu11 + accu12 +
accu13 + accu14 + accu15 + accu16;
}
tg.stop();
times_g[i] = tg.getwct();
}
double mean;
double dev;
standard_deviation(times,mean,dev);
report_vector_funcs.graphs.put("performance.vector(0).y.data.mean",mean);
report_vector_funcs.graphs.put("performance.vector(0).y.data.dev",dev);
standard_deviation(times_g,mean,dev);
report_vector_funcs.graphs.put("performance.vector(1).y.data.mean",mean);
report_vector_funcs.graphs.put("performance.vector(1).y.data.dev",dev);
}
template<typename vector_prop_type, typename vector_pos_type>
__device__ __host__ void read_write(vector_prop_type & vd_prop, vector_pos_type & vd_pos, unsigned int p)
{
vd_prop.template get<0>(p) = vd_pos.template get<0>(p)[0] + vd_pos.template get<0>(p)[1];
vd_prop.template get<1>(p)[0] = vd_pos.template get<0>(p)[0];
vd_prop.template get<1>(p)[1] = vd_pos.template get<0>(p)[1];
vd_prop.template get<2>(p)[0][0] = vd_pos.template get<0>(p)[0];
vd_prop.template get<2>(p)[0][1] = vd_pos.template get<0>(p)[1];
vd_prop.template get<2>(p)[1][0] = vd_pos.template get<0>(p)[0] +
vd_pos.template get<0>(p)[1];
vd_prop.template get<2>(p)[1][1] = vd_pos.template get<0>(p)[1] -
vd_pos.template get<0>(p)[0];
vd_pos.template get<0>(p)[0] += 0.01f;
vd_pos.template get<0>(p)[1] += 0.01f;
}
template<typename vector_type1, typename vector_type2>
__global__ void read_write_ker(vector_type1 v1, vector_type2 v2)
{
unsigned int p = + blockIdx.x * blockDim.x + threadIdx.x;
read_write(v1,v2,p);
}
struct ele
{
double s;
double v[2];
double t[2][2];
};
__device__ __host__ void read_write_lin(double * pos, ele * prp, unsigned int p)
{
prp[p].s = pos[2*p] + pos[2*p+1];
prp[p].v[0] = pos[2*p];
prp[p].v[1] = pos[2*p+1];
prp[p].t[0][0] = pos[2*p];
prp[p].t[0][1] = pos[2*p+1];
prp[p].t[1][0] = pos[2*p] + pos[2*p+1];
prp[p].t[1][1] = pos[2*p+1] - pos[2*p];
pos[2*p] += 0.01f;
pos[2*p+1] += 0.01f;
}
__global__ void read_write_lin_ker(double * pos, ele * prp)
{
unsigned int p = blockIdx.x * blockDim.x + threadIdx.x;
read_write_lin(pos,prp,p);
}
__device__ __host__ void read_write_inte(double * pos, double * prp0, double * prp1, double * prp2, unsigned int p, unsigned int n_pos)
{
prp0[0*n_pos + p] = pos[0*n_pos + p] + pos[1*n_pos+p];
prp1[0*n_pos + p] = pos[0*n_pos + p];
prp1[1*n_pos + p] = pos[1*n_pos + p];
prp2[0*n_pos*2+0*n_pos + p] = pos[0*n_pos + p];
prp2[0*n_pos*2+1*n_pos + p] = pos[1*n_pos + p];
prp2[1*n_pos*2+0*n_pos + p] = pos[0*n_pos + p] +
pos[1*n_pos + p];
prp2[1*n_pos*2+1*n_pos + p] = pos[1*n_pos + p] -
pos[0*n_pos + p];
pos[0*n_pos + p] += 0.01f;
pos[1*n_pos + p] += 0.01f;
}
__global__ void read_write_inte_ker(double * pos, double * prp0, double * prp1, double * prp2, unsigned int n_pos)
{
unsigned int p = blockIdx.x * blockDim.x + threadIdx.x;
read_write_inte(pos,prp0,prp1,prp2,p,n_pos);
}
BOOST_AUTO_TEST_CASE(vector_performance_layout_vs_plain_array)
{
std::vector<double> times(N_STAT + 1);
std::vector<double> times_g(N_STAT + 1);
std::vector<double> times2(N_STAT + 1);
std::vector<double> times2_g(N_STAT + 1);
report_vector_funcs.graphs.put("performance.vector_layout(0).funcs.nele",NADD);
report_vector_funcs.graphs.put("performance.vector_layout(0).funcs.name","read_write_lin");
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
// create a vector
openfpm::vector<aggregate<double,double[2],double[2][2]>> v1;
openfpm::vector<aggregate<double[2]>> v2;
// Point
aggregate<double[2]> p;
p.get<0>()[0] = 1.0;
p.get<0>()[1] = 2.0;
aggregate<double,double[2],double[2][2]> pa;
pa.get<0>() = 1.0;
pa.get<1>()[0] = 1.0;
pa.get<1>()[1] = 1.0;
pa.get<2>()[0][0] = 1.0;
pa.get<2>()[0][1] = 1.0;
pa.get<2>()[1][0] = 1.0;
pa.get<2>()[1][1] = 1.0;
// Add test
for (size_t j = 0 ; j < NADD ; j++)
{
v1.add(pa);
v2.add(p);
}
timer tg;
tg.start();
for (size_t j = 0 ; j < NADD ; j++)
{
read_write(v1,v2,j);
}
tg.stop();
times_g[i] = tg.getwct();
timer tga;
tga.start();
double * prp = (double *)v1.getPointer<0>();
double * pos = (double *)v2.getPointer<0>();
for (size_t j = 0 ; j < NADD ; j++)
{
read_write_lin(pos,(struct ele *)prp,j);
}
tga.stop();
times[i] = tga.getwct();
}
double mean;
double dev;
standard_deviation(times_g,mean,dev);
double mean_;
double dev_;
standard_deviation(times,mean_,dev_);
report_vector_funcs.graphs.put("performance.vector_layout(0).y.data.mean",mean_/mean);
// Deviation od x/y = x/y^2 dy + 1/y dx
report_vector_funcs.graphs.put("performance.vector_layout(0).y.data.dev",mean_/(mean*mean)*dev + dev_ / mean );
report_vector_funcs.graphs.put("performance.vector_layout(1).funcs.nele",NADD);
report_vector_funcs.graphs.put("performance.vector_layout(1).funcs.name","read_write_inte");
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
// create a vector
openfpm::vector<aggregate<double,double[2],double[2][2]>,HeapMemory,memory_traits_inte> v1;
openfpm::vector<aggregate<double[2]>,HeapMemory,memory_traits_inte> v2;
// Point
aggregate<double[2]> p;
p.get<0>()[0] = 1.0;
p.get<0>()[1] = 2.0;
aggregate<double,double[2],double[2][2]> pa;
pa.get<0>() = 1.0;
pa.get<1>()[0] = 1.0;
pa.get<1>()[1] = 1.0;
pa.get<2>()[0][0] = 1.0;
pa.get<2>()[0][1] = 1.0;
pa.get<2>()[1][0] = 1.0;
pa.get<2>()[1][1] = 1.0;
// Add test
for (size_t j = 0 ; j < NADD ; j++)
{
v1.add(pa);
v2.add(p);
}
timer tg;
tg.start();
for (size_t j = 0 ; j < NADD ; j++)
{
read_write(v1,v2,j);
}
tg.stop();
times2_g[i] = tg.getwct();
int sz = v1.size();
timer tga;
tga.start();
double * prp0 = (double *)v1.getPointer<0>();
double * prp1 = (double *)v1.getPointer<1>();
double * prp2 = (double *)v1.getPointer<2>();
double * pos = (double *)v2.getPointer<0>();
for (size_t j = 0 ; j < NADD ; j++)
{
read_write_inte(pos,prp0,prp1,prp2,j,sz);
}
tga.stop();
times2[i] = tga.getwct();
}
double mean2;
double dev2;
standard_deviation(times2_g,mean2,dev2);
double mean2_;
double dev2_;
standard_deviation(times2,mean2_,dev2_);
report_vector_funcs.graphs.put("performance.vector_layout(1).y.data.mean",mean2_/mean2);
// Deviation od x/y = x/y^2 dy + 1/y dx
report_vector_funcs.graphs.put("performance.vector_layout(1).y.data.dev",mean2_/(mean2*mean2)*dev2 + dev2_ / mean2 );
}
BOOST_AUTO_TEST_CASE(vector_performance_gpu_layout_vs_plain_array)
{
std::vector<double> times(N_STAT + 1);
std::vector<double> times_g(N_STAT + 1);
std::vector<double> times2(N_STAT + 1);
std::vector<double> times2_g(N_STAT + 1);
// get test
double tot_accu = 0.0;
report_vector_funcs.graphs.put("performance.vector_layout_gpu(0).funcs.nele",NADD_GPU);
report_vector_funcs.graphs.put("performance.vector_layout_gpu(0).funcs.name","read_write_lin");
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
// create a vector
openfpm::vector<aggregate<double,double[2],double[2][2]>,CudaMemory> v1;
openfpm::vector<aggregate<double[2]>,CudaMemory> v2;
// Point
aggregate<double[2]> p;
p.get<0>()[0] = 1.0;
p.get<0>()[1] = 2.0;
aggregate<double,double[2],double[2][2]> pa;
pa.get<0>() = 1.0;
pa.get<1>()[0] = 1.0;
pa.get<1>()[1] = 1.0;
pa.get<2>()[0][0] = 1.0;
pa.get<2>()[0][1] = 1.0;
pa.get<2>()[1][0] = 1.0;
pa.get<2>()[1][1] = 1.0;
// Add test
for (size_t j = 0 ; j < NADD_GPU ; j++)
{
v1.add(pa);
v2.add(p);
}
auto ite = v1.getGPUIterator(1536);
{
timer tga;
tga.startGPU();
CUDA_LAUNCH(read_write_ker,ite,v1.toKernel(),v2.toKernel());
tga.stopGPU();
times_g[i] = tga.getwctGPU();
}
std::cout << "OpenFPM: " << times_g[i] << std::endl;
timer tga2;
tga2.startGPU();
double * prp = (double *)v1.toKernel().getPointer<0>();
double * pos = (double *)v2.toKernel().getPointer<0>();
CUDA_LAUNCH(read_write_lin_ker,ite,pos,(struct ele *)prp);
tga2.stopGPU();
times[i] = tga2.getwctGPU();
std::cout << "Array: " << times[i] << std::endl;
}
double mean;
double dev;
standard_deviation(times_g,mean,dev);
double mean_;
double dev_;
standard_deviation(times,mean_,dev_);
report_vector_funcs.graphs.put("performance.vector_layout_gpu(0).y.data.mean",mean_/mean);
// Deviation od x/y = x/y^2 dy + 1/y dx
report_vector_funcs.graphs.put("performance.vector_layout_gpu(0).y.data.dev",mean_/(mean*mean)*dev + dev_ / mean );
report_vector_funcs.graphs.put("performance.vector_layout_gpu(1).funcs.nele",NADD);
report_vector_funcs.graphs.put("performance.vector_layout_gpu(1).funcs.name","read_write_inte");
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
// create a vector
openfpm::vector<aggregate<double,double[2],double[2][2]>,CudaMemory,memory_traits_inte> v1;
openfpm::vector<aggregate<double[2]>,CudaMemory,memory_traits_inte> v2;
// Point
aggregate<double[2]> p;
p.get<0>()[0] = 1.0;
p.get<0>()[1] = 2.0;
aggregate<double,double[2],double[2][2]> pa;
pa.get<0>() = 1.0;
pa.get<1>()[0] = 1.0;
pa.get<1>()[1] = 1.0;
pa.get<2>()[0][0] = 1.0;
pa.get<2>()[0][1] = 1.0;
pa.get<2>()[1][0] = 1.0;
pa.get<2>()[1][1] = 1.0;
// Add test
for (size_t j = 0 ; j < NADD_GPU ; j++)
{
v1.add(pa);
v2.add(p);
}
timer tg;
tg.startGPU();
auto ite = v1.getGPUIterator(1536);
CUDA_LAUNCH(read_write_ker,ite,v1.toKernel(),v2.toKernel());
tg.stopGPU();
times2_g[i] = tg.getwctGPU();
std::cout << "OpenFPM inte: " << times2_g[i] << std::endl;
int sz = v1.size();
timer tga;
tga.startGPU();
double * prp0 = (double *)v1.toKernel().getPointer<0>();
double * prp1 = (double *)v1.toKernel().getPointer<1>();
double * prp2 = (double *)v1.toKernel().getPointer<2>();
double * pos = (double *)v2.toKernel().getPointer<0>();
CUDA_LAUNCH(read_write_inte_ker,ite,pos,prp0,prp1,prp2,sz);
tga.stopGPU();
times2[i] = tga.getwctGPU();
std::cout << "Array inte: " << times2[i] << std::endl;
}
double mean2;
double dev2;
standard_deviation(times2_g,mean2,dev2);
double mean2_;
double dev2_;
standard_deviation(times2,mean2_,dev2_);
report_vector_funcs.graphs.put("performance.vector_layout_gpu(1).y.data.mean",mean2_/mean2);
// Deviation od x/y = x/y^2 dy + 1/y dx
report_vector_funcs.graphs.put("performance.vector_layout_gpu(1).y.data.dev",mean2_/(mean2*mean2)*dev2 + dev2_ / mean2 );
}
BOOST_AUTO_TEST_CASE(vector_performance_write_report)
{
// Create a graphs
report_vector_funcs.graphs.put("graphs.graph(0).type","line");
report_vector_funcs.graphs.add("graphs.graph(0).title","Vector add and get");
report_vector_funcs.graphs.add("graphs.graph(0).x.title","Tests");
report_vector_funcs.graphs.add("graphs.graph(0).y.title","Time seconds");
report_vector_funcs.graphs.add("graphs.graph(0).y.data(0).source","performance.vector(#).y.data.mean");
report_vector_funcs.graphs.add("graphs.graph(0).x.data(0).source","performance.vector(#).funcs.name");
report_vector_funcs.graphs.add("graphs.graph(0).y.data(0).title","Actual");
report_vector_funcs.graphs.add("graphs.graph(0).interpolation","lines");
report_vector_funcs.graphs.put("graphs.graph(1).type","line");
report_vector_funcs.graphs.add("graphs.graph(1).title","Vector read write");
report_vector_funcs.graphs.add("graphs.graph(1).x.title","Layout");
report_vector_funcs.graphs.add("graphs.graph(1).y.title","Time seconds");
report_vector_funcs.graphs.add("graphs.graph(1).y.data(0).source","performance.vector_layout(#).y.data.mean");
report_vector_funcs.graphs.add("graphs.graph(1).x.data(0).source","performance.vector_layout(#).funcs.name");
report_vector_funcs.graphs.add("graphs.graph(1).y.data(0).title","Actual");
report_vector_funcs.graphs.add("graphs.graph(1).interpolation","lines");
report_vector_funcs.graphs.put("graphs.graph(2).type","line");
report_vector_funcs.graphs.add("graphs.graph(2).title","Vector GPU read write");
report_vector_funcs.graphs.add("graphs.graph(2).x.title","Layout");
report_vector_funcs.graphs.add("graphs.graph(2).y.title","Time seconds");
report_vector_funcs.graphs.add("graphs.graph(2).y.data(0).source","performance.vector_layout_gpu(#).y.data.mean");
report_vector_funcs.graphs.add("graphs.graph(2).x.data(0).source","performance.vector_layout_gpu(#).funcs.name");
report_vector_funcs.graphs.add("graphs.graph(2).y.data(0).title","Actual");
report_vector_funcs.graphs.add("graphs.graph(2).interpolation","lines");
boost::property_tree::xml_writer_settings<std::string> settings(' ', 4);
boost::property_tree::write_xml("vector_performance_funcs.xml", report_vector_funcs.graphs,std::locale(),settings);
GoogleChart cg;
std::string file_xml_ref(test_dir);
file_xml_ref += std::string("/openfpm_data/vector_performance_funcs_ref.xml");
StandardXMLPerformanceGraph("vector_performance_funcs.xml",file_xml_ref,cg);
addUpdtateTime(cg,1);
cg.write("vector_performance_funcs.html");
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END() | 6c7fffab6d04780138dcc6434a591ccf98a71ffa.cu | #define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "Plot/GoogleChart.hpp"
#include "timer.hpp"
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/xml_parser.hpp>
#include "util/performance/performance_util.hpp"
#include "Point_test.hpp"
#include "util/stat/common_statistics.hpp"
extern const char * test_dir;
typedef Point_test<float> P;
constexpr int N_STAT = 32;
BOOST_AUTO_TEST_SUITE( performance )
#define NADD 128*128*128
#define NADD_GPU 256*256*256
// Property tree
struct report_vector_func_tests
{
boost::property_tree::ptree graphs;
};
report_vector_func_tests report_vector_funcs;
BOOST_AUTO_TEST_SUITE( vector_performance )
BOOST_AUTO_TEST_CASE(vector_performance)
{
report_vector_funcs.graphs.put("performance.vector(0).funcs.nele",NADD);
report_vector_funcs.graphs.put("performance.vector(0).funcs.name","add");
report_vector_funcs.graphs.put("performance.vector(1).funcs.nele",NADD);
report_vector_funcs.graphs.put("performance.vector(1).funcs.name","get");
std::vector<double> times(N_STAT + 1);
std::vector<double> times_g(N_STAT + 1);
// get test
double tot_accu = 0.0;
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
timer t;
t.start();
// create a vector
openfpm::vector<Point_test<float>> v1;
// Point
Point_test<float> p;
p.setx(1.0);
p.sety(2.0);
p.setz(3.0);
p.sets(4.0);
p.get<P::v>()[0] = 1.0;
p.get<P::v>()[1] = 2.0;
p.get<P::v>()[2] = 7.0;
p.get<P::t>()[0][0] = 10.0;
p.get<P::t>()[0][1] = 13.0;
p.get<P::t>()[0][2] = 8.0;
p.get<P::t>()[1][0] = 19.0;
p.get<P::t>()[1][1] = 23.0;
p.get<P::t>()[1][2] = 5.0;
p.get<P::t>()[2][0] = 4.0;
p.get<P::t>()[2][1] = 3.0;
p.get<P::t>()[2][2] = 11.0;
// Add test
for (size_t j = 0 ; j < NADD ; j++)
{
v1.add(p);
}
t.stop();
times[i] = t.getwct();
timer tg;
tg.start();
for (size_t j = 0 ; j < NADD ; j++)
{
double accu1 = v1.template get<P::x>(j);
double accu2 = v1.template get<P::y>(j);
double accu3 = v1.template get<P::z>(j);
double accu4 = v1.template get<P::s>(j);
double accu5 = v1.template get<P::v>(j)[0];
double accu6 = v1.template get<P::v>(j)[1];
double accu7 = v1.template get<P::v>(j)[2];
double accu8 = v1.template get<P::t>(j)[0][0];
double accu9 = v1.template get<P::t>(j)[0][1];
double accu10 = v1.template get<P::t>(j)[0][2];
double accu11 = v1.template get<P::t>(j)[1][0];
double accu12 = v1.template get<P::t>(j)[1][1];
double accu13 = v1.template get<P::t>(j)[1][2];
double accu14 = v1.template get<P::t>(j)[2][0];
double accu15 = v1.template get<P::t>(j)[2][1];
double accu16 = v1.template get<P::t>(j)[2][2];
tot_accu += accu1 + accu2 + accu3 + accu4 + accu5 + accu6 + accu7 + accu8 + accu9 + accu10 + accu11 + accu12 +
accu13 + accu14 + accu15 + accu16;
}
tg.stop();
times_g[i] = tg.getwct();
}
double mean;
double dev;
standard_deviation(times,mean,dev);
report_vector_funcs.graphs.put("performance.vector(0).y.data.mean",mean);
report_vector_funcs.graphs.put("performance.vector(0).y.data.dev",dev);
standard_deviation(times_g,mean,dev);
report_vector_funcs.graphs.put("performance.vector(1).y.data.mean",mean);
report_vector_funcs.graphs.put("performance.vector(1).y.data.dev",dev);
}
template<typename vector_prop_type, typename vector_pos_type>
__device__ __host__ void read_write(vector_prop_type & vd_prop, vector_pos_type & vd_pos, unsigned int p)
{
vd_prop.template get<0>(p) = vd_pos.template get<0>(p)[0] + vd_pos.template get<0>(p)[1];
vd_prop.template get<1>(p)[0] = vd_pos.template get<0>(p)[0];
vd_prop.template get<1>(p)[1] = vd_pos.template get<0>(p)[1];
vd_prop.template get<2>(p)[0][0] = vd_pos.template get<0>(p)[0];
vd_prop.template get<2>(p)[0][1] = vd_pos.template get<0>(p)[1];
vd_prop.template get<2>(p)[1][0] = vd_pos.template get<0>(p)[0] +
vd_pos.template get<0>(p)[1];
vd_prop.template get<2>(p)[1][1] = vd_pos.template get<0>(p)[1] -
vd_pos.template get<0>(p)[0];
vd_pos.template get<0>(p)[0] += 0.01f;
vd_pos.template get<0>(p)[1] += 0.01f;
}
// GPU entry point: one element per thread, delegating to read_write().
// NOTE(review): there is no bounds guard here — this assumes the launch
// grid covers exactly the number of elements; confirm getGPUIterator()
// does not over-provision threads.
template<typename vector_type1, typename vector_type2>
__global__ void read_write_ker(vector_type1 v1, vector_type2 v2)
{
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
read_write(v1,v2,tid);
}
// Plain-array mirror of aggregate<double,double[2],double[2][2]>.
// Member order and types must stay exactly like this: the benchmark
// reinterpret_casts the aggregate property buffer to `ele*`.
struct ele
{
double s;
double v[2];
double t[2][2];
};
// Same access pattern as read_write(), but on raw arrays in an
// array-of-structures layout: positions interleaved as (x,y) pairs,
// properties as an array of `ele`.
__device__ __host__ void read_write_lin(double * pos, ele * prp, unsigned int p)
{
// references into the two position components of particle p
double & x = pos[2*p];
double & y = pos[2*p+1];
ele & e = prp[p];
e.s = x + y;
e.v[0] = x;
e.v[1] = y;
e.t[0][0] = x;
e.t[0][1] = y;
e.t[1][0] = x + y;
e.t[1][1] = y - x;
// displace the particle
x += 0.01f;
y += 0.01f;
}
// GPU entry point for the plain-array (AoS) benchmark: one particle per
// thread. Bounds are assumed to be guaranteed by the launch configuration.
__global__ void read_write_lin_ker(double * pos, ele * prp)
{
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
read_write_lin(pos,prp,tid);
}
// Same access pattern as read_write_lin(), but on an interleaved (SoA)
// layout: each scalar component of each property occupies its own stretch
// of n_pos entries. Tensor component (i,j) lives at offset (i*2 + j)*n_pos.
__device__ __host__ void read_write_inte(double * pos, double * prp0, double * prp1, double * prp2, unsigned int p, unsigned int n_pos)
{
// references to the two position components of particle p
double & x = pos[p];
double & y = pos[n_pos + p];
// scalar property
prp0[p] = x + y;
// vector property
prp1[p] = x;
prp1[n_pos + p] = y;
// 2x2 tensor property
prp2[0*n_pos + p] = x;
prp2[1*n_pos + p] = y;
prp2[2*n_pos + p] = x + y;
prp2[3*n_pos + p] = y - x;
// displace the particle
x += 0.01f;
y += 0.01f;
}
// GPU entry point for the interleaved (SoA) benchmark: one particle per
// thread. Bounds are assumed to be guaranteed by the launch configuration.
__global__ void read_write_inte_ker(double * pos, double * prp0, double * prp1, double * prp2, unsigned int n_pos)
{
unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
read_write_inte(pos,prp0,prp1,prp2,tid,n_pos);
}
// Benchmark (CPU): openfpm layout access via get<>() vs hand-written
// plain-array access on the very same buffers.
// Section 0: default layout vs read_write_lin() (AoS arrays).
// Section 1: interleaved memory_traits_inte layout vs read_write_inte().
// The report stores the ratio plain/layout of the mean runtimes, with the
// propagated deviation.
BOOST_AUTO_TEST_CASE(vector_performance_layout_vs_plain_array)
{
std::vector<double> times(N_STAT + 1);
std::vector<double> times_g(N_STAT + 1);
std::vector<double> times2(N_STAT + 1);
std::vector<double> times2_g(N_STAT + 1);
report_vector_funcs.graphs.put("performance.vector_layout(0).funcs.nele",NADD);
report_vector_funcs.graphs.put("performance.vector_layout(0).funcs.name","read_write_lin");
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
// create a vector
openfpm::vector<aggregate<double,double[2],double[2][2]>> v1;
openfpm::vector<aggregate<double[2]>> v2;
// Point
aggregate<double[2]> p;
p.get<0>()[0] = 1.0;
p.get<0>()[1] = 2.0;
// property template: scalar, vector and tensor all filled with 1.0
aggregate<double,double[2],double[2][2]> pa;
pa.get<0>() = 1.0;
pa.get<1>()[0] = 1.0;
pa.get<1>()[1] = 1.0;
pa.get<2>()[0][0] = 1.0;
pa.get<2>()[0][1] = 1.0;
pa.get<2>()[1][0] = 1.0;
pa.get<2>()[1][1] = 1.0;
// Add test
for (size_t j = 0 ; j < NADD ; j++)
{
v1.add(pa);
v2.add(p);
}
// time the layout-based access
timer tg;
tg.start();
for (size_t j = 0 ; j < NADD ; j++)
{
read_write(v1,v2,j);
}
tg.stop();
times_g[i] = tg.getwct();
// time the equivalent raw-pointer access on the same buffers
timer tga;
tga.start();
double * prp = (double *)v1.getPointer<0>();
double * pos = (double *)v2.getPointer<0>();
for (size_t j = 0 ; j < NADD ; j++)
{
read_write_lin(pos,(struct ele *)prp,j);
}
tga.stop();
times[i] = tga.getwct();
}
double mean;
double dev;
standard_deviation(times_g,mean,dev);
double mean_;
double dev_;
standard_deviation(times,mean_,dev_);
// store the plain/layout runtime ratio
report_vector_funcs.graphs.put("performance.vector_layout(0).y.data.mean",mean_/mean);
// Deviation of x/y = x/y^2 dy + 1/y dx
report_vector_funcs.graphs.put("performance.vector_layout(0).y.data.dev",mean_/(mean*mean)*dev + dev_ / mean );
report_vector_funcs.graphs.put("performance.vector_layout(1).funcs.nele",NADD);
report_vector_funcs.graphs.put("performance.vector_layout(1).funcs.name","read_write_inte");
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
// create a vector (interleaved / SoA layout this time)
openfpm::vector<aggregate<double,double[2],double[2][2]>,HeapMemory,memory_traits_inte> v1;
openfpm::vector<aggregate<double[2]>,HeapMemory,memory_traits_inte> v2;
// Point
aggregate<double[2]> p;
p.get<0>()[0] = 1.0;
p.get<0>()[1] = 2.0;
aggregate<double,double[2],double[2][2]> pa;
pa.get<0>() = 1.0;
pa.get<1>()[0] = 1.0;
pa.get<1>()[1] = 1.0;
pa.get<2>()[0][0] = 1.0;
pa.get<2>()[0][1] = 1.0;
pa.get<2>()[1][0] = 1.0;
pa.get<2>()[1][1] = 1.0;
// Add test
for (size_t j = 0 ; j < NADD ; j++)
{
v1.add(pa);
v2.add(p);
}
// time the layout-based access
timer tg;
tg.start();
for (size_t j = 0 ; j < NADD ; j++)
{
read_write(v1,v2,j);
}
tg.stop();
times2_g[i] = tg.getwct();
int sz = v1.size();
// raw-pointer access: one base pointer per property (SoA)
timer tga;
tga.start();
double * prp0 = (double *)v1.getPointer<0>();
double * prp1 = (double *)v1.getPointer<1>();
double * prp2 = (double *)v1.getPointer<2>();
double * pos = (double *)v2.getPointer<0>();
for (size_t j = 0 ; j < NADD ; j++)
{
read_write_inte(pos,prp0,prp1,prp2,j,sz);
}
tga.stop();
times2[i] = tga.getwct();
}
double mean2;
double dev2;
standard_deviation(times2_g,mean2,dev2);
double mean2_;
double dev2_;
standard_deviation(times2,mean2_,dev2_);
report_vector_funcs.graphs.put("performance.vector_layout(1).y.data.mean",mean2_/mean2);
// Deviation of x/y = x/y^2 dy + 1/y dx
report_vector_funcs.graphs.put("performance.vector_layout(1).y.data.dev",mean2_/(mean2*mean2)*dev2 + dev2_ / mean2 );
}
// Benchmark (GPU): openfpm layout access vs hand-written plain-array access.
// Section 0: default layout kernel read_write_ker vs read_write_lin_ker.
// Section 1: interleaved memory_traits_inte layout vs read_write_inte_ker.
// The report stores the plain/layout ratio of the mean GPU runtimes.
BOOST_AUTO_TEST_CASE(vector_performance_gpu_layout_vs_plain_array)
{
std::vector<double> times(N_STAT + 1);
std::vector<double> times_g(N_STAT + 1);
std::vector<double> times2(N_STAT + 1);
std::vector<double> times2_g(N_STAT + 1);
report_vector_funcs.graphs.put("performance.vector_layout_gpu(0).funcs.nele",NADD_GPU);
report_vector_funcs.graphs.put("performance.vector_layout_gpu(0).funcs.name","read_write_lin");
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
// create a vector
openfpm::vector<aggregate<double,double[2],double[2][2]>,CudaMemory> v1;
openfpm::vector<aggregate<double[2]>,CudaMemory> v2;
// Point
aggregate<double[2]> p;
p.get<0>()[0] = 1.0;
p.get<0>()[1] = 2.0;
// property template: scalar, vector and tensor all filled with 1.0
aggregate<double,double[2],double[2][2]> pa;
pa.get<0>() = 1.0;
pa.get<1>()[0] = 1.0;
pa.get<1>()[1] = 1.0;
pa.get<2>()[0][0] = 1.0;
pa.get<2>()[0][1] = 1.0;
pa.get<2>()[1][0] = 1.0;
pa.get<2>()[1][1] = 1.0;
// Add test
for (size_t j = 0 ; j < NADD_GPU ; j++)
{
v1.add(pa);
v2.add(p);
}
auto ite = v1.getGPUIterator(1536);
{
// time the layout-based kernel
timer tga;
tga.startGPU();
CUDA_LAUNCH(read_write_ker,ite,v1.toKernel(),v2.toKernel());
tga.stopGPU();
times_g[i] = tga.getwctGPU();
}
std::cout << "OpenFPM: " << times_g[i] << std::endl;
// time the equivalent raw-pointer kernel on the same buffers
timer tga2;
tga2.startGPU();
double * prp = (double *)v1.toKernel().getPointer<0>();
double * pos = (double *)v2.toKernel().getPointer<0>();
CUDA_LAUNCH(read_write_lin_ker,ite,pos,(struct ele *)prp);
tga2.stopGPU();
times[i] = tga2.getwctGPU();
std::cout << "Array: " << times[i] << std::endl;
}
double mean;
double dev;
standard_deviation(times_g,mean,dev);
double mean_;
double dev_;
standard_deviation(times,mean_,dev_);
report_vector_funcs.graphs.put("performance.vector_layout_gpu(0).y.data.mean",mean_/mean);
// Deviation of x/y = x/y^2 dy + 1/y dx
report_vector_funcs.graphs.put("performance.vector_layout_gpu(0).y.data.dev",mean_/(mean*mean)*dev + dev_ / mean );
// Fix: report the element count actually used in this section. The vectors
// below are filled with NADD_GPU elements, but the old code reported NADD.
report_vector_funcs.graphs.put("performance.vector_layout_gpu(1).funcs.nele",NADD_GPU);
report_vector_funcs.graphs.put("performance.vector_layout_gpu(1).funcs.name","read_write_inte");
for (size_t i = 0 ; i < N_STAT+1 ; i++)
{
// create a vector (interleaved / SoA layout this time)
openfpm::vector<aggregate<double,double[2],double[2][2]>,CudaMemory,memory_traits_inte> v1;
openfpm::vector<aggregate<double[2]>,CudaMemory,memory_traits_inte> v2;
// Point
aggregate<double[2]> p;
p.get<0>()[0] = 1.0;
p.get<0>()[1] = 2.0;
aggregate<double,double[2],double[2][2]> pa;
pa.get<0>() = 1.0;
pa.get<1>()[0] = 1.0;
pa.get<1>()[1] = 1.0;
pa.get<2>()[0][0] = 1.0;
pa.get<2>()[0][1] = 1.0;
pa.get<2>()[1][0] = 1.0;
pa.get<2>()[1][1] = 1.0;
// Add test
for (size_t j = 0 ; j < NADD_GPU ; j++)
{
v1.add(pa);
v2.add(p);
}
// time the layout-based kernel
timer tg;
tg.startGPU();
auto ite = v1.getGPUIterator(1536);
CUDA_LAUNCH(read_write_ker,ite,v1.toKernel(),v2.toKernel());
tg.stopGPU();
times2_g[i] = tg.getwctGPU();
std::cout << "OpenFPM inte: " << times2_g[i] << std::endl;
int sz = v1.size();
// raw-pointer kernel: one base pointer per property (SoA)
timer tga;
tga.startGPU();
double * prp0 = (double *)v1.toKernel().getPointer<0>();
double * prp1 = (double *)v1.toKernel().getPointer<1>();
double * prp2 = (double *)v1.toKernel().getPointer<2>();
double * pos = (double *)v2.toKernel().getPointer<0>();
CUDA_LAUNCH(read_write_inte_ker,ite,pos,prp0,prp1,prp2,sz);
tga.stopGPU();
times2[i] = tga.getwctGPU();
std::cout << "Array inte: " << times2[i] << std::endl;
}
double mean2;
double dev2;
standard_deviation(times2_g,mean2,dev2);
double mean2_;
double dev2_;
standard_deviation(times2,mean2_,dev2_);
report_vector_funcs.graphs.put("performance.vector_layout_gpu(1).y.data.mean",mean2_/mean2);
// Deviation of x/y = x/y^2 dy + 1/y dx
report_vector_funcs.graphs.put("performance.vector_layout_gpu(1).y.data.dev",mean2_/(mean2*mean2)*dev2 + dev2_ / mean2 );
}
// Assemble and emit the performance report: three line graphs (vector
// add/get timings, CPU layout ratio, GPU layout ratio) are described in the
// property tree, dumped to XML, compared against the reference results and
// rendered to an HTML GoogleChart page.
BOOST_AUTO_TEST_CASE(vector_performance_write_report)
{
// Create a graphs
// graph 0: add/get timings collected by the earlier test cases
report_vector_funcs.graphs.put("graphs.graph(0).type","line");
report_vector_funcs.graphs.add("graphs.graph(0).title","Vector add and get");
report_vector_funcs.graphs.add("graphs.graph(0).x.title","Tests");
report_vector_funcs.graphs.add("graphs.graph(0).y.title","Time seconds");
report_vector_funcs.graphs.add("graphs.graph(0).y.data(0).source","performance.vector(#).y.data.mean");
report_vector_funcs.graphs.add("graphs.graph(0).x.data(0).source","performance.vector(#).funcs.name");
report_vector_funcs.graphs.add("graphs.graph(0).y.data(0).title","Actual");
report_vector_funcs.graphs.add("graphs.graph(0).interpolation","lines");
// graph 1: CPU layout vs plain-array ratio
report_vector_funcs.graphs.put("graphs.graph(1).type","line");
report_vector_funcs.graphs.add("graphs.graph(1).title","Vector read write");
report_vector_funcs.graphs.add("graphs.graph(1).x.title","Layout");
report_vector_funcs.graphs.add("graphs.graph(1).y.title","Time seconds");
report_vector_funcs.graphs.add("graphs.graph(1).y.data(0).source","performance.vector_layout(#).y.data.mean");
report_vector_funcs.graphs.add("graphs.graph(1).x.data(0).source","performance.vector_layout(#).funcs.name");
report_vector_funcs.graphs.add("graphs.graph(1).y.data(0).title","Actual");
report_vector_funcs.graphs.add("graphs.graph(1).interpolation","lines");
// graph 2: GPU layout vs plain-array ratio
report_vector_funcs.graphs.put("graphs.graph(2).type","line");
report_vector_funcs.graphs.add("graphs.graph(2).title","Vector GPU read write");
report_vector_funcs.graphs.add("graphs.graph(2).x.title","Layout");
report_vector_funcs.graphs.add("graphs.graph(2).y.title","Time seconds");
report_vector_funcs.graphs.add("graphs.graph(2).y.data(0).source","performance.vector_layout_gpu(#).y.data.mean");
report_vector_funcs.graphs.add("graphs.graph(2).x.data(0).source","performance.vector_layout_gpu(#).funcs.name");
report_vector_funcs.graphs.add("graphs.graph(2).y.data(0).title","Actual");
report_vector_funcs.graphs.add("graphs.graph(2).interpolation","lines");
// serialize the tree, then render it against the reference results
boost::property_tree::xml_writer_settings<std::string> settings(' ', 4);
boost::property_tree::write_xml("vector_performance_funcs.xml", report_vector_funcs.graphs,std::locale(),settings);
GoogleChart cg;
std::string file_xml_ref(test_dir);
file_xml_ref += std::string("/openfpm_data/vector_performance_funcs_ref.xml");
StandardXMLPerformanceGraph("vector_performance_funcs.xml",file_xml_ref,cg);
addUpdtateTime(cg,1);
cg.write("vector_performance_funcs.html");
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END() |
fe887865efb8cf5a1ec90ce292bbf838bc09a8fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "common.h"
// Max-unpooling forward: scatter each input pixel to the output location
// recorded in bottom_mask (per (n,c) plane of size oheight*owidth).
template <typename Dtype>
__global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const int64_t* bottom_mask,
    const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) { //index here indices the input pixels
    int c = (index / iwidth / iheight) % channels;
    int n = index / iwidth / iheight / channels;
    // Fix: compute the (n,c) output-plane pointer into a local instead of
    // mutating the top_data parameter. The old `top_data += ...` accumulated
    // the offset across loop iterations, so a thread that processed more
    // than one element (grid-stride CUDA_KERNEL_LOOP) wrote out of bounds
    // from its second element on.
    Dtype* top_slice = top_data + (n*channels + c)*oheight*owidth;
    int maxind = bottom_mask[index] - TH_INDEX_BASE;
    top_slice[maxind] = bottom_data[index];
  }
}
// Max-unpooling backward: each input pixel gathers its gradient from the
// output location recorded in bottom_mask (per (n,c) plane).
template <typename Dtype>
__global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const int64_t* bottom_mask,
    const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int c = (index / iwidth / iheight) % channels;
    int n = index / iwidth / iheight / channels;
    // Fix: local plane pointer instead of mutating the top_diff parameter;
    // the old `top_diff += ...` accumulated the offset across loop
    // iterations, reading out of bounds whenever a thread processed more
    // than one element (grid-stride CUDA_KERNEL_LOOP).
    const Dtype* top_slice = top_diff + (n*channels + c)*oheight*owidth;
    int maxind = bottom_mask[index] - TH_INDEX_BASE;
    bottom_diff[index] = top_slice[maxind];
  }
}
#include "generic/SpatialMaxUnpooling.cu"
#include "THHGenerateFloatTypes.h"
| fe887865efb8cf5a1ec90ce292bbf838bc09a8fc.cu | #include "THCUNN.h"
#include "THCTensor.hpp"
#include "common.h"
// Max-unpooling forward: scatter each input pixel to the output location
// recorded in bottom_mask (per (n,c) plane of size oheight*owidth).
template <typename Dtype>
__global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const int64_t* bottom_mask,
    const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) { //index here indices the input pixels
    int c = (index / iwidth / iheight) % channels;
    int n = index / iwidth / iheight / channels;
    // Fix: compute the (n,c) output-plane pointer into a local instead of
    // mutating the top_data parameter. The old `top_data += ...` accumulated
    // the offset across loop iterations, so a thread that processed more
    // than one element (grid-stride CUDA_KERNEL_LOOP) wrote out of bounds
    // from its second element on.
    Dtype* top_slice = top_data + (n*channels + c)*oheight*owidth;
    int maxind = bottom_mask[index] - TH_INDEX_BASE;
    top_slice[maxind] = bottom_data[index];
  }
}
// Max-unpooling backward: each input pixel gathers its gradient from the
// output location recorded in bottom_mask (per (n,c) plane).
template <typename Dtype>
__global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const int64_t* bottom_mask,
    const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int c = (index / iwidth / iheight) % channels;
    int n = index / iwidth / iheight / channels;
    // Fix: local plane pointer instead of mutating the top_diff parameter;
    // the old `top_diff += ...` accumulated the offset across loop
    // iterations, reading out of bounds whenever a thread processed more
    // than one element (grid-stride CUDA_KERNEL_LOOP).
    const Dtype* top_slice = top_diff + (n*channels + c)*oheight*owidth;
    int maxind = bottom_mask[index] - TH_INDEX_BASE;
    bottom_diff[index] = top_slice[maxind];
  }
}
#include "generic/SpatialMaxUnpooling.cu"
#include "THCGenerateFloatTypes.h"
|
25e98a9e48059c96cff1a04d17a8655404ed5afb.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "util.h"
#include "CudaStream.h"
#include "CudaEvent.h"
#define USE_PINNED
// CUDA kernel implementing newton solve for
// f(x) = 0
// where
// f(x) = exp(cos(x)) - 2
__global__
void newton(int n, double *x) {
    // one problem instance per thread
    auto tid = threadIdx.x + blockDim.x * blockIdx.x;

    // the target function f(x) = exp(cos(x)) - 2 and its derivative
    auto f  = [] (double v) { return exp(cos(v))-2; };
    auto fp = [] (double v) { return -sin(v) * exp(cos(v)); };

    if(tid<n) {
        double xi = x[tid];
        // fixed number of Newton-Raphson steps: x <- x - f(x)/f'(x)
        for(int it=0; it<5; ++it) {
            xi -= f(xi)/fp(xi);
        }
        x[tid] = xi;
    }
}
// Driver: solves N independent Newton problems on the GPU, splitting the
// work into num_chunks chunks pipelined over three streams (H2D copy,
// kernel, D2H copy) so that transfers overlap with compute.
// argv[1] = log2(N) (default 20), argv[2] = number of chunks (default 1).
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 20);
size_t num_chunks = read_arg(argc, argv, 2, 1);
size_t N = 1 << pow;
auto size_in_bytes = N * sizeof(double);
std::cout << "memory copy overlap test of length N = " << N
<< " : " << size_in_bytes/(1024.*1024.) << "MB"
<< " with " << num_chunks << " chunks"
<< std::endl;
hipInit(0);
// device buffer, pinned host input (initialized to 1.5) and pinned host output
double* xd = malloc_device<double>(N);
double* xh = malloc_host_pinned<double>(N, 1.5);
double* x = malloc_host_pinned<double>(N);
int chunk_size = N/num_chunks; // assume N % num_chunks == 0
// precompute kernel launch configuration
auto block_dim = 128;
auto grid_dim = (chunk_size+block_dim-1)/block_dim;
CudaStream D2H_stream(true);
CudaStream H2D_stream(true);
CudaStream kernel_stream(true);
auto start_event = D2H_stream.enqueue_event();
for(int i=0; i<num_chunks; ++i) {
auto offset = i*chunk_size;
// copy chunk to device
copy_to_device_async<double>(xh+offset, xd+offset,
chunk_size, H2D_stream.stream());
// force the kernel stream to wait for the memcpy
auto H2D_event = H2D_stream.enqueue_event();
kernel_stream.wait_on_event(H2D_event);
// solve N nonlinear problems, i.e. find x[i] s.t. f(x[i])=0
hipLaunchKernelGGL(( newton), dim3(grid_dim), dim3(block_dim), 0, kernel_stream.stream(),
chunk_size, xd+offset);
cuda_check_last_kernel("newton kernel");
// copy chunk of result back to host
auto kernel_event = kernel_stream.enqueue_event();
D2H_stream.wait_on_event(kernel_event);
copy_to_host_async<double>(xd+offset, x+offset,
chunk_size, D2H_stream.stream());
}
// wall time from the first enqueued event to after the last D2H copy
auto end_event = D2H_stream.enqueue_event();
end_event.wait();
auto time_total = end_event.time_since(start_event);
std::cout << "-------\ntimings\n-------" << std::endl;
std::cout << "total : " << time_total << std::endl;
// check for errors: every returned root must satisfy |f(x)| <= 1e-10
auto f = [] (double x) { return exp(cos(x))-2.; };
auto errors = 0;
for(auto i=0; i<N; ++i) {
if(::fabs(f(x[i]))>1e-10) {
errors++;
}
}
if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
else std::cout << "\n============ PASSED" << std::endl;
hipFree(xd);
hipHostFree(xh);
hipHostFree(x);
return 0;
}
| 25e98a9e48059c96cff1a04d17a8655404ed5afb.cu | #include <iostream>
#include <cuda.h>
#include "util.h"
#include "CudaStream.h"
#include "CudaEvent.h"
#define USE_PINNED
// CUDA kernel implementing newton solve for
// f(x) = 0
// where
// f(x) = exp(cos(x)) - 2
__global__
void newton(int n, double *x) {
    // one problem instance per thread
    auto tid = threadIdx.x + blockDim.x * blockIdx.x;

    // the target function f(x) = exp(cos(x)) - 2 and its derivative
    auto f  = [] (double v) { return exp(cos(v))-2; };
    auto fp = [] (double v) { return -sin(v) * exp(cos(v)); };

    if(tid<n) {
        double xi = x[tid];
        // fixed number of Newton-Raphson steps: x <- x - f(x)/f'(x)
        for(int it=0; it<5; ++it) {
            xi -= f(xi)/fp(xi);
        }
        x[tid] = xi;
    }
}
// Driver: solves N independent Newton problems on the GPU, splitting the
// work into num_chunks chunks pipelined over three streams (H2D copy,
// kernel, D2H copy) so that transfers overlap with compute.
// argv[1] = log2(N) (default 20), argv[2] = number of chunks (default 1).
int main(int argc, char** argv) {
size_t pow = read_arg(argc, argv, 1, 20);
size_t num_chunks = read_arg(argc, argv, 2, 1);
size_t N = 1 << pow;
auto size_in_bytes = N * sizeof(double);
std::cout << "memory copy overlap test of length N = " << N
<< " : " << size_in_bytes/(1024.*1024.) << "MB"
<< " with " << num_chunks << " chunks"
<< std::endl;
cuInit(0);
// device buffer, pinned host input (initialized to 1.5) and pinned host output
double* xd = malloc_device<double>(N);
double* xh = malloc_host_pinned<double>(N, 1.5);
double* x = malloc_host_pinned<double>(N);
int chunk_size = N/num_chunks; // assume N % num_chunks == 0
// precompute kernel launch configuration
auto block_dim = 128;
auto grid_dim = (chunk_size+block_dim-1)/block_dim;
CudaStream D2H_stream(true);
CudaStream H2D_stream(true);
CudaStream kernel_stream(true);
auto start_event = D2H_stream.enqueue_event();
for(int i=0; i<num_chunks; ++i) {
auto offset = i*chunk_size;
// copy chunk to device
copy_to_device_async<double>(xh+offset, xd+offset,
chunk_size, H2D_stream.stream());
// force the kernel stream to wait for the memcpy
auto H2D_event = H2D_stream.enqueue_event();
kernel_stream.wait_on_event(H2D_event);
// solve N nonlinear problems, i.e. find x[i] s.t. f(x[i])=0
newton<<<grid_dim, block_dim, 0, kernel_stream.stream()>>>
(chunk_size, xd+offset);
cuda_check_last_kernel("newton kernel");
// copy chunk of result back to host
auto kernel_event = kernel_stream.enqueue_event();
D2H_stream.wait_on_event(kernel_event);
copy_to_host_async<double>(xd+offset, x+offset,
chunk_size, D2H_stream.stream());
}
// wall time from the first enqueued event to after the last D2H copy
auto end_event = D2H_stream.enqueue_event();
end_event.wait();
auto time_total = end_event.time_since(start_event);
std::cout << "-------\ntimings\n-------" << std::endl;
std::cout << "total : " << time_total << std::endl;
// check for errors: every returned root must satisfy |f(x)| <= 1e-10
auto f = [] (double x) { return exp(cos(x))-2.; };
auto errors = 0;
for(auto i=0; i<N; ++i) {
if(std::fabs(f(x[i]))>1e-10) {
errors++;
}
}
if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
else std::cout << "\n============ PASSED" << std::endl;
cudaFree(xd);
cudaFreeHost(xh);
cudaFreeHost(x);
return 0;
}
|
cf5d4537242741645edb2f9688bcd7a936a5b13d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bondsStructs.cuh"
#include "bondsKernelsGpu.hip"
#include "bondsKernelsCpu.cu"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <stdio.h>
#include <time.h>
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define NUM_BONDS_RUN 1000000
// Number of days in `month` (1-based, 1..12) for a common or leap year.
// No bounds checking: a month outside 1..12 indexes out of range.
int monthLengthCpu(int month, bool leapYear)
{
	// static const: build the lookup tables once instead of on every call
	static const int MonthLength[] = {
	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
	static const int MonthLeapLength[] = {
	31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
	return (leapYear? MonthLeapLength[month-1] : MonthLength[month-1]);
}
// Day-of-year offset of the first day of month `m` (1-based). m may be 13,
// in which case the total number of days in the year is returned.
// No bounds checking outside 1..13.
int monthOffsetCpu(int m, bool leapYear)
{
	// static const: build the lookup tables once instead of on every call
	static const int MonthOffset[] = {
	0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 };
	static const int MonthLeapOffset[] = {
	0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 };
	return (leapYear? MonthLeapOffset[m-1] : MonthOffset[m-1]);
}
// Serial-day offset of January 1st of year `y`, relative to 1900.
// Note YearOffset[1] == 366: the table counts 1900 as a leap year, matching
// the Excel-compatible serial-date convention (see isLeapCpu).
// Valid only for y in [1900, 1900 + table size); no bounds checking.
int yearOffsetCpu(int y)
{
	// static const: avoid rebuilding this large table on every call
	static const int YearOffset[] = {
	0, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288, 3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940, 7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862,10227,10593, 10958,11323,11688,12054,12419,12784,13149,13515,13880,14245, 14610,14976,15341,15706,16071,16437,16802,17167,17532,17898, 18263,18628,18993,19359,19724,20089,20454,20820,21185,21550, 21915,22281,22646,23011,23376,23742,24107,24472,24837,25203, 25568,25933,26298,26664,27029,27394,27759,28125,28490,28855, 29220,29586,29951,30316,30681,31047,31412,31777,32142,32508, 32873,33238,33603,33969,34334,34699,35064,35430,35795,36160, 36525,36891,37256,37621,37986,38352,38717,39082,39447,39813, 40178,40543,40908,41274,41639,42004,42369,42735,43100,43465, 43830,44196,44561,44926,45291,45657,46022,46387,46752,47118, 47483,47848,48213,48579,48944,49309,49674,50040,50405,50770, 51135,51501,51866,52231,52596,52962,53327,53692,54057,54423, 54788,55153,55518,55884,56249,56614,56979,57345,57710,58075, 58440,58806,59171,59536,59901,60267,60632,60997,61362,61728, 62093,62458,62823,63189,63554,63919,64284,64650,65015,65380, 65745,66111,66476,66841,67206,67572,67937,68302,68667,69033, 69398,69763,70128,70494,70859,71224,71589,71955,72320,72685, 73050,73415,73780,74145,74510,74876,75241,75606,75971,76337, 76702,77067,77432,77798,78163,78528,78893,79259,79624,79989, 80354,80720,81085,81450,81815,82181,82546,82911,83276,83642, 84007,84372,84737,85103,85468,85833,86198,86564,86929,87294, 87659,88025,88390,88755,89120,89486,89851,90216,90581,90947, 91312,91677,92042,92408,92773,93138,93503,93869,94234,94599, 94964,95330,95695,96060,96425,96791,97156,97521,97886,98252, 98617,98982,99347,99713,100078,100443,100808,101174,101539,101904, 102269,102635,103000,103365,103730,104096,104461,104826,105191,105557, 105922,106287,106652,107018,107383,107748,108113,108479,108844,109209, 109574 };
	return YearOffset[y-1900];
}
// Leap-year lookup for y >= 1900. The table deliberately marks 1900 as a
// leap year (first entry is true), matching the Excel-compatible serial-date
// convention used by yearOffsetCpu. No bounds checking beyond the table.
bool isLeapCpu(int y)
{
	// static const: avoid rebuilding this large table on every call
	static const bool YearIsLeap[] = {
	true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, false,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, false };
	return YearIsLeap[y-1900];
}
// Build a bondsDateStruct from day/month/year and fill in its serial day
// number (1900-based, Excel-style — see yearOffsetCpu/isLeapCpu).
bondsDateStruct intializeDateCpu(int d, int m, int y)
{
	bondsDateStruct date;

	date.day = d;
	date.month = m;
	date.year = y;

	// serial = day-in-month + days before this month + days before this year
	date.dateSerialNum = d + monthOffsetCpu(m, isLeapCpu(y)) + yearOffsetCpu(y);

	return date;
}
void runRepoEngine()
{
{
int numBonds = NUM_BONDS_RUN;
printf("\nNumber of Bonds: %d\n\n", numBonds);
inArgsStruct inArgsHost;
inArgsHost.discountCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct));
inArgsHost.repoCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct));
inArgsHost.currDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct));
inArgsHost.maturityDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct));
inArgsHost.bondCleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));
inArgsHost.bond = (bondStruct*)malloc(numBonds*sizeof(bondStruct));
inArgsHost.dummyStrike = (dataType*)malloc(numBonds*sizeof(dataType));
srand ( time(NULL) );
int numBond;
for (numBond = 0; numBond < numBonds; numBond++)
{
dataType repoRate = 0.07;
int repoCompounding = SIMPLE_INTEREST;
dataType repoCompoundFreq = 1;
bondsDateStruct bondIssueDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 1999 - (rand() % 2));
bondsDateStruct bondMaturityDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 2000 + (rand() % 2));
bondsDateStruct todaysDate = intializeDateCpu(bondMaturityDate.day-1,bondMaturityDate.month,bondMaturityDate.year);
bondStruct bond;
bond.startDate = bondIssueDate;
bond.maturityDate = bondMaturityDate;
bond.rate = 0.08 + ((float)rand()/(float)RAND_MAX - 0.5)*0.1;
dataType bondCouponFrequency = 2;
dataType bondCleanPrice = 89.97693786;
bondsYieldTermStruct bondCurve;
bondCurve.refDate = todaysDate;
bondCurve.calDate = todaysDate;
bondCurve.forward = -0.1f;
bondCurve.compounding = COMPOUNDED_INTEREST;
bondCurve.frequency = bondCouponFrequency;
bondCurve.dayCounter = USE_EXACT_DAY;
bondCurve.refDate = todaysDate;
bondCurve.calDate = todaysDate;
bondCurve.compounding = COMPOUNDED_INTEREST;
bondCurve.frequency = bondCouponFrequency;
dataType dummyStrike = 91.5745;
bondsYieldTermStruct repoCurve;
repoCurve.refDate = todaysDate;
repoCurve.calDate = todaysDate;
repoCurve.forward = repoRate;
repoCurve.compounding = repoCompounding;
repoCurve.frequency = repoCompoundFreq;
repoCurve.dayCounter = USE_SERIAL_NUMS;
inArgsHost.discountCurve[numBond] = bondCurve;
inArgsHost.repoCurve[numBond] = repoCurve;
inArgsHost.currDate[numBond] = todaysDate;
inArgsHost.maturityDate[numBond] = bondMaturityDate;
inArgsHost.bondCleanPrice[numBond] = bondCleanPrice;
inArgsHost.bond[numBond] = bond;
inArgsHost.dummyStrike[numBond] = dummyStrike;
}
printf("Inputs for bond with index %d\n", numBonds/2);
printf("Bond Issue Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].startDate.month, inArgsHost.bond[numBonds/2].startDate.day, inArgsHost.bond[numBonds/2].startDate.year);
printf("Bond Maturity Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].maturityDate.month, inArgsHost.bond[numBonds/2].maturityDate.day, inArgsHost.bond[numBonds/2].maturityDate.year);
printf("Bond rate: %f\n\n", inArgsHost.bond[numBonds/2].rate);
resultsStruct resultsHost;
resultsStruct resultsFromGpu;
resultsHost.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsHost.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsHost.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsHost.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsFromGpu.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsFromGpu.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsFromGpu.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsFromGpu.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));;
bondsYieldTermStruct* discountCurveGpu;
bondsYieldTermStruct* repoCurveGpu;
bondsDateStruct* currDateGpu;
bondsDateStruct* maturityDateGpu;
dataType* bondCleanPriceGpu;
bondStruct* bondGpu;
dataType* dummyStrikeGpu;
dataType* dirtyPriceGpu;
dataType* accruedAmountCurrDateGpu;
dataType* cleanPriceGpu;
dataType* bondForwardValGpu;
hipMalloc(&discountCurveGpu, numBonds*sizeof(bondsYieldTermStruct));
hipMalloc(&repoCurveGpu, numBonds*sizeof(bondsYieldTermStruct));
hipMalloc(&currDateGpu, numBonds*sizeof(bondsDateStruct));
hipMalloc(&maturityDateGpu, numBonds*sizeof(bondsDateStruct));
hipMalloc(&bondCleanPriceGpu, numBonds*sizeof(dataType));
hipMalloc(&bondGpu, numBonds*sizeof(bondStruct));
hipMalloc(&dummyStrikeGpu, numBonds*sizeof(dataType));
hipMalloc(&dirtyPriceGpu, numBonds*sizeof(dataType));
hipMalloc(&accruedAmountCurrDateGpu, numBonds*sizeof(dataType));
hipMalloc(&cleanPriceGpu, numBonds*sizeof(dataType));
hipMalloc(&bondForwardValGpu, numBonds*sizeof(dataType));
hipMemcpy(discountCurveGpu, inArgsHost.discountCurve, numBonds*sizeof(bondsYieldTermStruct), hipMemcpyHostToDevice);
hipMemcpy(repoCurveGpu, inArgsHost.repoCurve, numBonds*sizeof(bondsYieldTermStruct), hipMemcpyHostToDevice);
hipMemcpy(currDateGpu, inArgsHost.currDate, numBonds*sizeof(bondsDateStruct), hipMemcpyHostToDevice);
hipMemcpy(maturityDateGpu, inArgsHost.maturityDate, numBonds*sizeof(bondsDateStruct), hipMemcpyHostToDevice);
hipMemcpy(bondCleanPriceGpu, inArgsHost.bondCleanPrice, numBonds*sizeof(dataType), hipMemcpyHostToDevice);
hipMemcpy(bondGpu, inArgsHost.bond, numBonds*sizeof(bondStruct), hipMemcpyHostToDevice);
hipMemcpy(dummyStrikeGpu, inArgsHost.dummyStrike, numBonds*sizeof(dataType), hipMemcpyHostToDevice);
long seconds, useconds;
float mtimeCpu;
float mtimeGpu;
struct timeval start;
struct timeval end;
inArgsStruct inArgs;
inArgs.discountCurve = discountCurveGpu;
inArgs.repoCurve = repoCurveGpu;
inArgs.currDate = currDateGpu;
inArgs.maturityDate = maturityDateGpu;
inArgs.bondCleanPrice = bondCleanPriceGpu;
inArgs.bond = bondGpu;
inArgs.dummyStrike = dummyStrikeGpu;
resultsStruct results;
results.dirtyPrice = dirtyPriceGpu;
results.accruedAmountCurrDate = accruedAmountCurrDateGpu;
results.cleanPrice = cleanPriceGpu;
results.bondForwardVal = bondForwardValGpu;
gettimeofday(&start, NULL);
dim3 grid((ceil(((float)numBonds)/((float)256.0f))), 1, 1);
dim3 threads(256, 1, 1);hipLaunchKernelGGL((
getBondsResultsGpu) , dim3(grid), dim3(threads) , 0, 0, inArgs, results, numBonds);
hipDeviceSynchronize();
gettimeofday(&end, NULL);
hipMemcpy(resultsFromGpu.dirtyPrice, dirtyPriceGpu, numBonds*sizeof(dataType), hipMemcpyDeviceToHost);
hipMemcpy(resultsFromGpu.accruedAmountCurrDate, accruedAmountCurrDateGpu, numBonds*sizeof(dataType), hipMemcpyDeviceToHost);
hipMemcpy(resultsFromGpu.cleanPrice, cleanPriceGpu, numBonds*sizeof(dataType), hipMemcpyDeviceToHost);
hipMemcpy(resultsFromGpu.bondForwardVal, bondForwardValGpu, numBonds*sizeof(dataType), hipMemcpyDeviceToHost);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtimeGpu = ((seconds)* 1000 + ((float)useconds)/1000.0) + 0.5f;
printf("Run on GPU\n");
printf("Processing time on GPU (CUDA): %f (ms) \n\n", mtimeGpu);
double totPrice = 0.0;
int numBond1;
for (numBond1= 0; numBond1< numBonds; numBond1++)
{
totPrice += resultsFromGpu.dirtyPrice[numBond1];
}
printf("Sum of output dirty prices on GPU: %f\n", totPrice);
printf("Outputs on GPU for bond with index %d: \n", numBonds/2);
printf("Dirty Price: %f\n", resultsFromGpu.dirtyPrice[numBonds/2]);
printf("Accrued Amount: %f\n", resultsFromGpu.accruedAmountCurrDate[numBonds/2]);
printf("Clean Price: %f\n", resultsFromGpu.cleanPrice[numBonds/2]);
printf("Bond Forward Val: %f\n\n", resultsFromGpu.bondForwardVal[numBonds/2]);
gettimeofday(&start, NULL);
getBondsResultsCpu(inArgsHost, resultsHost, numBonds);
gettimeofday(&end, NULL);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtimeCpu = ((seconds)* 1000 + ((float)useconds)/1000.0) + 0.5f;
printf("Run on CPU\n");
printf("Processing time on CPU: %f (ms) \n\n", mtimeCpu);
totPrice = 0.0;
for (numBond1= 0; numBond1< numBonds; numBond1++)
{
totPrice += resultsHost.dirtyPrice[numBond1];
}
printf("Sum of output dirty prices on CPU: %f\n", totPrice);
printf("Outputs on CPU for bond with index %d: \n", numBonds/2);
printf("Dirty Price: %f\n", resultsHost.dirtyPrice[numBonds/2]);
printf("Accrued Amount: %f\n", resultsHost.accruedAmountCurrDate[numBonds/2]);
printf("Clean Price: %f\n", resultsHost.cleanPrice[numBonds/2]);
printf("Bond Forward Val: %f\n\n", resultsHost.bondForwardVal[numBonds/2]);
printf("Speedup using GPU: %f\n", mtimeCpu/mtimeGpu);
hipFree(discountCurveGpu);
hipFree(repoCurveGpu);
hipFree(currDateGpu);
hipFree(maturityDateGpu);
hipFree(bondCleanPriceGpu);
hipFree(bondGpu);
hipFree(dummyStrikeGpu);
hipFree(dirtyPriceGpu);
hipFree(accruedAmountCurrDateGpu);
hipFree(cleanPriceGpu);
hipFree(bondForwardValGpu);
free(resultsHost.dirtyPrice);
free(resultsHost.accruedAmountCurrDate);;
free(resultsHost.cleanPrice);;
free(resultsHost.bondForwardVal);;
free(resultsFromGpu.dirtyPrice);
free(resultsFromGpu.accruedAmountCurrDate);;
free(resultsFromGpu.cleanPrice);;
free(resultsFromGpu.bondForwardVal);
free(inArgsHost.discountCurve);
free(inArgsHost.repoCurve);
free(inArgsHost.currDate);
free(inArgsHost.maturityDate);
free(inArgsHost.bondCleanPrice);
free(inArgsHost.bond);
free(inArgsHost.dummyStrike);
}
}
int main( int argc, char** argv)
{
runRepoEngine();
return 0;
}
| cf5d4537242741645edb2f9688bcd7a936a5b13d.cu | #include "bondsStructs.cuh"
#include "bondsKernelsGpu.cu"
#include "bondsKernelsCpu.cu"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <stdio.h>
#include <time.h>
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define NUM_BONDS_RUN 1000000
int monthLengthCpu(int month, bool leapYear)
{
int MonthLength[] = {
31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
int MonthLeapLength[] = {
31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
return (leapYear? MonthLeapLength[month-1] : MonthLength[month-1]);
}
int monthOffsetCpu(int m, bool leapYear)
{
int MonthOffset[] = {
0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 };
int MonthLeapOffset[] = {
0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 };
return (leapYear? MonthLeapOffset[m-1] : MonthOffset[m-1]);
}
int yearOffsetCpu(int y)
{
int YearOffset[] = {
0, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288, 3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940, 7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862,10227,10593, 10958,11323,11688,12054,12419,12784,13149,13515,13880,14245, 14610,14976,15341,15706,16071,16437,16802,17167,17532,17898, 18263,18628,18993,19359,19724,20089,20454,20820,21185,21550, 21915,22281,22646,23011,23376,23742,24107,24472,24837,25203, 25568,25933,26298,26664,27029,27394,27759,28125,28490,28855, 29220,29586,29951,30316,30681,31047,31412,31777,32142,32508, 32873,33238,33603,33969,34334,34699,35064,35430,35795,36160, 36525,36891,37256,37621,37986,38352,38717,39082,39447,39813, 40178,40543,40908,41274,41639,42004,42369,42735,43100,43465, 43830,44196,44561,44926,45291,45657,46022,46387,46752,47118, 47483,47848,48213,48579,48944,49309,49674,50040,50405,50770, 51135,51501,51866,52231,52596,52962,53327,53692,54057,54423, 54788,55153,55518,55884,56249,56614,56979,57345,57710,58075, 58440,58806,59171,59536,59901,60267,60632,60997,61362,61728, 62093,62458,62823,63189,63554,63919,64284,64650,65015,65380, 65745,66111,66476,66841,67206,67572,67937,68302,68667,69033, 69398,69763,70128,70494,70859,71224,71589,71955,72320,72685, 73050,73415,73780,74145,74510,74876,75241,75606,75971,76337, 76702,77067,77432,77798,78163,78528,78893,79259,79624,79989, 80354,80720,81085,81450,81815,82181,82546,82911,83276,83642, 84007,84372,84737,85103,85468,85833,86198,86564,86929,87294, 87659,88025,88390,88755,89120,89486,89851,90216,90581,90947, 91312,91677,92042,92408,92773,93138,93503,93869,94234,94599, 94964,95330,95695,96060,96425,96791,97156,97521,97886,98252, 98617,98982,99347,99713,100078,100443,100808,101174,101539,101904, 102269,102635,103000,103365,103730,104096,104461,104826,105191,105557, 105922,106287,106652,107018,107383,107748,108113,108479,108844,109209, 109574 };
return YearOffset[y-1900];
}
bool isLeapCpu(int y)
{
bool YearIsLeap[] = {
true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, false,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false,false,false, true,false, false,false, true,false,false,false, true,false,false,false, false };
return YearIsLeap[y-1900];
}
bondsDateStruct intializeDateCpu(int d, int m, int y)
{
bondsDateStruct currDate;
currDate.day = d;
currDate.month = m;
currDate.year = y;
bool leap = isLeapCpu(y);
int offset = monthOffsetCpu(m,leap);
currDate.dateSerialNum = d + offset + yearOffsetCpu(y);
return currDate;
}
void runRepoEngine()
{
{
int numBonds = NUM_BONDS_RUN;
printf("\nNumber of Bonds: %d\n\n", numBonds);
inArgsStruct inArgsHost;
inArgsHost.discountCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct));
inArgsHost.repoCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct));
inArgsHost.currDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct));
inArgsHost.maturityDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct));
inArgsHost.bondCleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));
inArgsHost.bond = (bondStruct*)malloc(numBonds*sizeof(bondStruct));
inArgsHost.dummyStrike = (dataType*)malloc(numBonds*sizeof(dataType));
srand ( time(NULL) );
int numBond;
for (numBond = 0; numBond < numBonds; numBond++)
{
dataType repoRate = 0.07;
int repoCompounding = SIMPLE_INTEREST;
dataType repoCompoundFreq = 1;
bondsDateStruct bondIssueDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 1999 - (rand() % 2));
bondsDateStruct bondMaturityDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 2000 + (rand() % 2));
bondsDateStruct todaysDate = intializeDateCpu(bondMaturityDate.day-1,bondMaturityDate.month,bondMaturityDate.year);
bondStruct bond;
bond.startDate = bondIssueDate;
bond.maturityDate = bondMaturityDate;
bond.rate = 0.08 + ((float)rand()/(float)RAND_MAX - 0.5)*0.1;
dataType bondCouponFrequency = 2;
dataType bondCleanPrice = 89.97693786;
bondsYieldTermStruct bondCurve;
bondCurve.refDate = todaysDate;
bondCurve.calDate = todaysDate;
bondCurve.forward = -0.1f;
bondCurve.compounding = COMPOUNDED_INTEREST;
bondCurve.frequency = bondCouponFrequency;
bondCurve.dayCounter = USE_EXACT_DAY;
bondCurve.refDate = todaysDate;
bondCurve.calDate = todaysDate;
bondCurve.compounding = COMPOUNDED_INTEREST;
bondCurve.frequency = bondCouponFrequency;
dataType dummyStrike = 91.5745;
bondsYieldTermStruct repoCurve;
repoCurve.refDate = todaysDate;
repoCurve.calDate = todaysDate;
repoCurve.forward = repoRate;
repoCurve.compounding = repoCompounding;
repoCurve.frequency = repoCompoundFreq;
repoCurve.dayCounter = USE_SERIAL_NUMS;
inArgsHost.discountCurve[numBond] = bondCurve;
inArgsHost.repoCurve[numBond] = repoCurve;
inArgsHost.currDate[numBond] = todaysDate;
inArgsHost.maturityDate[numBond] = bondMaturityDate;
inArgsHost.bondCleanPrice[numBond] = bondCleanPrice;
inArgsHost.bond[numBond] = bond;
inArgsHost.dummyStrike[numBond] = dummyStrike;
}
printf("Inputs for bond with index %d\n", numBonds/2);
printf("Bond Issue Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].startDate.month, inArgsHost.bond[numBonds/2].startDate.day, inArgsHost.bond[numBonds/2].startDate.year);
printf("Bond Maturity Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].maturityDate.month, inArgsHost.bond[numBonds/2].maturityDate.day, inArgsHost.bond[numBonds/2].maturityDate.year);
printf("Bond rate: %f\n\n", inArgsHost.bond[numBonds/2].rate);
resultsStruct resultsHost;
resultsStruct resultsFromGpu;
resultsHost.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsHost.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsHost.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsHost.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsFromGpu.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsFromGpu.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsFromGpu.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsFromGpu.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));;
bondsYieldTermStruct* discountCurveGpu;
bondsYieldTermStruct* repoCurveGpu;
bondsDateStruct* currDateGpu;
bondsDateStruct* maturityDateGpu;
dataType* bondCleanPriceGpu;
bondStruct* bondGpu;
dataType* dummyStrikeGpu;
dataType* dirtyPriceGpu;
dataType* accruedAmountCurrDateGpu;
dataType* cleanPriceGpu;
dataType* bondForwardValGpu;
cudaMalloc(&discountCurveGpu, numBonds*sizeof(bondsYieldTermStruct));
cudaMalloc(&repoCurveGpu, numBonds*sizeof(bondsYieldTermStruct));
cudaMalloc(&currDateGpu, numBonds*sizeof(bondsDateStruct));
cudaMalloc(&maturityDateGpu, numBonds*sizeof(bondsDateStruct));
cudaMalloc(&bondCleanPriceGpu, numBonds*sizeof(dataType));
cudaMalloc(&bondGpu, numBonds*sizeof(bondStruct));
cudaMalloc(&dummyStrikeGpu, numBonds*sizeof(dataType));
cudaMalloc(&dirtyPriceGpu, numBonds*sizeof(dataType));
cudaMalloc(&accruedAmountCurrDateGpu, numBonds*sizeof(dataType));
cudaMalloc(&cleanPriceGpu, numBonds*sizeof(dataType));
cudaMalloc(&bondForwardValGpu, numBonds*sizeof(dataType));
cudaMemcpy(discountCurveGpu, inArgsHost.discountCurve, numBonds*sizeof(bondsYieldTermStruct), cudaMemcpyHostToDevice);
cudaMemcpy(repoCurveGpu, inArgsHost.repoCurve, numBonds*sizeof(bondsYieldTermStruct), cudaMemcpyHostToDevice);
cudaMemcpy(currDateGpu, inArgsHost.currDate, numBonds*sizeof(bondsDateStruct), cudaMemcpyHostToDevice);
cudaMemcpy(maturityDateGpu, inArgsHost.maturityDate, numBonds*sizeof(bondsDateStruct), cudaMemcpyHostToDevice);
cudaMemcpy(bondCleanPriceGpu, inArgsHost.bondCleanPrice, numBonds*sizeof(dataType), cudaMemcpyHostToDevice);
cudaMemcpy(bondGpu, inArgsHost.bond, numBonds*sizeof(bondStruct), cudaMemcpyHostToDevice);
cudaMemcpy(dummyStrikeGpu, inArgsHost.dummyStrike, numBonds*sizeof(dataType), cudaMemcpyHostToDevice);
long seconds, useconds;
float mtimeCpu;
float mtimeGpu;
struct timeval start;
struct timeval end;
inArgsStruct inArgs;
inArgs.discountCurve = discountCurveGpu;
inArgs.repoCurve = repoCurveGpu;
inArgs.currDate = currDateGpu;
inArgs.maturityDate = maturityDateGpu;
inArgs.bondCleanPrice = bondCleanPriceGpu;
inArgs.bond = bondGpu;
inArgs.dummyStrike = dummyStrikeGpu;
resultsStruct results;
results.dirtyPrice = dirtyPriceGpu;
results.accruedAmountCurrDate = accruedAmountCurrDateGpu;
results.cleanPrice = cleanPriceGpu;
results.bondForwardVal = bondForwardValGpu;
gettimeofday(&start, NULL);
dim3 grid((ceil(((float)numBonds)/((float)256.0f))), 1, 1);
dim3 threads(256, 1, 1);
getBondsResultsGpu <<< grid, threads >>> (inArgs, results, numBonds);
cudaThreadSynchronize();
gettimeofday(&end, NULL);
cudaMemcpy(resultsFromGpu.dirtyPrice, dirtyPriceGpu, numBonds*sizeof(dataType), cudaMemcpyDeviceToHost);
cudaMemcpy(resultsFromGpu.accruedAmountCurrDate, accruedAmountCurrDateGpu, numBonds*sizeof(dataType), cudaMemcpyDeviceToHost);
cudaMemcpy(resultsFromGpu.cleanPrice, cleanPriceGpu, numBonds*sizeof(dataType), cudaMemcpyDeviceToHost);
cudaMemcpy(resultsFromGpu.bondForwardVal, bondForwardValGpu, numBonds*sizeof(dataType), cudaMemcpyDeviceToHost);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtimeGpu = ((seconds)* 1000 + ((float)useconds)/1000.0) + 0.5f;
printf("Run on GPU\n");
printf("Processing time on GPU (CUDA): %f (ms) \n\n", mtimeGpu);
double totPrice = 0.0;
int numBond1;
for (numBond1= 0; numBond1< numBonds; numBond1++)
{
totPrice += resultsFromGpu.dirtyPrice[numBond1];
}
printf("Sum of output dirty prices on GPU: %f\n", totPrice);
printf("Outputs on GPU for bond with index %d: \n", numBonds/2);
printf("Dirty Price: %f\n", resultsFromGpu.dirtyPrice[numBonds/2]);
printf("Accrued Amount: %f\n", resultsFromGpu.accruedAmountCurrDate[numBonds/2]);
printf("Clean Price: %f\n", resultsFromGpu.cleanPrice[numBonds/2]);
printf("Bond Forward Val: %f\n\n", resultsFromGpu.bondForwardVal[numBonds/2]);
gettimeofday(&start, NULL);
getBondsResultsCpu(inArgsHost, resultsHost, numBonds);
gettimeofday(&end, NULL);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtimeCpu = ((seconds)* 1000 + ((float)useconds)/1000.0) + 0.5f;
printf("Run on CPU\n");
printf("Processing time on CPU: %f (ms) \n\n", mtimeCpu);
totPrice = 0.0;
for (numBond1= 0; numBond1< numBonds; numBond1++)
{
totPrice += resultsHost.dirtyPrice[numBond1];
}
printf("Sum of output dirty prices on CPU: %f\n", totPrice);
printf("Outputs on CPU for bond with index %d: \n", numBonds/2);
printf("Dirty Price: %f\n", resultsHost.dirtyPrice[numBonds/2]);
printf("Accrued Amount: %f\n", resultsHost.accruedAmountCurrDate[numBonds/2]);
printf("Clean Price: %f\n", resultsHost.cleanPrice[numBonds/2]);
printf("Bond Forward Val: %f\n\n", resultsHost.bondForwardVal[numBonds/2]);
printf("Speedup using GPU: %f\n", mtimeCpu/mtimeGpu);
cudaFree(discountCurveGpu);
cudaFree(repoCurveGpu);
cudaFree(currDateGpu);
cudaFree(maturityDateGpu);
cudaFree(bondCleanPriceGpu);
cudaFree(bondGpu);
cudaFree(dummyStrikeGpu);
cudaFree(dirtyPriceGpu);
cudaFree(accruedAmountCurrDateGpu);
cudaFree(cleanPriceGpu);
cudaFree(bondForwardValGpu);
free(resultsHost.dirtyPrice);
free(resultsHost.accruedAmountCurrDate);;
free(resultsHost.cleanPrice);;
free(resultsHost.bondForwardVal);;
free(resultsFromGpu.dirtyPrice);
free(resultsFromGpu.accruedAmountCurrDate);;
free(resultsFromGpu.cleanPrice);;
free(resultsFromGpu.bondForwardVal);
free(inArgsHost.discountCurve);
free(inArgsHost.repoCurve);
free(inArgsHost.currDate);
free(inArgsHost.maturityDate);
free(inArgsHost.bondCleanPrice);
free(inArgsHost.bond);
free(inArgsHost.dummyStrike);
}
}
int main( int argc, char** argv)
{
runRepoEngine();
return 0;
}
|
241008dc64bb1c1cbafdb7185e2f84e71b12cfcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// =============================================================================
// Auxiliary routine to compute piv final destination for the current step
/******************************************************************************/
static __device__ void setup_pivinfo_devfunc(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
int tid = threadIdx.x;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
// initialize pivinfo (could be done in a separate kernel using multiple thread block
for (int s =0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS < m) && (tid < MAX_NTHREADS) )
pivinfo[tid + s * MAX_NTHREADS] = tid + s * MAX_NTHREADS + 1;
}
__syncthreads();
if (tid == 0)
{
int i, itsreplacement, mynewrowid;
for (i=0; i < nb; i++) {
mynewrowid = ipiv[i]-1; //-1 to get the index in C
itsreplacement = pivinfo[mynewrowid];
pivinfo[mynewrowid] = pivinfo[i];
pivinfo[i] = itsreplacement;
}
}
}
/******************************************************************************/
static __device__ void setup_pivinfo_sm_devfunc(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
const int tx = threadIdx.x;
const int nth = blockDim.x;
__shared__ int spivinfo[10240]; // 40 KB of shared memory
int nchunk = magma_ceildiv( m, nth);
int m_ = m - (nchunk-1) * nth;
// initialize spivinfo
for(int s = 0; s < m-nth; s+= nth){
spivinfo[ s + tx ] = s + tx + 1;
}
if( tx < m_){
spivinfo[ (nchunk-1) * nth + tx ] = (nchunk-1) * nth + tx + 1;
}
__syncthreads();
if (tx == 0)
{
int i, itsreplacement, mynewrowid;
for (i=0; i < nb; i++) {
mynewrowid = ipiv[i]-1; //-1 to get the index in C
itsreplacement = spivinfo[mynewrowid];
spivinfo[mynewrowid] = spivinfo[i];
spivinfo[i] = itsreplacement;
}
}
__syncthreads();
// write pivinfo
for(int s = 0; s < m-nth; s+= nth){
pivinfo[ s + tx] = spivinfo[ s + tx ];
}
if( tx < m_){
pivinfo[ (nchunk-1) * nth + tx ] = (magma_int_t)(spivinfo[ (nchunk-1) * nth + tx ]);
}
}
/******************************************************************************/
__global__ void setup_pivinfo_kernel_batched(magma_int_t **pivinfo_array, magma_int_t **ipiv_array, int ipiv_offset, int m, int nb)
{
int batchid = blockIdx.x;
setup_pivinfo_devfunc(pivinfo_array[batchid], ipiv_array[batchid]+ipiv_offset, m, nb);
}
/******************************************************************************/
__global__ void setup_pivinfo_kernel(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
setup_pivinfo_devfunc(pivinfo, ipiv, m, nb);
}
/******************************************************************************/
__global__ void setup_pivinfo_sm_kernel(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
setup_pivinfo_sm_devfunc(pivinfo, ipiv, m, nb);
}
/******************************************************************************/
extern "C" void
setup_pivinfo_batched( magma_int_t **pivinfo_array, magma_int_t **ipiv_array, magma_int_t ipiv_offset,
magma_int_t m, magma_int_t nb,
magma_int_t batchCount,
magma_queue_t queue)
{
if (nb == 0 ) return;
hipLaunchKernelGGL(( setup_pivinfo_kernel_batched)
, dim3(batchCount), dim3(min(m, MAX_NTHREADS)), 0, queue->cuda_stream() ,
pivinfo_array, ipiv_array, ipiv_offset, m, nb);
}
/******************************************************************************/
extern "C" void
setup_pivinfo( magma_int_t *pivinfo, magma_int_t *ipiv,
magma_int_t m, magma_int_t nb,
magma_queue_t queue)
{
if (nb == 0 ) return;
if( m > 10240 ){
hipLaunchKernelGGL(( setup_pivinfo_kernel), dim3(1), dim3(min(m, MAX_NTHREADS)), 0, queue->cuda_stream() , pivinfo, ipiv, m, nb);
}
else{
hipLaunchKernelGGL(( setup_pivinfo_sm_kernel), dim3(1), dim3(min(m, MAX_NTHREADS)), 0, queue->cuda_stream() , pivinfo, ipiv, m, nb);
}
}
// =============================================================================
// Auxiliary routine to adjust ipiv
/******************************************************************************/
static __device__ void adjust_ipiv_devfunc(magma_int_t *ipiv, int m, int offset)
{
int tid = threadIdx.x;
if (tid < m)
{
//printf("ipiv[%d]: %d -> %d\n", tid, ipiv[tid], ipiv[tid]+offset);
ipiv[tid] += offset;
}
}
/******************************************************************************/
__global__ void adjust_ipiv_kernel_batched(magma_int_t **ipiv_array, int ipiv_offset, int m, int offset)
{
int batchid = blockIdx.x;
adjust_ipiv_devfunc(ipiv_array[batchid] + ipiv_offset, m, offset);
}
/******************************************************************************/
__global__ void adjust_ipiv_kernel(magma_int_t *ipiv, int m, int offset)
{
adjust_ipiv_devfunc(ipiv, m, offset);
}
/******************************************************************************/
extern "C" void
adjust_ipiv_batched( magma_int_t **ipiv_array, magma_int_t ipiv_offset,
magma_int_t m, magma_int_t offset,
magma_int_t batchCount, magma_queue_t queue)
{
if (offset == 0 ) return;
if ( m > MAX_NTHREADS)
{
fprintf( stderr, "%s: m=%lld > %lld, not supported\n",
__func__, (long long) m, (long long) MAX_NTHREADS );
return;
}
hipLaunchKernelGGL(( adjust_ipiv_kernel_batched)
, dim3(batchCount), dim3(m), 0, queue->cuda_stream() ,
ipiv_array, ipiv_offset, m, offset);
}
/******************************************************************************/
extern "C" void
adjust_ipiv( magma_int_t *ipiv,
magma_int_t m, magma_int_t offset,
magma_queue_t queue)
{
if (offset == 0 ) return;
if ( m > 1024)
{
fprintf( stderr, "%s: m=%lld > %lld, not supported\n",
__func__, (long long) m, (long long) MAX_NTHREADS );
return;
}
hipLaunchKernelGGL(( adjust_ipiv_kernel)
, dim3(1), dim3(m), 0, queue->cuda_stream() ,
ipiv, m, offset);
}
| 241008dc64bb1c1cbafdb7185e2f84e71b12cfcb.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// =============================================================================
// Auxiliary routine to compute piv final destination for the current step
/******************************************************************************/
static __device__ void setup_pivinfo_devfunc(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
int tid = threadIdx.x;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
// initialize pivinfo (could be done in a separate kernel using multiple thread block
for (int s =0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS < m) && (tid < MAX_NTHREADS) )
pivinfo[tid + s * MAX_NTHREADS] = tid + s * MAX_NTHREADS + 1;
}
__syncthreads();
if (tid == 0)
{
int i, itsreplacement, mynewrowid;
for (i=0; i < nb; i++) {
mynewrowid = ipiv[i]-1; //-1 to get the index in C
itsreplacement = pivinfo[mynewrowid];
pivinfo[mynewrowid] = pivinfo[i];
pivinfo[i] = itsreplacement;
}
}
}
/******************************************************************************/
static __device__ void setup_pivinfo_sm_devfunc(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
const int tx = threadIdx.x;
const int nth = blockDim.x;
__shared__ int spivinfo[10240]; // 40 KB of shared memory
int nchunk = magma_ceildiv( m, nth);
int m_ = m - (nchunk-1) * nth;
// initialize spivinfo
for(int s = 0; s < m-nth; s+= nth){
spivinfo[ s + tx ] = s + tx + 1;
}
if( tx < m_){
spivinfo[ (nchunk-1) * nth + tx ] = (nchunk-1) * nth + tx + 1;
}
__syncthreads();
if (tx == 0)
{
int i, itsreplacement, mynewrowid;
for (i=0; i < nb; i++) {
mynewrowid = ipiv[i]-1; //-1 to get the index in C
itsreplacement = spivinfo[mynewrowid];
spivinfo[mynewrowid] = spivinfo[i];
spivinfo[i] = itsreplacement;
}
}
__syncthreads();
// write pivinfo
for(int s = 0; s < m-nth; s+= nth){
pivinfo[ s + tx] = spivinfo[ s + tx ];
}
if( tx < m_){
pivinfo[ (nchunk-1) * nth + tx ] = (magma_int_t)(spivinfo[ (nchunk-1) * nth + tx ]);
}
}
/******************************************************************************/
__global__ void setup_pivinfo_kernel_batched(magma_int_t **pivinfo_array, magma_int_t **ipiv_array, int ipiv_offset, int m, int nb)
{
int batchid = blockIdx.x;
setup_pivinfo_devfunc(pivinfo_array[batchid], ipiv_array[batchid]+ipiv_offset, m, nb);
}
/******************************************************************************/
__global__ void setup_pivinfo_kernel(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
setup_pivinfo_devfunc(pivinfo, ipiv, m, nb);
}
/******************************************************************************/
__global__ void setup_pivinfo_sm_kernel(magma_int_t *pivinfo, magma_int_t *ipiv, int m, int nb)
{
setup_pivinfo_sm_devfunc(pivinfo, ipiv, m, nb);
}
/******************************************************************************/
extern "C" void
setup_pivinfo_batched( magma_int_t **pivinfo_array, magma_int_t **ipiv_array, magma_int_t ipiv_offset,
magma_int_t m, magma_int_t nb,
magma_int_t batchCount,
magma_queue_t queue)
{
if (nb == 0 ) return;
setup_pivinfo_kernel_batched
<<< batchCount, min(m, MAX_NTHREADS), 0, queue->cuda_stream() >>>
(pivinfo_array, ipiv_array, ipiv_offset, m, nb);
}
/******************************************************************************/
extern "C" void
setup_pivinfo( magma_int_t *pivinfo, magma_int_t *ipiv,
magma_int_t m, magma_int_t nb,
magma_queue_t queue)
{
if (nb == 0 ) return;
if( m > 10240 ){
setup_pivinfo_kernel<<< 1, min(m, MAX_NTHREADS), 0, queue->cuda_stream() >>>(pivinfo, ipiv, m, nb);
}
else{
setup_pivinfo_sm_kernel<<< 1, min(m, MAX_NTHREADS), 0, queue->cuda_stream() >>>(pivinfo, ipiv, m, nb);
}
}
// =============================================================================
// Auxiliary routine to adjust ipiv
/******************************************************************************/
static __device__ void adjust_ipiv_devfunc(magma_int_t *ipiv, int m, int offset)
{
int tid = threadIdx.x;
if (tid < m)
{
//printf("ipiv[%d]: %d -> %d\n", tid, ipiv[tid], ipiv[tid]+offset);
ipiv[tid] += offset;
}
}
/******************************************************************************/
__global__ void adjust_ipiv_kernel_batched(magma_int_t **ipiv_array, int ipiv_offset, int m, int offset)
{
int batchid = blockIdx.x;
adjust_ipiv_devfunc(ipiv_array[batchid] + ipiv_offset, m, offset);
}
/******************************************************************************/
__global__ void adjust_ipiv_kernel(magma_int_t *ipiv, int m, int offset)
{
adjust_ipiv_devfunc(ipiv, m, offset);
}
/******************************************************************************/
extern "C" void
adjust_ipiv_batched( magma_int_t **ipiv_array, magma_int_t ipiv_offset,
magma_int_t m, magma_int_t offset,
magma_int_t batchCount, magma_queue_t queue)
{
if (offset == 0 ) return;
if ( m > MAX_NTHREADS)
{
fprintf( stderr, "%s: m=%lld > %lld, not supported\n",
__func__, (long long) m, (long long) MAX_NTHREADS );
return;
}
adjust_ipiv_kernel_batched
<<< batchCount, m, 0, queue->cuda_stream() >>>
(ipiv_array, ipiv_offset, m, offset);
}
/******************************************************************************/
extern "C" void
adjust_ipiv( magma_int_t *ipiv,
magma_int_t m, magma_int_t offset,
magma_queue_t queue)
{
if (offset == 0 ) return;
if ( m > 1024)
{
fprintf( stderr, "%s: m=%lld > %lld, not supported\n",
__func__, (long long) m, (long long) MAX_NTHREADS );
return;
}
adjust_ipiv_kernel
<<< 1, m, 0, queue->cuda_stream() >>>
(ipiv, m, offset);
}
|
2ecdb7517d5940d40dcae52b51391e40217501d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_generate_chunk_kernel [7][1];
static int dims_generate_chunk_kernel_h [7][1] = {0};
//user function
__device__
void generate_chunk_kernel_gpu(const ACC<double> &vertexx,
const ACC<double> &vertexy,
ACC<double> &energy0,
ACC<double> &density0,
ACC<double> &u0,
const ACC<double> &cellx,
const ACC<double> &celly) {
double radius, x_cent, y_cent;
int is_in = 0;
int is_in2 = 0;
energy0(0,0)= states[0].energy;
density0(0,0)= states[0].density;
for(int i = 1; i<number_of_states; i++) {
x_cent=states[i].xmin;
y_cent=states[i].ymin;
is_in = 0;
is_in2 = 0;
if (states[i].geometry == g_rect) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
if(vertexx(1+i1,0) >= states[i].xmin && vertexx(0+i1,0) < states[i].xmax) {
if(vertexy(0,1+j1) >= states[i].ymin && vertexy(0,0+j1) < states[i].ymax) {
is_in = 1;
}
}
}
}
if(vertexx(1,0) >= states[i].xmin && vertexx(0,0) < states[i].xmax) {
if(vertexy(0,1) >= states[i].ymin && vertexy(0,0) < states[i].ymax) {
is_in2 = 1;
}
}
if (is_in2) {
energy0(0,0) = states[i].energy;
density0(0,0) = states[i].density;
}
}
else if(states[i].geometry == g_circ) {
for (int i1 = -1; i1 <= 0; i1++) {
for (int j1 = -1; j1 <= 0; j1++) {
radius = sqrt ((cellx(i1,0) - x_cent) * (cellx(i1,0) - x_cent) +
(celly(0,j1) - y_cent) * (celly(0,j1) - y_cent));
if (radius <= states[i].radius) {
is_in = 1;
}
}
}
if (radius <= states[i].radius) is_in2 = 1;
if (is_in2) {
energy0(0,0) = states[i].energy;
density0(0,0) = states[i].density;
}
}
else if(states[i].geometry == g_point) {
if(vertexx(0,0) == x_cent && vertexy(0,0) == y_cent) {
energy0(0,0) = states[i].energy;
density0(0,0) = states[i].density;
}
}
}
u0(0,0) = energy0(0,0) * density0(0,0);
}
__global__ void ops_generate_chunk_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 0*1 * dims_generate_chunk_kernel[0][0];
arg1 += idx_x * 0*1 + idx_y * 1*1 * dims_generate_chunk_kernel[1][0];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_generate_chunk_kernel[2][0];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_generate_chunk_kernel[3][0];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_generate_chunk_kernel[4][0];
arg5 += idx_x * 1*1 + idx_y * 0*1 * dims_generate_chunk_kernel[5][0];
arg6 += idx_x * 0*1 + idx_y * 1*1 * dims_generate_chunk_kernel[6][0];
if (idx_x < size0 && idx_y < size1) {
const ACC<double> argp0(dims_generate_chunk_kernel[0][0], arg0);
const ACC<double> argp1(dims_generate_chunk_kernel[1][0], arg1);
ACC<double> argp2(dims_generate_chunk_kernel[2][0], arg2);
ACC<double> argp3(dims_generate_chunk_kernel[3][0], arg3);
ACC<double> argp4(dims_generate_chunk_kernel[4][0], arg4);
const ACC<double> argp5(dims_generate_chunk_kernel[5][0], arg5);
const ACC<double> argp6(dims_generate_chunk_kernel[6][0], arg6);
generate_chunk_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6);
}
}
// host stub function
// Auto-generated host wrapper (ops.py): computes the locally owned iteration
// range, refreshes the per-dataset x-dimensions in device constant memory
// when they change, builds each argument's base device pointer, performs
// halo / host-device exchanges, launches ops_generate_chunk_kernel, and
// records timing and transfer diagnostics.
#ifndef OPS_LAZY
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
// Deferred-execution variant: unpacks the arguments from the descriptor
// queued by the OPS_LAZY front-end.
void ops_par_loop_generate_chunk_kernel_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
  #endif
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,7,range,1)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(1,"generate_chunk_kernel");
    OPS_kernels[1].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI
  #ifdef OPS_MPI
  int arg_idx[2];
  #endif
  #ifdef OPS_MPI
  if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  // Upload dataset leading dimensions to constant memory only when any of
  // them changed since the previous invocation (cached in the _h shadow).
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  int xdim5 = args[5].dat->size[0];
  int xdim6 = args[6].dat->size[0];
  if (xdim0 != dims_generate_chunk_kernel_h[0][0] || xdim1 != dims_generate_chunk_kernel_h[1][0] || xdim2 != dims_generate_chunk_kernel_h[2][0] || xdim3 != dims_generate_chunk_kernel_h[3][0] || xdim4 != dims_generate_chunk_kernel_h[4][0] || xdim5 != dims_generate_chunk_kernel_h[5][0] || xdim6 != dims_generate_chunk_kernel_h[6][0]) {
    dims_generate_chunk_kernel_h[0][0] = xdim0;
    dims_generate_chunk_kernel_h[1][0] = xdim1;
    dims_generate_chunk_kernel_h[2][0] = xdim2;
    dims_generate_chunk_kernel_h[3][0] = xdim3;
    dims_generate_chunk_kernel_h[4][0] = xdim4;
    dims_generate_chunk_kernel_h[5][0] = xdim5;
    dims_generate_chunk_kernel_h[6][0] = xdim6;
    cutilSafeCall(hipMemcpyToSymbol( dims_generate_chunk_kernel, dims_generate_chunk_kernel_h, sizeof(dims_generate_chunk_kernel)));
  }
  // 2D launch geometry rounded up to cover the x/y extents.
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
  int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
  char *p_a[7];
  //set up initial pointers
  // Byte offset of the range's first element within each device dataset.
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
           dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    (start[1] * args[2].stencil->stride[1]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
           dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    (start[1] * args[3].stencil->stride[1]);
  p_a[3] = (char *)args[3].data_d + base3;
  int base4 = args[4].dat->base_offset +
           dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    (start[1] * args[4].stencil->stride[1]);
  p_a[4] = (char *)args[4].data_d + base4;
  int base5 = args[5].dat->base_offset +
           dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    (start[1] * args[5].stencil->stride[1]);
  p_a[5] = (char *)args[5].data_d + base5;
  int base6 = args[6].dat->base_offset +
           dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
  base6 = base6+ dat6 *
    args[6].dat->size[0] *
    (start[1] * args[6].stencil->stride[1]);
  p_a[6] = (char *)args[6].data_d + base6;
  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 7);
  ops_halo_exchanges(args,7,range);
  #endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[1].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0)
    hipLaunchKernelGGL(( ops_generate_chunk_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
         (double *)p_a[2], (double *)p_a[3],
         (double *)p_a[4], (double *)p_a[5],
         (double *)p_a[6],x_size, y_size);
  cutilSafeCall(hipGetLastError());  // catch launch-configuration errors
  if (OPS_diags>1) {
    // synchronize only when diagnostics are on, so kernel time is measurable
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[1].time += t1-t2;
  }
  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 7);
  // args 2..4 are the datasets the kernel writes (non-const accessors).
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  #endif
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[1].mpi_time += t2-t1;
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
  }
}
#ifdef OPS_LAZY
// Lazy front-end: instead of executing immediately, packs the loop into an
// ops_kernel_descriptor identified by a djb2-style hash over the kernel
// index, iteration range and dat indices, then enqueues it; the queued
// descriptor is run later by ops_par_loop_generate_chunk_kernel_execute.
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 1;
  desc->hash = 5381;  // djb2 seed
  desc->hash = ((desc->hash << 5) + desc->hash) + 1;
  for ( int i=0; i<4; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 7;
  desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
  desc->function = ops_par_loop_generate_chunk_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(1,"generate_chunk_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
| 2ecdb7517d5940d40dcae52b51391e40217501d3.cu | //
// auto-generated by ops.py
//
__constant__ int dims_generate_chunk_kernel [7][1];
static int dims_generate_chunk_kernel_h [7][1] = {0};
//user function
// Per-cell chunk initialisation: sets energy0/density0 from the background
// state (states[0]), then lets each later state override them according to
// its geometry (rectangle, circle, or point), and finally derives
// u0 = energy0 * density0.
// NOTE(review): `is_in` is accumulated over the 2x2 vertex neighbourhood but
// never read; only `is_in2` (tested on the cell's own vertices / last loop
// iteration) decides the update — presumably intentional in the generated
// code, but worth confirming against the original CloverLeaf kernel.
__device__
void generate_chunk_kernel_gpu(const ACC<double> &vertexx,
  const ACC<double> &vertexy,
  ACC<double> &energy0,
  ACC<double> &density0,
  ACC<double> &u0,
  const ACC<double> &cellx,
  const ACC<double> &celly) {
  double radius, x_cent, y_cent;
  int is_in = 0;
  int is_in2 = 0;
  // Background state.
  energy0(0,0)= states[0].energy;
  density0(0,0)= states[0].density;
  for(int i = 1; i<number_of_states; i++) {
    x_cent=states[i].xmin;
    y_cent=states[i].ymin;
    is_in = 0;
    is_in2 = 0;
    if (states[i].geometry == g_rect) {
      for (int i1 = -1; i1 <= 0; i1++) {
        for (int j1 = -1; j1 <= 0; j1++) {
          if(vertexx(1+i1,0) >= states[i].xmin && vertexx(0+i1,0) < states[i].xmax) {
            if(vertexy(0,1+j1) >= states[i].ymin && vertexy(0,0+j1) < states[i].ymax) {
              is_in = 1;
            }
          }
        }
      }
      // Cell is inside the rectangle if its own vertex pair straddles it.
      if(vertexx(1,0) >= states[i].xmin && vertexx(0,0) < states[i].xmax) {
        if(vertexy(0,1) >= states[i].ymin && vertexy(0,0) < states[i].ymax) {
          is_in2 = 1;
        }
      }
      if (is_in2) {
        energy0(0,0) = states[i].energy;
        density0(0,0) = states[i].density;
      }
    }
    else if(states[i].geometry == g_circ) {
      for (int i1 = -1; i1 <= 0; i1++) {
        for (int j1 = -1; j1 <= 0; j1++) {
          radius = sqrt ((cellx(i1,0) - x_cent) * (cellx(i1,0) - x_cent) +
                   (celly(0,j1) - y_cent) * (celly(0,j1) - y_cent));
          if (radius <= states[i].radius) {
            is_in = 1;
          }
        }
      }
      // After the loop, `radius` holds the (i1=0, j1=0) value, i.e. the
      // distance from (cellx(0,0), celly(0,0)) — presumably the cell-centre
      // coordinates — to the state centre; TODO confirm against the dats.
      if (radius <= states[i].radius) is_in2 = 1;
      if (is_in2) {
        energy0(0,0) = states[i].energy;
        density0(0,0) = states[i].density;
      }
    }
    else if(states[i].geometry == g_point) {
      // Point state: exact match of the cell's first vertex coordinates.
      if(vertexx(0,0) == x_cent && vertexy(0,0) == y_cent) {
        energy0(0,0) = states[i].energy;
        density0(0,0) = states[i].density;
      }
    }
  }
  u0(0,0) = energy0(0,0) * density0(0,0);
}
// Auto-generated (ops.py) device wrapper: one thread per (idx_x, idx_y) point
// of the iteration range. Each argument pointer is advanced to this thread's
// element using the per-dataset x-dimensions held in device constant memory
// (dims_generate_chunk_kernel); a 0* multiplier marks a direction in which
// the dataset does not vary (arg0: x-only; arg1, arg6: y-only).
__global__ void ops_generate_chunk_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
int size0,
int size1 ){
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  arg0 += idx_x * 1*1 + idx_y * 0*1 * dims_generate_chunk_kernel[0][0];
  arg1 += idx_x * 0*1 + idx_y * 1*1 * dims_generate_chunk_kernel[1][0];
  arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_generate_chunk_kernel[2][0];
  arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_generate_chunk_kernel[3][0];
  arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_generate_chunk_kernel[4][0];
  arg5 += idx_x * 1*1 + idx_y * 0*1 * dims_generate_chunk_kernel[5][0];
  arg6 += idx_x * 0*1 + idx_y * 1*1 * dims_generate_chunk_kernel[6][0];
  // Bounds guard: the launch grid is rounded up, so overhanging threads exit.
  if (idx_x < size0 && idx_y < size1) {
    // const accessors = read-only datasets; argp2..argp4 are written.
    const ACC<double> argp0(dims_generate_chunk_kernel[0][0], arg0);
    const ACC<double> argp1(dims_generate_chunk_kernel[1][0], arg1);
    ACC<double> argp2(dims_generate_chunk_kernel[2][0], arg2);
    ACC<double> argp3(dims_generate_chunk_kernel[3][0], arg3);
    ACC<double> argp4(dims_generate_chunk_kernel[4][0], arg4);
    const ACC<double> argp5(dims_generate_chunk_kernel[5][0], arg5);
    const ACC<double> argp6(dims_generate_chunk_kernel[6][0], arg6);
    generate_chunk_kernel_gpu(argp0, argp1, argp2, argp3,
                              argp4, argp5, argp6);
  }
}
// host stub function
// Auto-generated host wrapper (ops.py): computes the locally owned iteration
// range, refreshes the per-dataset x-dimensions in device constant memory
// when they change, builds each argument's base device pointer, performs
// halo / host-device exchanges, launches ops_generate_chunk_kernel, and
// records timing and transfer diagnostics.
#ifndef OPS_LAZY
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
 ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
// Deferred-execution variant: unpacks the arguments from the descriptor
// queued by the OPS_LAZY front-end.
void ops_par_loop_generate_chunk_kernel_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  #if OPS_MPI
  ops_block block = desc->block;
  #endif
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
  #endif
  //Timing
  double t1,t2,c1,c2;
  ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
  #if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args,7,range,1)) return;
  #endif
  if (OPS_diags > 1) {
    ops_timing_realloc(1,"generate_chunk_kernel");
    OPS_kernels[1].count++;
    ops_timers_core(&c1,&t1);
  }
  //compute locally allocated range for the sub-block
  int start[2];
  int end[2];
  #if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  #endif //OPS_MPI
  #ifdef OPS_MPI
  int arg_idx[2];
  #endif
  #ifdef OPS_MPI
  if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
  #else //OPS_MPI
  for ( int n=0; n<2; n++ ){
    start[n] = range[2*n];end[n] = range[2*n+1];
  }
  #endif
  // Upload dataset leading dimensions to constant memory only when any of
  // them changed since the previous invocation (cached in the _h shadow).
  int xdim0 = args[0].dat->size[0];
  int xdim1 = args[1].dat->size[0];
  int xdim2 = args[2].dat->size[0];
  int xdim3 = args[3].dat->size[0];
  int xdim4 = args[4].dat->size[0];
  int xdim5 = args[5].dat->size[0];
  int xdim6 = args[6].dat->size[0];
  if (xdim0 != dims_generate_chunk_kernel_h[0][0] || xdim1 != dims_generate_chunk_kernel_h[1][0] || xdim2 != dims_generate_chunk_kernel_h[2][0] || xdim3 != dims_generate_chunk_kernel_h[3][0] || xdim4 != dims_generate_chunk_kernel_h[4][0] || xdim5 != dims_generate_chunk_kernel_h[5][0] || xdim6 != dims_generate_chunk_kernel_h[6][0]) {
    dims_generate_chunk_kernel_h[0][0] = xdim0;
    dims_generate_chunk_kernel_h[1][0] = xdim1;
    dims_generate_chunk_kernel_h[2][0] = xdim2;
    dims_generate_chunk_kernel_h[3][0] = xdim3;
    dims_generate_chunk_kernel_h[4][0] = xdim4;
    dims_generate_chunk_kernel_h[5][0] = xdim5;
    dims_generate_chunk_kernel_h[6][0] = xdim6;
    cutilSafeCall(cudaMemcpyToSymbol( dims_generate_chunk_kernel, dims_generate_chunk_kernel_h, sizeof(dims_generate_chunk_kernel)));
  }
  // 2D launch geometry rounded up to cover the x/y extents.
  int x_size = MAX(0,end[0]-start[0]);
  int y_size = MAX(0,end[1]-start[1]);
  dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
  dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
  int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
  char *p_a[7];
  //set up initial pointers
  // Byte offset of the range's first element within each device dataset.
  int base0 = args[0].dat->base_offset +
           dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0+ dat0 *
    args[0].dat->size[0] *
    (start[1] * args[0].stencil->stride[1]);
  p_a[0] = (char *)args[0].data_d + base0;
  int base1 = args[1].dat->base_offset +
           dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1+ dat1 *
    args[1].dat->size[0] *
    (start[1] * args[1].stencil->stride[1]);
  p_a[1] = (char *)args[1].data_d + base1;
  int base2 = args[2].dat->base_offset +
           dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2+ dat2 *
    args[2].dat->size[0] *
    (start[1] * args[2].stencil->stride[1]);
  p_a[2] = (char *)args[2].data_d + base2;
  int base3 = args[3].dat->base_offset +
           dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3+ dat3 *
    args[3].dat->size[0] *
    (start[1] * args[3].stencil->stride[1]);
  p_a[3] = (char *)args[3].data_d + base3;
  int base4 = args[4].dat->base_offset +
           dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4+ dat4 *
    args[4].dat->size[0] *
    (start[1] * args[4].stencil->stride[1]);
  p_a[4] = (char *)args[4].data_d + base4;
  int base5 = args[5].dat->base_offset +
           dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5+ dat5 *
    args[5].dat->size[0] *
    (start[1] * args[5].stencil->stride[1]);
  p_a[5] = (char *)args[5].data_d + base5;
  int base6 = args[6].dat->base_offset +
           dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
  base6 = base6+ dat6 *
    args[6].dat->size[0] *
    (start[1] * args[6].stencil->stride[1]);
  p_a[6] = (char *)args[6].data_d + base6;
  #ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 7);
  ops_halo_exchanges(args,7,range);
  #endif
  if (OPS_diags > 1) {
    ops_timers_core(&c2,&t2);
    OPS_kernels[1].mpi_time += t2-t1;
  }
  //call kernel wrapper function, passing in pointers to data
  if (x_size > 0 && y_size > 0)
    ops_generate_chunk_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
         (double *)p_a[2], (double *)p_a[3],
         (double *)p_a[4], (double *)p_a[5],
         (double *)p_a[6],x_size, y_size);
  cutilSafeCall(cudaGetLastError());  // catch launch-configuration errors
  if (OPS_diags>1) {
    // synchronize only when diagnostics are on, so kernel time is measurable
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1,&t1);
    OPS_kernels[1].time += t1-t2;
  }
  #ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 7);
  // args 2..4 are the datasets the kernel writes (non-const accessors).
  ops_set_halo_dirtybit3(&args[2],range);
  ops_set_halo_dirtybit3(&args[3],range);
  ops_set_halo_dirtybit3(&args[4],range);
  #endif
  if (OPS_diags > 1) {
    //Update kernel record
    ops_timers_core(&c2,&t2);
    OPS_kernels[1].mpi_time += t2-t1;
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5);
    OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg6);
  }
}
#ifdef OPS_LAZY
// Lazy front-end: instead of executing immediately, packs the loop into an
// ops_kernel_descriptor identified by a djb2-style hash over the kernel
// index, iteration range and dat indices, then enqueues it; the queued
// descriptor is run later by ops_par_loop_generate_chunk_kernel_execute.
void ops_par_loop_generate_chunk_kernel(char const *name, ops_block block, int dim, int* range,
 ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
  ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 1;
  desc->hash = 5381;  // djb2 seed
  desc->hash = ((desc->hash << 5) + desc->hash) + 1;
  for ( int i=0; i<4; i++ ){
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 7;
  desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
  desc->function = ops_par_loop_generate_chunk_kernel_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(1,"generate_chunk_kernel");
  }
  ops_enqueue_kernel(desc);
}
#endif
|
98a606ea174f5efd618f7873604c1192e4b99f41.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/magma_zthrsrm.cu, normal z -> s, Thu Oct 8 23:05:48 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#include <hip/hip_runtime.h>
#define SWAP(a, b) { tmp = a; a = b; b = tmp; }
#define BLOCK_SIZE 128
// One thread per row: scatters the surviving entries of row `tidx` of A
// (those whose column index was not flagged -1 by zcompute_nnz_kernel) into
// the compacted matrix B, and records each entry's row index in Browidx
// (COO-style row indices). Brow must already hold the new CSR row pointer.
__global__ void
zcompute_newval_kernel(
    magma_int_t num_rows,
    magma_index_t* Arow,
    magma_index_t* Brow,
    magma_index_t* Acol,
    magma_index_t* Browidx,
    magma_index_t* Bcol,
    float* Aval,
    float* Bval)
{
    int tidx = blockIdx.x*blockDim.x+threadIdx.x;
    magma_index_t offset_new, offset_old, end_old;
    if (tidx < num_rows) {
        magma_int_t count = 0;   // position within the compacted row
        offset_old = Arow[tidx];
        offset_new = Brow[tidx];
        end_old = Arow[tidx+1];
        for (int i = offset_old; i < end_old; i++) {
            if(Acol[i]>-1){
                Bcol[offset_new+count] = Acol[i];
                Bval[offset_new+count] = Aval[i];
                Browidx[offset_new + count] = tidx;
                count++;
            }
        }
    }
}
//kernel
// One thread per row: flags off-diagonal entries of A with |value| <= thrs
// for removal by overwriting their column index with -1 (cheaper than
// rewriting the value), and writes the number of surviving entries of the
// row into Brow[row]. Diagonal entries are always kept.
__global__ void
zcompute_nnz_kernel(
    magma_int_t num_rows,
    magma_index_t* Arow,
    magma_index_t* Brow,
    magma_index_t* Acol,
    float* Aval,
    float thrs)
{
    int row= blockIdx.x*blockDim.x+threadIdx.x;
    if (row < num_rows) {
        magma_int_t rm = 0;  // removed-entry count; incremented but never read
        magma_int_t el = 0;  // entries kept in this row
        for (int i = Arow[row]; i<Arow[row+1]; i++) {
            if (MAGMA_S_ABS(Aval[i]) <= thrs ) {
                if (Acol[i] != row) {
                    Acol[i] = -1;//cheaperthanval
                    rm++;
                } else {
                    el++;
                }
            } else {
                el++;
            }
        }
        Brow[row] = el;
    }
}
/**
Purpose
-------
This routine selects a threshold separating the subset_size smallest
magnitude elements from the rest.
Arguments
---------
@param[in]
order magma_int_t
dummy variable for now.
@param[in,out]
A magma_s_matrix*
input/output matrix where elements are removed
@param[out]
thrs float*
computed threshold
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
// Removes all off-diagonal elements of A whose magnitude is <= *thrs,
// compacting the matrix on the device in two passes: (1) flag and count
// surviving entries per row, (2) prefix-sum into a new CSR row pointer and
// scatter the surviving (col, val, rowidx) triples. `order` is currently a
// dummy parameter (see the routine's doxygen header above).
extern "C" magma_int_t
magma_sthrsholdrm_gpu(
    magma_int_t order,
    magma_s_matrix* A,
    float* thrs,
    magma_queue_t queue)
{
    magma_int_t info = 0;
    magma_int_t num_blocks = magma_ceildiv(A->num_rows,BLOCK_SIZE);
    // B receives the compacted matrix; all buffers live on the device.
    magma_s_matrix B={Magma_CSR};
    B.num_rows = A->num_rows;
    B.num_cols = A->num_cols;
    B.storage_type = A->storage_type;
    B.memory_location = Magma_DEV;
    magma_index_t *new_rownnz={NULL};
    dim3 block(BLOCK_SIZE, 1, 1);
    dim3 grid(num_blocks, 1, 1 );
    magma_index_malloc(&new_rownnz,A->num_rows);
    magma_index_malloc(&B.drow,A->num_rows+1);
    // Pass 1: flag entries to drop (Acol = -1) and count survivors per row.
    hipLaunchKernelGGL(( zcompute_nnz_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream(),
        A->num_rows, A->drow, new_rownnz, A->dcol, A->dval,*thrs);
    // Build the new CSR row pointer and total nnz from the per-row counts.
    magma_sget_row_ptr(A->num_rows, &B.nnz, new_rownnz, B.drow, queue);
    magma_smalloc(&B.dval,B.nnz);
    // BUGFIX: allocate the *device* row-index array B.drowidx that the
    // scatter kernel writes; the original allocated the host-side field
    // B.rowidx, leaving B.drowidx NULL and causing an illegal device write.
    magma_index_malloc(&B.drowidx,B.nnz);
    magma_index_malloc(&B.dcol,B.nnz);
    // Pass 2: scatter surviving entries (col, val, rowidx) into B.
    hipLaunchKernelGGL(( zcompute_newval_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream(),
        A->num_rows, A->drow, B.drow, A->dcol,B.drowidx, B.dcol, A->dval, B.dval);
    //Rewrite the matrix with all the new values
    magma_smatrix_swap(&B, A, queue);
    magma_smfree(&B, queue);
    magma_free(new_rownnz);
    return info;
}
| 98a606ea174f5efd618f7873604c1192e4b99f41.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/magma_zthrsrm.cu, normal z -> s, Thu Oct 8 23:05:48 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#include <cuda_runtime.h>
#define SWAP(a, b) { tmp = a; a = b; b = tmp; }
#define BLOCK_SIZE 128
// One thread per row: scatters the surviving entries of row `tidx` of A
// (those whose column index was not flagged -1 by zcompute_nnz_kernel) into
// the compacted matrix B, and records each entry's row index in Browidx
// (COO-style row indices). Brow must already hold the new CSR row pointer.
__global__ void
zcompute_newval_kernel(
    magma_int_t num_rows,
    magma_index_t* Arow,
    magma_index_t* Brow,
    magma_index_t* Acol,
    magma_index_t* Browidx,
    magma_index_t* Bcol,
    float* Aval,
    float* Bval)
{
    int tidx = blockIdx.x*blockDim.x+threadIdx.x;
    magma_index_t offset_new, offset_old, end_old;
    if (tidx < num_rows) {
        magma_int_t count = 0;   // position within the compacted row
        offset_old = Arow[tidx];
        offset_new = Brow[tidx];
        end_old = Arow[tidx+1];
        for (int i = offset_old; i < end_old; i++) {
            if(Acol[i]>-1){
                Bcol[offset_new+count] = Acol[i];
                Bval[offset_new+count] = Aval[i];
                Browidx[offset_new + count] = tidx;
                count++;
            }
        }
    }
}
//kernel
// One thread per row: flags off-diagonal entries of A with |value| <= thrs
// for removal by overwriting their column index with -1 (cheaper than
// rewriting the value), and writes the number of surviving entries of the
// row into Brow[row]. Diagonal entries are always kept.
__global__ void
zcompute_nnz_kernel(
    magma_int_t num_rows,
    magma_index_t* Arow,
    magma_index_t* Brow,
    magma_index_t* Acol,
    float* Aval,
    float thrs)
{
    int row= blockIdx.x*blockDim.x+threadIdx.x;
    if (row < num_rows) {
        magma_int_t rm = 0;  // removed-entry count; incremented but never read
        magma_int_t el = 0;  // entries kept in this row
        for (int i = Arow[row]; i<Arow[row+1]; i++) {
            if (MAGMA_S_ABS(Aval[i]) <= thrs ) {
                if (Acol[i] != row) {
                    Acol[i] = -1;//cheaperthanval
                    rm++;
                } else {
                    el++;
                }
            } else {
                el++;
            }
        }
        Brow[row] = el;
    }
}
/**
Purpose
-------
This routine selects a threshold separating the subset_size smallest
magnitude elements from the rest.
Arguments
---------
@param[in]
order magma_int_t
dummy variable for now.
@param[in,out]
A magma_s_matrix*
input/output matrix where elements are removed
@param[out]
thrs float*
computed threshold
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
// Removes all off-diagonal elements of A whose magnitude is <= *thrs,
// compacting the matrix on the device in two passes: (1) flag and count
// surviving entries per row, (2) prefix-sum into a new CSR row pointer and
// scatter the surviving (col, val, rowidx) triples. `order` is currently a
// dummy parameter (see the routine's doxygen header above).
extern "C" magma_int_t
magma_sthrsholdrm_gpu(
    magma_int_t order,
    magma_s_matrix* A,
    float* thrs,
    magma_queue_t queue)
{
    magma_int_t info = 0;
    magma_int_t num_blocks = magma_ceildiv(A->num_rows,BLOCK_SIZE);
    // B receives the compacted matrix; all buffers live on the device.
    magma_s_matrix B={Magma_CSR};
    B.num_rows = A->num_rows;
    B.num_cols = A->num_cols;
    B.storage_type = A->storage_type;
    B.memory_location = Magma_DEV;
    magma_index_t *new_rownnz={NULL};
    dim3 block(BLOCK_SIZE, 1, 1);
    dim3 grid(num_blocks, 1, 1 );
    magma_index_malloc(&new_rownnz,A->num_rows);
    magma_index_malloc(&B.drow,A->num_rows+1);
    // Pass 1: flag entries to drop (Acol = -1) and count survivors per row.
    zcompute_nnz_kernel<<<grid, block, 0, queue->cuda_stream()>>>
        (A->num_rows, A->drow, new_rownnz, A->dcol, A->dval,*thrs);
    // Build the new CSR row pointer and total nnz from the per-row counts.
    magma_sget_row_ptr(A->num_rows, &B.nnz, new_rownnz, B.drow, queue);
    magma_smalloc(&B.dval,B.nnz);
    // BUGFIX: allocate the *device* row-index array B.drowidx that the
    // scatter kernel writes; the original allocated the host-side field
    // B.rowidx, leaving B.drowidx NULL and causing an illegal device write.
    magma_index_malloc(&B.drowidx,B.nnz);
    magma_index_malloc(&B.dcol,B.nnz);
    // Pass 2: scatter surviving entries (col, val, rowidx) into B.
    zcompute_newval_kernel<<<grid, block, 0, queue->cuda_stream()>>>
        (A->num_rows, A->drow, B.drow, A->dcol,B.drowidx, B.dcol, A->dval, B.dval);
    //Rewrite the matrix with all the new values
    magma_smatrix_swap(&B, A, queue);
    magma_smfree(&B, queue);
    magma_free(new_rownnz);
    return info;
}
|
cb3b9ab189f15550f14678adbe289c340e1ac37a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "basic/GPUBasic.h"
#include "model/GPUDevice.h"
#include "math/GPUMath.h"
#include "math/GPUUnaryElementWise.h"
#include "math/Square.h"
namespace Deep8 {
namespace Math {
// Forward functor for the element-wise square op: returns v * v.
template <typename T>
struct SquareKernelOp {
    DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &v) {
        const T value = v;
        return value * value;
    }
};
// Element-wise square on the GPU (y = x * x), dispatched on the tensor's
// element type. n is x.shape.size() — assumed to be the total element count
// (TODO confirm against Deep8's Shape); launch is a 1D ceil-div grid of
// DEEP8_GPU_BLOCK_SIZE-thread blocks. NOTE(review): no error check after the
// kernel launch.
void SquareGPU(const Tensor &x, Tensor &y) {
    auto n = (int)x.shape.size();
    int blockSize = DEEP8_GPU_BLOCK_SIZE;
    int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE;  // ceil-div
    switch (x.elementType.id) {
    case DType::Float32:
        hipLaunchKernelGGL(( UnaryElementWiseKernel<float, SquareKernelOp<float>>) , dim3(grideSize), dim3(blockSize), 0, 0,
            x.data<float>(),
            y.data<float>(),
            SquareKernelOp<float>(),
            n
        );
        break;
    case DType::Float64:
        hipLaunchKernelGGL(( UnaryElementWiseKernel<double, SquareKernelOp<double>>) , dim3(grideSize), dim3(blockSize), 0, 0,
            x.data<double>(),
            y.data<double>(),
            SquareKernelOp<double>(),
            n
        );
        break;
#ifdef HAVE_HALF
    case DType::Float16:
        hipLaunchKernelGGL(( UnaryElementWiseKernel<half, SquareKernelOp<half>>) , dim3(grideSize), dim3(blockSize), 0, 0,
            x.data<half>(),
            y.data<half>(),
            SquareKernelOp<half>(),
            n
        );
        break;
#endif
    default:
        DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support");
        break;
    }
}
// Backward functor for the element-wise square op: d/dx (x^2) = 2x, so the
// gradient contribution is 2 * x * dy. The forward output `y` is unused but
// kept to match the common gradient-functor signature.
template <typename T>
struct SquareGradKernelOp {
    DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x, const T &y, const T &dy) {
        const T twoX = T(2) * x;
        return twoX * dy;
    }
};
// Gradient of the element-wise square on the GPU, dispatched on the
// element type; applies SquareGradKernelOp (2 * x * dy) over n elements,
// where n is dx.shape.size(). NOTE(review): whether the grad kernel
// accumulates into dx or overwrites it is decided by
// UnaryElementWiseGradKernel — confirm in GPUUnaryElementWise.h.
void SquareGradGPU(const Tensor &x, Tensor &dx, const Tensor &y, const Tensor &dy) {
    auto n = (int)dx.shape.size();
    int blockSize = DEEP8_GPU_BLOCK_SIZE;
    int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE;  // ceil-div
    switch (x.elementType.id) {
    case DType::Float32:
        hipLaunchKernelGGL(( UnaryElementWiseGradKernel<float, SquareGradKernelOp<float>>) , dim3(grideSize), dim3(blockSize), 0, 0,
            x.data<float>(),
            dx.data<float>(),
            y.data<float>(),
            dy.data<float>(),
            SquareGradKernelOp<float>(),
            n
        );
        break;
    case DType::Float64:
        hipLaunchKernelGGL(( UnaryElementWiseGradKernel<double, SquareGradKernelOp<double>>) , dim3(grideSize), dim3(blockSize), 0, 0,
            x.data<double>(),
            dx.data<double>(),
            y.data<double>(),
            dy.data<double>(),
            SquareGradKernelOp<double>(),
            n
        );
        break;
#ifdef HAVE_HALF
    case DType::Float16:
        hipLaunchKernelGGL(( UnaryElementWiseGradKernel<half, SquareGradKernelOp<half>>) , dim3(grideSize), dim3(blockSize), 0, 0,
            x.data<half>(),
            dx.data<half>(),
            y.data<half>(),
            dy.data<half>(),
            SquareGradKernelOp<half>(),
            n
        );
        break;
#endif
    default:
        DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support");
        break;
    }
}
}
} | cb3b9ab189f15550f14678adbe289c340e1ac37a.cu | #include "basic/GPUBasic.h"
#include "model/GPUDevice.h"
#include "math/GPUMath.h"
#include "math/GPUUnaryElementWise.h"
#include "math/Square.h"
namespace Deep8 {
namespace Math {
// Forward functor for the element-wise square op: returns x * x.
template <typename T>
struct SquareKernelOp {
    DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x) {
        return x * x;
    }
};
// Element-wise square on the GPU (y = x * x), dispatched on the tensor's
// element type. n is x.shape.size() — assumed to be the total element count
// (TODO confirm against Deep8's Shape); launch is a 1D ceil-div grid of
// DEEP8_GPU_BLOCK_SIZE-thread blocks. NOTE(review): no error check after the
// kernel launch.
void SquareGPU(const Tensor &x, Tensor &y) {
    auto n = (int)x.shape.size();
    int blockSize = DEEP8_GPU_BLOCK_SIZE;
    int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE;  // ceil-div
    switch (x.elementType.id) {
    case DType::Float32:
        UnaryElementWiseKernel<float, SquareKernelOp<float>> <<<grideSize, blockSize>>>(
            x.data<float>(),
            y.data<float>(),
            SquareKernelOp<float>(),
            n
        );
        break;
    case DType::Float64:
        UnaryElementWiseKernel<double, SquareKernelOp<double>> <<<grideSize, blockSize>>>(
            x.data<double>(),
            y.data<double>(),
            SquareKernelOp<double>(),
            n
        );
        break;
#ifdef HAVE_HALF
    case DType::Float16:
        UnaryElementWiseKernel<half, SquareKernelOp<half>> <<<grideSize, blockSize>>>(
            x.data<half>(),
            y.data<half>(),
            SquareKernelOp<half>(),
            n
        );
        break;
#endif
    default:
        DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support");
        break;
    }
}
// Backward functor for the element-wise square op: d/dx (x^2) = 2x, so the
// gradient contribution is 2 * x * dy. The forward output `y` is unused but
// kept to match the common gradient-functor signature.
template <typename T>
struct SquareGradKernelOp {
    DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x, const T &y, const T &dy) {
        return T(2) * x * dy;
    }
};
// Gradient of the element-wise square on the GPU, dispatched on the
// element type; applies SquareGradKernelOp (2 * x * dy) over n elements,
// where n is dx.shape.size(). NOTE(review): whether the grad kernel
// accumulates into dx or overwrites it is decided by
// UnaryElementWiseGradKernel — confirm in GPUUnaryElementWise.h.
void SquareGradGPU(const Tensor &x, Tensor &dx, const Tensor &y, const Tensor &dy) {
    auto n = (int)dx.shape.size();
    int blockSize = DEEP8_GPU_BLOCK_SIZE;
    int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE;  // ceil-div
    switch (x.elementType.id) {
    case DType::Float32:
        UnaryElementWiseGradKernel<float, SquareGradKernelOp<float>> <<<grideSize, blockSize>>> (
            x.data<float>(),
            dx.data<float>(),
            y.data<float>(),
            dy.data<float>(),
            SquareGradKernelOp<float>(),
            n
        );
        break;
    case DType::Float64:
        UnaryElementWiseGradKernel<double, SquareGradKernelOp<double>> <<<grideSize, blockSize>>> (
            x.data<double>(),
            dx.data<double>(),
            y.data<double>(),
            dy.data<double>(),
            SquareGradKernelOp<double>(),
            n
        );
        break;
#ifdef HAVE_HALF
    case DType::Float16:
        UnaryElementWiseGradKernel<half, SquareGradKernelOp<half>> <<<grideSize, blockSize>>> (
            x.data<half>(),
            dx.data<half>(),
            y.data<half>(),
            dy.data<half>(),
            SquareGradKernelOp<half>(),
            n
        );
        break;
#endif
    default:
        DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support");
        break;
    }
}
}
} |
b5b5a82a12715229e128dc38349dfbf31fc58166.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlarfg.cu, normal z -> c, Mon Jun 25 18:24:12 2018
@author Mark Gates
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define COMPLEX
// 512 is maximum number of threads for CUDA capability 1.x
#define NB 512
/******************************************************************************/
// kernel for magma_clarfg.
// Uses one block of NB (currently 512) threads.
// Each thread sums dx[ tx + k*NB ]^2 for k = 0, 1, ...,
// then does parallel sum reduction to get norm-squared.
//
// Currently setup to use NB threads, no matter how small dx is.
// This was slightly faster (5%) than passing n to magma_sum_reduce.
// To use number of threads = min( NB, max( 1, n-1 )), pass n as
// argument to magma_sum_reduce, rather than as template parameter.
// Single-block Householder reflector generation (see header comment above):
//   phase 1: block-wide max-abs over [alpha, x]  -> sscale (under/overflow guard)
//   phase 2: block-wide sum of |x/sscale|^2      -> swork[0]
//   phase 3: thread 0 derives beta, tau and sscale2 = 1/(alpha - beta)
//   phase 4: all threads scale x by sscale2 to form v
// Must be launched with exactly one block of NB threads.
__global__ void
clarfg_kernel(
    int n,
    magmaFloatComplex* dalpha,
    magmaFloatComplex* dx, int incx,
    magmaFloatComplex* dtau )
{
    const int tx = threadIdx.x;
    __shared__ float swork[ NB ];
    // TODO is it faster for each thread to have its own scale (register)?
    // if so, communicate it via swork[0]
    __shared__ float sscale;
    __shared__ magmaFloatComplex sscale2;
    magmaFloatComplex tmp;
    
    // find max of [dalpha, dx], to use as scaling to avoid unnecesary under- and overflow
    // (thread 0 seeds its slot with |alpha|; all threads then fold in their
    // strided share of dx, and a block max-reduce produces sscale)
    if ( tx == 0 ) {
        tmp = *dalpha;
        #ifdef COMPLEX
        swork[tx] = max( fabs( MAGMA_C_REAL(tmp)), fabs( MAGMA_C_IMAG(tmp)) );
        #else
        swork[tx] = fabs(tmp);
        #endif
    }
    else {
        swork[tx] = 0;
    }
    for( int j = tx; j < n-1; j += NB ) {
        tmp = dx[j*incx];
        #ifdef COMPLEX
        swork[tx] = max( swork[tx], max( fabs( MAGMA_C_REAL(tmp)), fabs( MAGMA_C_IMAG(tmp)) ));
        #else
        swork[tx] = max( swork[tx], fabs(tmp) );
        #endif
    }
    magma_max_reduce< NB >( tx, swork );
    if ( tx == 0 )
        sscale = swork[0];
    __syncthreads();
    
    // sum norm^2 of dx/sscale
    // dx has length n-1
    swork[tx] = 0;
    if ( sscale > 0 ) {
        for( int j = tx; j < n-1; j += NB ) {
            tmp = dx[j*incx] / sscale;
            swork[tx] += MAGMA_C_REAL(tmp)*MAGMA_C_REAL(tmp) + MAGMA_C_IMAG(tmp)*MAGMA_C_IMAG(tmp);
        }
        magma_sum_reduce< NB >( tx, swork );
        //magma_sum_reduce( blockDim.x, tx, swork );
    }
    
    if ( tx == 0 ) {
        magmaFloatComplex alpha = *dalpha;
        if ( swork[0] == 0 && MAGMA_C_IMAG(alpha) == 0 ) {
            // H = I
            *dtau = MAGMA_C_ZERO;
        }
        else {
            // beta = norm( [dalpha, dx] )
            // (sign chosen opposite to real(alpha) to avoid cancellation)
            float beta;
            tmp  = alpha / sscale;
            beta = sscale * sqrt( MAGMA_C_REAL(tmp)*MAGMA_C_REAL(tmp) + MAGMA_C_IMAG(tmp)*MAGMA_C_IMAG(tmp) + swork[0] );
            beta = -copysign( beta, MAGMA_C_REAL(alpha) );
            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau   = MAGMA_C_MAKE( (beta - MAGMA_C_REAL(alpha)) / beta, -MAGMA_C_IMAG(alpha) / beta );
            *dalpha = MAGMA_C_MAKE( beta, 0 );
            sscale2 = 1 / (alpha - beta);
        }
    }
    
    // scale x (if norm was not 0)
    // (when swork[0] != 0 the else-branch above necessarily ran, so sscale2
    // is set before this barrier releases the readers)
    __syncthreads();
    if ( swork[0] != 0 ) {
        for( int j = tx; j < n-1; j += NB ) {
            dx[j*incx] *= sscale2;
        }
    }
}
/***************************************************************************//**
Purpose
-------
CLARFG generates a complex elementary reflector (Householder matrix)
H of order n, such that
H * ( alpha ) = ( beta ), H**H * H = I.
( x ) ( 0 )
where alpha and beta are scalars, with beta real and beta = ±norm([alpha, x]),
and x is an (n-1)-element complex vector. H is represented in the form
H = I - tau * ( 1 ) * ( 1 v**H ),
( v )
where tau is a complex scalar and v is a complex (n-1)-element vector.
Note that H is not Hermitian.
If the elements of x are all zero and dalpha is real, then tau = 0
and H is taken to be the unit matrix.
Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1.
Arguments
---------
@param[in]
n INTEGER
The order of the elementary reflector.
@param[in,out]
dalpha COMPLEX* on the GPU.
On entry, pointer to the value alpha, i.e., the first entry of the vector.
On exit, it is overwritten with the value beta.
@param[in,out]
dx COMPLEX array, dimension (1+(N-2)*abs(INCX)), on the GPU
On entry, the (n-1)-element vector x.
On exit, it is overwritten with the vector v.
@param[in]
incx INTEGER
The increment between elements of X. INCX > 0.
@param[out]
dtau COMPLEX* on the GPU.
Pointer to the value tau.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_larfg
*******************************************************************************/
extern "C"
void magmablas_clarfg(
    magma_int_t n,
    magmaFloatComplex_ptr dalpha,
    magmaFloatComplex_ptr dx, magma_int_t incx,
    magmaFloatComplex_ptr dtau,
    magma_queue_t queue )
{
    // A single block of NB threads handles the entire vector; the kernel
    // itself strides over dx in steps of NB, so no grid sizing is needed.
    dim3 grid( 1 );
    dim3 block( NB );
    hipLaunchKernelGGL(( clarfg_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() , n, dalpha, dx, incx, dtau );
}
| b5b5a82a12715229e128dc38349dfbf31fc58166.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zlarfg.cu, normal z -> c, Mon Jun 25 18:24:12 2018
@author Mark Gates
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define COMPLEX
// 512 is maximum number of threads for CUDA capability 1.x
#define NB 512
/******************************************************************************/
// kernel for magma_clarfg.
// Uses one block of NB (currently 512) threads.
// Each thread sums dx[ tx + k*NB ]^2 for k = 0, 1, ...,
// then does parallel sum reduction to get norm-squared.
//
// Currently setup to use NB threads, no matter how small dx is.
// This was slightly faster (5%) than passing n to magma_sum_reduce.
// To use number of threads = min( NB, max( 1, n-1 )), pass n as
// argument to magma_sum_reduce, rather than as template parameter.
__global__ void
clarfg_kernel(
    int n,
    magmaFloatComplex* dalpha,
    magmaFloatComplex* dx, int incx,
    magmaFloatComplex* dtau )
{
    const int tx = threadIdx.x;
    // Per-thread slots for the block-wide max and sum reductions.
    __shared__ float swork[ NB ];
    // TODO is it faster for each thread to have its own scale (register)?
    // if so, communicate it via swork[0]
    // max |component| over [dalpha, dx]; written by thread 0, read by all.
    __shared__ float sscale;
    // 1/(alpha - beta); written by thread 0, read by all threads when scaling x.
    __shared__ magmaFloatComplex sscale2;
    magmaFloatComplex tmp;
    // find max of [dalpha, dx], to use as scaling to avoid unnecessary under- and overflow
    // Thread 0 seeds its reduction slot with |alpha|; all other slots start at 0.
    if ( tx == 0 ) {
        tmp = *dalpha;
        #ifdef COMPLEX
        swork[tx] = max( fabs( MAGMA_C_REAL(tmp)), fabs( MAGMA_C_IMAG(tmp)) );
        #else
        swork[tx] = fabs(tmp);
        #endif
    }
    else {
        swork[tx] = 0;
    }
    // Block-stride loop: thread tx folds in elements tx, tx+NB, tx+2*NB, ...
    // of the (n-1)-element vector dx.
    for( int j = tx; j < n-1; j += NB ) {
        tmp = dx[j*incx];
        #ifdef COMPLEX
        swork[tx] = max( swork[tx], max( fabs( MAGMA_C_REAL(tmp)), fabs( MAGMA_C_IMAG(tmp)) ));
        #else
        swork[tx] = max( swork[tx], fabs(tmp) );
        #endif
    }
    // Reduce the NB partial maxima into swork[0].  NOTE(review):
    // magma_max_reduce comes from magma_templates.h; presumably it ends
    // with a barrier so the broadcast below is safe -- confirm if modifying.
    magma_max_reduce< NB >( tx, swork );
    if ( tx == 0 )
        sscale = swork[0];
    __syncthreads();
    // sum norm^2 of dx/sscale
    // dx has length n-1
    swork[tx] = 0;
    // sscale == 0 only when alpha and all of dx are zero; skip the sum then.
    // sscale is shared, so this branch is uniform across the block.
    if ( sscale > 0 ) {
        for( int j = tx; j < n-1; j += NB ) {
            tmp = dx[j*incx] / sscale;
            swork[tx] += MAGMA_C_REAL(tmp)*MAGMA_C_REAL(tmp) + MAGMA_C_IMAG(tmp)*MAGMA_C_IMAG(tmp);
        }
        magma_sum_reduce< NB >( tx, swork );
        //magma_sum_reduce( blockDim.x, tx, swork );
    }
    // Thread 0 computes beta, tau, and the x-scaling factor 1/(alpha - beta).
    if ( tx == 0 ) {
        magmaFloatComplex alpha = *dalpha;
        if ( swork[0] == 0 && MAGMA_C_IMAG(alpha) == 0 ) {
            // H = I
            *dtau = MAGMA_C_ZERO;
        }
        else {
            // beta = norm( [dalpha, dx] )
            float beta;
            tmp = alpha / sscale;
            beta = sscale * sqrt( MAGMA_C_REAL(tmp)*MAGMA_C_REAL(tmp) + MAGMA_C_IMAG(tmp)*MAGMA_C_IMAG(tmp) + swork[0] );
            // beta takes the sign opposite to real(alpha), as in LAPACK's larfg.
            beta = -copysign( beta, MAGMA_C_REAL(alpha) );
            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau = MAGMA_C_MAKE( (beta - MAGMA_C_REAL(alpha)) / beta, -MAGMA_C_IMAG(alpha) / beta );
            *dalpha = MAGMA_C_MAKE( beta, 0 );
            sscale2 = 1 / (alpha - beta);
        }
    }
    // scale x (if norm was not 0)
    // This barrier publishes thread 0's writes (sscale2, and swork[0] from
    // the reduction) before the whole block reads them below.
    __syncthreads();
    if ( swork[0] != 0 ) {
        for( int j = tx; j < n-1; j += NB ) {
            dx[j*incx] *= sscale2;
        }
    }
}
/***************************************************************************//**
Purpose
-------
CLARFG generates a complex elementary reflector (Householder matrix)
H of order n, such that
H * ( alpha ) = ( beta ), H**H * H = I.
( x ) ( 0 )
where alpha and beta are scalars, with beta real and beta = ±norm([alpha, x]),
and x is an (n-1)-element complex vector. H is represented in the form
H = I - tau * ( 1 ) * ( 1 v**H ),
( v )
where tau is a complex scalar and v is a complex (n-1)-element vector.
Note that H is not Hermitian.
If the elements of x are all zero and dalpha is real, then tau = 0
and H is taken to be the unit matrix.
Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1.
Arguments
---------
@param[in]
n INTEGER
The order of the elementary reflector.
@param[in,out]
dalpha COMPLEX* on the GPU.
On entry, pointer to the value alpha, i.e., the first entry of the vector.
On exit, it is overwritten with the value beta.
@param[in,out]
dx COMPLEX array, dimension (1+(N-2)*abs(INCX)), on the GPU
On entry, the (n-1)-element vector x.
On exit, it is overwritten with the vector v.
@param[in]
incx INTEGER
The increment between elements of X. INCX > 0.
@param[out]
dtau COMPLEX* on the GPU.
Pointer to the value tau.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_larfg
*******************************************************************************/
extern "C"
void magmablas_clarfg(
    magma_int_t n,
    magmaFloatComplex_ptr dalpha,
    magmaFloatComplex_ptr dx, magma_int_t incx,
    magmaFloatComplex_ptr dtau,
    magma_queue_t queue )
{
    // A single block of NB threads handles the entire vector; the kernel
    // itself strides over dx in steps of NB, so no grid sizing is needed.
    dim3 grid( 1 );
    dim3 block( NB );
    clarfg_kernel<<< grid, block, 0, queue->cuda_stream() >>>( n, dalpha, dx, incx, dtau );
}
|
381edf60da7da3d304a0afd5f25e01fad325916a.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <hip/hip_runtime.h>
#include "transpose_device.cuh"
/*
* TODO for all kernels (including naive):
* Leave a comment above all non-coalesced memory accesses and bank conflicts.
* Make it clear if the suboptimal access is a read or write. If an access is
* non-coalesced, specify how many cache lines it touches, and if an access
* causes bank conflicts, say if its a 2-way bank conflict, 4-way bank
* conflict, etc.
*
* Comment all of your kernels.
*/
/*
* Each block of the naive transpose handles a 64x64 block of the input matrix,
* with each thread of the block handling a 1x4 section and each warp handling
* a 32x4 section.
*
* If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have
* a block matrix of shape (2 blocks, 16 blocks).
* Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1),
* warp n handles (n % 2, n / 2).
*
* This kernel is launched with block shape (64, 16) and grid shape
* (n / 64, n / 64) where n is the size of the square matrix.
*
* You may notice that we suggested in lecture that threads should be able to
* handle an arbitrary number of elements and that this kernel handles exactly
* 4 elements per thread. This is OK here because to overwhelm this kernel
* it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of
* memory (well beyond what I expect GPUs to have in the next few years).
*/
__global__
void naiveTransposeKernel(const float *input, float *output, int n) {
    // Column of the input handled by this thread.
    const int col = 64 * blockIdx.x + threadIdx.x;
    // Each thread covers four consecutive input rows starting at row0.
    const int row0 = 64 * blockIdx.y + 4 * threadIdx.y;
    for (int r = row0; r < row0 + 4; r++)
        output[r + n * col] = input[col + n * r];
    /* No shared memory is used, so bank conflicts cannot occur.  The
     * read input[col + n*r] is coalesced: the 32 threads of a warp have
     * consecutive col values, hitting contiguous addresses.  The write
     * output[r + n*col], however, strides by n floats between adjacent
     * lanes; with n >= 512 each lane lands on a different cache line,
     * so every warp store touches 32 distinct lines (fully
     * non-coalesced).
     */
}
__global__
void shmemTransposeKernel(const float *input, float *output, int n) {
    // 64x64 tile of the input staged through shared memory so that both
    // global-memory accesses can be coalesced.  The tile is stored with
    // row stride 64 and no padding; see the bank-conflict note below.
    __shared__ float data[64*64];
    int s_i = threadIdx.x;
    int s_j = threadIdx.y * 4;
    const int i = threadIdx.x + 64 * blockIdx.x;
    int j = 4 * threadIdx.y + 64 * blockIdx.y;
    const int end_k = 4;
    /* I would like to move all non-linear transformation
     * into the shared memory, therefore we need to specify
     * the indices for the transposed matrix instead of just
     * swapping i and j.
     */
    const int i_t = threadIdx.x + 64 * blockIdx.y;
    int j_t = 4 * threadIdx.y + 64 * blockIdx.x;
    // Global read is coalesced (consecutive threadIdx.x -> consecutive
    // addresses).  The shared-memory WRITE is a 32-way bank conflict:
    // the address s_j + k + s_i*64 is congruent mod 32 for all 32 lanes
    // of a warp (s_i*64 is a multiple of 32), assuming the usual 32
    // banks of 4-byte words.
    for (int k = 0; k < end_k; k++)
        data[s_j + k + s_i*64] = input[i + n * (j + k)];
    __syncthreads();
    // Shared-memory read is conflict-free (consecutive s_i map to
    // consecutive banks) and the global write is coalesced
    // (consecutive i_t -> contiguous addresses).
    for (int k = 0; k < end_k; k++)
        output[i_t + n * (j_t + k)] = data[s_i + (s_j+k)*64];
}
__global__
void optimalTransposeKernel(const float *input, float *output, int n) {
    // 64x64 tile with one padding float per row: the 65-float row
    // stride skews successive rows across banks, removing shared-memory
    // bank conflicts on both the write and the transposed read.
    __shared__ float tile[64*65];
    const int lx = threadIdx.x;
    const int ly = 4 * threadIdx.y;
    // This thread's 1x4 strip in the input matrix ...
    const int in_col = lx + 64 * blockIdx.x;
    const int in_row = ly + 64 * blockIdx.y;
    // ... and in the transposed output: the block indices swap roles so
    // the non-linear index transformation stays inside shared memory.
    const int out_col = lx + 64 * blockIdx.y;
    const int out_row = ly + 64 * blockIdx.x;
    // Fully unrolled (same accesses as writing the four statements by
    // hand, as the original did).
    #pragma unroll
    for (int k = 0; k < 4; k++)
        tile[ly + k + lx*65] = input[in_col + n * (in_row + k)];
    __syncthreads();
    #pragma unroll
    for (int k = 0; k < 4; k++)
        output[out_col + n * (out_row + k)] = tile[lx + (ly + k)*65];
}
void cudaTranspose(
    const float *d_input,
    float *d_output,
    int n,
    TransposeImplementation type)
{
    // All three implementations use the same launch configuration --
    // one 64x16 thread block per 64x64 tile (each thread owns a 1x4
    // strip) -- so hoist it out of the branches instead of repeating
    // the identical dim3 setup three times.
    // Assumes n is a positive multiple of 64: the kernels do no bounds
    // checking, and the grid would be empty for n < 64.
    const dim3 blockSize(64, 16);
    const dim3 gridSize(n / 64, n / 64);
    if (type == NAIVE) {
        hipLaunchKernelGGL(( naiveTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
    }
    else if (type == SHMEM) {
        hipLaunchKernelGGL(( shmemTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
    }
    else if (type == OPTIMAL) {
        hipLaunchKernelGGL(( optimalTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n);
    }
    // Unknown type.  NOTE(review): assert compiles away under NDEBUG,
    // so release builds silently do nothing for an invalid type.
    else
        assert(false);
}
| 381edf60da7da3d304a0afd5f25e01fad325916a.cu | #include <cassert>
#include <cuda_runtime.h>
#include "transpose_device.cuh"
/*
* TODO for all kernels (including naive):
* Leave a comment above all non-coalesced memory accesses and bank conflicts.
* Make it clear if the suboptimal access is a read or write. If an access is
* non-coalesced, specify how many cache lines it touches, and if an access
* causes bank conflicts, say if its a 2-way bank conflict, 4-way bank
* conflict, etc.
*
* Comment all of your kernels.
*/
/*
* Each block of the naive transpose handles a 64x64 block of the input matrix,
* with each thread of the block handling a 1x4 section and each warp handling
* a 32x4 section.
*
* If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have
* a block matrix of shape (2 blocks, 16 blocks).
* Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1),
* warp n handles (n % 2, n / 2).
*
* This kernel is launched with block shape (64, 16) and grid shape
* (n / 64, n / 64) where n is the size of the square matrix.
*
* You may notice that we suggested in lecture that threads should be able to
* handle an arbitrary number of elements and that this kernel handles exactly
* 4 elements per thread. This is OK here because to overwhelm this kernel
* it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of
* memory (well beyond what I expect GPUs to have in the next few years).
*/
__global__
void naiveTransposeKernel(const float *input, float *output, int n) {
    // Column of the input handled by this thread.
    const int col = 64 * blockIdx.x + threadIdx.x;
    // Each thread covers four consecutive input rows starting at row0.
    const int row0 = 64 * blockIdx.y + 4 * threadIdx.y;
    for (int r = row0; r < row0 + 4; r++)
        output[r + n * col] = input[col + n * r];
    /* No shared memory is used, so bank conflicts cannot occur.  The
     * read input[col + n*r] is coalesced: the 32 threads of a warp have
     * consecutive col values, hitting contiguous addresses.  The write
     * output[r + n*col], however, strides by n floats between adjacent
     * lanes; with n >= 512 each lane lands on a different cache line,
     * so every warp store touches 32 distinct lines (fully
     * non-coalesced).
     */
}
__global__
void shmemTransposeKernel(const float *input, float *output, int n) {
    // 64x64 tile of the input staged through shared memory so that both
    // global-memory accesses can be coalesced.  The tile is stored with
    // row stride 64 and no padding; see the bank-conflict note below.
    __shared__ float data[64*64];
    int s_i = threadIdx.x;
    int s_j = threadIdx.y * 4;
    const int i = threadIdx.x + 64 * blockIdx.x;
    int j = 4 * threadIdx.y + 64 * blockIdx.y;
    const int end_k = 4;
    /* I would like to move all non-linear transformation
     * into the shared memory, therefore we need to specify
     * the indices for the transposed matrix instead of just
     * swapping i and j.
     */
    const int i_t = threadIdx.x + 64 * blockIdx.y;
    int j_t = 4 * threadIdx.y + 64 * blockIdx.x;
    // Global read is coalesced (consecutive threadIdx.x -> consecutive
    // addresses).  The shared-memory WRITE is a 32-way bank conflict:
    // the address s_j + k + s_i*64 is congruent mod 32 for all 32 lanes
    // of a warp (s_i*64 is a multiple of 32), assuming the usual 32
    // banks of 4-byte words.
    for (int k = 0; k < end_k; k++)
        data[s_j + k + s_i*64] = input[i + n * (j + k)];
    __syncthreads();
    // Shared-memory read is conflict-free (consecutive s_i map to
    // consecutive banks) and the global write is coalesced
    // (consecutive i_t -> contiguous addresses).
    for (int k = 0; k < end_k; k++)
        output[i_t + n * (j_t + k)] = data[s_i + (s_j+k)*64];
}
__global__
void optimalTransposeKernel(const float *input, float *output, int n) {
    // 64x64 tile with one padding float per row: the 65-float row
    // stride skews successive rows across banks, removing shared-memory
    // bank conflicts on both the write and the transposed read.
    __shared__ float tile[64*65];
    const int lx = threadIdx.x;
    const int ly = 4 * threadIdx.y;
    // This thread's 1x4 strip in the input matrix ...
    const int in_col = lx + 64 * blockIdx.x;
    const int in_row = ly + 64 * blockIdx.y;
    // ... and in the transposed output: the block indices swap roles so
    // the non-linear index transformation stays inside shared memory.
    const int out_col = lx + 64 * blockIdx.y;
    const int out_row = ly + 64 * blockIdx.x;
    // Fully unrolled (same accesses as writing the four statements by
    // hand, as the original did).
    #pragma unroll
    for (int k = 0; k < 4; k++)
        tile[ly + k + lx*65] = input[in_col + n * (in_row + k)];
    __syncthreads();
    #pragma unroll
    for (int k = 0; k < 4; k++)
        output[out_col + n * (out_row + k)] = tile[lx + (ly + k)*65];
}
void cudaTranspose(
    const float *d_input,
    float *d_output,
    int n,
    TransposeImplementation type)
{
    // All three implementations use the same launch configuration --
    // one 64x16 thread block per 64x64 tile (each thread owns a 1x4
    // strip) -- so hoist it out of the branches instead of repeating
    // the identical dim3 setup three times.
    // Assumes n is a positive multiple of 64: the kernels do no bounds
    // checking, and the grid would be empty for n < 64.
    const dim3 blockSize(64, 16);
    const dim3 gridSize(n / 64, n / 64);
    if (type == NAIVE)
        naiveTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
    else if (type == SHMEM)
        shmemTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
    else if (type == OPTIMAL)
        optimalTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n);
    // Unknown type.  NOTE(review): assert compiles away under NDEBUG,
    // so release builds silently do nothing for an invalid type.
    else
        assert(false);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.