hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M)
---|---|---|---|
c0fa8deefed800ec938ef06de99da6b419752238.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "calculate.cuh"
__global__ void gpuCalculate(int* d_arr) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
d_arr[idx]++;
}
void callCudaFunc(int* carr, int n) {
int* d_arr;
hipMalloc((void**)& d_arr, n * sizeof(int));
hipMemcpy(d_arr, carr, n*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gpuCalculate), dim3(100000), dim3(1000), 0, 0, d_arr);
hipMemcpy(carr, d_arr, n*sizeof(int), hipMemcpyDeviceToHost);
}
| c0fa8deefed800ec938ef06de99da6b419752238.cu | #include "calculate.cuh"
__global__ void gpuCalculate(int* d_arr) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
d_arr[idx]++;
}
void callCudaFunc(int* carr, int n) {
int* d_arr;
cudaMalloc((void**)& d_arr, n * sizeof(int));
cudaMemcpy(d_arr, carr, n*sizeof(int), cudaMemcpyHostToDevice);
gpuCalculate<<<100000, 1000>>>(d_arr);
cudaMemcpy(carr, d_arr, n*sizeof(int), cudaMemcpyDeviceToHost);
}
|
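The pair above illustrates the basic hipify renames: `cudaMalloc`/`cudaMemcpy` and the copy-direction enums gain a `hip` prefix, and the `<<<grid, block>>>` launch is rewritten as `hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), sharedMemBytes, stream, args...)`. Two things the original omits are error checking and freeing `d_arr`; note also that the fixed `100000 x 1000` launch indexes past `d_arr` whenever `n < 100000 * 1000`. A hedged sketch of the same wrapper with those gaps closed (`HIP_CHECK` is a hypothetical helper macro, not part of the HIP API, and the kernel gains a bounds check):

```cpp
#include <cstdio>
#include "hip/hip_runtime.h"

// Hypothetical helper macro; HIP itself only returns hipError_t.
#define HIP_CHECK(call)                                                   \
    do {                                                                  \
        hipError_t err_ = (call);                                         \
        if (err_ != hipSuccess)                                           \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",                  \
                    hipGetErrorString(err_), __FILE__, __LINE__);         \
    } while (0)

// Same kernel as above, with a bounds check so oversized grids are safe.
__global__ void gpuCalculateChecked(int* d_arr, int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < n) d_arr[idx]++;
}

void callCudaFuncChecked(int* carr, int n) {
    int* d_arr = nullptr;
    HIP_CHECK(hipMalloc(&d_arr, n * sizeof(int)));
    HIP_CHECK(hipMemcpy(d_arr, carr, n * sizeof(int), hipMemcpyHostToDevice));
    int threads = 256;
    int blocks = (n + threads - 1) / threads;  // just enough blocks for n elements
    hipLaunchKernelGGL(gpuCalculateChecked, dim3(blocks), dim3(threads), 0, 0, d_arr, n);
    HIP_CHECK(hipGetLastError());
    HIP_CHECK(hipMemcpy(carr, d_arr, n * sizeof(int), hipMemcpyDeviceToHost));
    HIP_CHECK(hipFree(d_arr));  // the original never frees this allocation
}
```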
c61c269ae29fedcbad628134b6e7a909b86d9cf6.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2014 Luke Marcus Biagio Testa
All rights reserved.
Redistribution and use in source and binary forms are permitted
provided that the above copyright notice and this paragraph are
duplicated in all such forms and that any documentation,
advertising materials, and other materials related to such
distribution and use acknowledge that the software was developed
by the Luke Marcus Biagio Testa. The name of the
Luke Marcus Biagio Testa may not be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include "FileHandler.h"
#include "FeatureSpace.h"
#include <sstream>
/*
* Running Information:
*
* x3 Command Line Arguments: [Dataset(.txt), output path, No Clusters]
*
* Dataset.txt is stored in assignment 1 repository root
*
*/
// Command line inputs: [Program InputFile OutputFilePath NoClusters]
int main(int argc, char *argv[])
{
// Check for the 3 required command line arguments (argc == 4 including the program name)
if ( argc != 4 )
{
std::cout << "Needs 4 Arguments: [Program Dataset K OutputPath]" << std::endl;
std::cout << "Using " << argc << " Arguments" << std::endl;
for (int i = 0; i < argc; i++)
std::cout << argv[i] << std::endl;
return 0;
}
// Ensure file exists and open file as read only. Initialize FeatureSpace with N clusters
FileHandler File( argv[1], true );
FeatureSpace Dictionary( atoi(argv[3]) );
// Store data points in feature space
while ( File.filePtr().peek() != EOF )
{
File.filePtr() >> Dictionary;
}
// Start Timer. Find Clusters.
double executionTime = jbutil::gettime();
int iterations = Dictionary.ClusterSearch();
executionTime = jbutil::gettime() - executionTime;
hipDeviceReset();
std::cout << "v10 Execution Time: " << executionTime << " s" << std::endl;
std::cout << "v10 Iterations: " << iterations << std::endl;
// End of Timer. Found Clusters. Get Cluster-Allocated Data Points.
std::vector<FeaturePoint> data = Dictionary.getDataPoints();
FeaturePoint* centers = Dictionary.getCentroids();
std::cout << "------------------- Cluster Information -----------------" << std::endl;
for(int i = 0; i < atoi(argv[3]) ; i++)
std::cout << "Centroid[" << i << "]: (" << centers[i].contents().first << "," << centers[i].contents().second << ")" << std::endl;
// -------------------- Output information to file ---------------------------------
/*
// Timing information
std::string log_speed(argv[2]);
log_speed.append("Timing_Information");
FileHandler speedFile(log_speed, false);
speedFile.filePtr() << executionTime;
// Iterations
std::string log_iteration(argv[2]);
log_iteration.append("Iteration_Information");
FileHandler iterationFile(log_iteration, false);
iterationFile.filePtr() << iterations;
std::cout << "Execution Time: " << executionTime << " s" << std::endl;
std::cout << "Iterations: " << iterations << std::endl;
// Output data to file. For each cluster [ip 3 on command line], for each point in cluster[i]
for(int i = 0; i < atoi(argv[3]); i++)
{
// Format output file name
std::string temp(argv[2]);
temp.append("Kmeans_Cluster_");
std::stringstream val;
val << i << "_Data";
temp.append( val.str() );
// Open Output File
FileHandler outputFile( temp, false );
//Write Cluster's Centroid to File
outputFile.filePtr() << (Point&)centers[i];
// Write cluster's data points to file
for(int j = 0; j < data.size(); j++)
{
// Output data point to file if contained within current cluster
if ( data[j].Cluster() == i )
outputFile.filePtr() << (Point&)data[j];
}
}
*/
return 0;
}
| c61c269ae29fedcbad628134b6e7a909b86d9cf6.cu |
/*
Copyright (c) 2014 Luke Marcus Biagio Testa
All rights reserved.
Redistribution and use in source and binary forms are permitted
provided that the above copyright notice and this paragraph are
duplicated in all such forms and that any documentation,
advertising materials, and other materials related to such
distribution and use acknowledge that the software was developed
by the Luke Marcus Biagio Testa. The name of the
Luke Marcus Biagio Testa may not be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include "FileHandler.h"
#include "FeatureSpace.h"
#include <sstream>
/*
* Running Information:
*
* x3 Command Line Arguments: [Dataset(.txt), output path, No Clusters]
*
* Dataset.txt is stored in assignment 1 repository root
*
*/
// Command line inputs: [Program InputFile OutputFilePath NoClusters]
int main(int argc, char *argv[])
{
// Check for the 3 required command line arguments (argc == 4 including the program name)
if ( argc != 4 )
{
std::cout << "Needs 4 Arguments: [Program Dataset K OutputPath]" << std::endl;
std::cout << "Using " << argc << " Arguments" << std::endl;
for (int i = 0; i < argc; i++)
std::cout << argv[i] << std::endl;
return 0;
}
// Ensure file exists and open file as read only. Initialize FeatureSpace with N clusters
FileHandler File( argv[1], true );
FeatureSpace Dictionary( atoi(argv[3]) );
// Store data points in feature space
while ( File.filePtr().peek() != EOF )
{
File.filePtr() >> Dictionary;
}
// Start Timer. Find Clusters.
double executionTime = jbutil::gettime();
int iterations = Dictionary.ClusterSearch();
executionTime = jbutil::gettime() - executionTime;
cudaDeviceReset();
std::cout << "v10 Execution Time: " << executionTime << " s" << std::endl;
std::cout << "v10 Iterations: " << iterations << std::endl;
// End of Timer. Found Clusters. Get Cluster-Allocated Data Points.
std::vector<FeaturePoint> data = Dictionary.getDataPoints();
FeaturePoint* centers = Dictionary.getCentroids();
std::cout << "------------------- Cluster Information -----------------" << std::endl;
for(int i = 0; i < atoi(argv[3]) ; i++)
std::cout << "Centroid[" << i << "]: (" << centers[i].contents().first << "," << centers[i].contents().second << ")" << std::endl;
// -------------------- Output information to file ---------------------------------
/*
// Timing information
std::string log_speed(argv[2]);
log_speed.append("Timing_Information");
FileHandler speedFile(log_speed, false);
speedFile.filePtr() << executionTime;
// Iterations
std::string log_iteration(argv[2]);
log_iteration.append("Iteration_Information");
FileHandler iterationFile(log_iteration, false);
iterationFile.filePtr() << iterations;
std::cout << "Execution Time: " << executionTime << " s" << std::endl;
std::cout << "Iterations: " << iterations << std::endl;
// Output data to file. For each cluster [ip 3 on command line], for each point in cluster[i]
for(int i = 0; i < atoi(argv[3]); i++)
{
// Format output file name
std::string temp(argv[2]);
temp.append("Kmeans_Cluster_");
std::stringstream val;
val << i << "_Data";
temp.append( val.str() );
// Open Output File
FileHandler outputFile( temp, false );
//Write Cluster's Centroid to File
outputFile.filePtr() << (Point&)centers[i];
// Write cluster's data points to file
for(int j = 0; j < data.size(); j++)
{
// Output data point to file if contained within current cluster
if ( data[j].Cluster() == i )
outputFile.filePtr() << (Point&)data[j];
}
}
*/
return 0;
}
|
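This pair is instructive because the host-side driver contains no kernel launches (the GPU work presumably lives behind `FeatureSpace::ClusterSearch()`), so apart from the generated banner the only API-level change hipify makes is:

```cpp
cudaDeviceReset();  // CUDA original, just before the timing printout
hipDeviceReset();   // HIP replacement emitted by hipify
```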
07ea24929eeeee2936c8ae8c49f183f31e32dc78.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o Saral_Password Saral_Password.cu
./Saral_Password
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char mypassword1[] = "SA9780";
char mypassword2[] = "AS3145";
char mypassword3[] = "EE4652";
char mypassword4[] = "BB2565";
char *p = attempt;
char *r = attempt;
char *a = attempt;
char *t = attempt;
char *p1 = mypassword1;
char *p2 = mypassword2;
char *p3 = mypassword3;
char *p4 = mypassword4;
while(*p == *p1) {
if(*p == '\0')
{
printf("Password: %s\n",mypassword1);
break;
}
p++;
p1++;
}
while(*r == *p2) {
if(*r == '\0')
{
printf("Password: %s\n",mypassword2);
break;
}
r++;
p2++;
}
while(*a == *p3) {
if(*a == '\0')
{
printf("Password: %s\n",mypassword3);
break;
}
a++;
p3++;
}
while(*t == *p4) {
if(*t == '\0')
{
printf("Password: %s\n",mypassword4);
return 1;
}
t++;
p4++;
}
return 0;
}
__global__ void kernel() {
char s1,s2,s3,s4;
char password[7];
password[6] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
char firstMatch = i;
char secondMatch = j;
password[0] = firstMatch;
password[1] = secondMatch;
for(s1='0'; s1<='9'; s1++){
for(s2='0'; s2<='9'; s2++){
for(s3='0'; s3<='9'; s3++){
for(s4='0'; s4<='9'; s4++){
password[2] = s1;
password[3] = s2;
password[4] = s3;
password[5] = s4;
if(is_a_match(password)) {
}
else {
//printf("tried: %s\n", password);
}
}
}
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( kernel) , dim3(26),dim3(26), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
| 07ea24929eeeee2936c8ae8c49f183f31e32dc78.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it achieves this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of a library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o Saral_Password Saral_Password.cu
./Saral_Password
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise, it returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
char mypassword1[] = "SA9780";
char mypassword2[] = "AS3145";
char mypassword3[] = "EE4652";
char mypassword4[] = "BB2565";
char *p = attempt;
char *r = attempt;
char *a = attempt;
char *t = attempt;
char *p1 = mypassword1;
char *p2 = mypassword2;
char *p3 = mypassword3;
char *p4 = mypassword4;
while(*p == *p1) {
if(*p == '\0')
{
printf("Password: %s\n",mypassword1);
break;
}
p++;
p1++;
}
while(*r == *p2) {
if(*r == '\0')
{
printf("Password: %s\n",mypassword2);
break;
}
r++;
p2++;
}
while(*a == *p3) {
if(*a == '\0')
{
printf("Password: %s\n",mypassword3);
break;
}
a++;
p3++;
}
while(*t == *p4) {
if(*t == '\0')
{
printf("Password: %s\n",mypassword4);
return 1;
}
t++;
p4++;
}
return 0;
}
__global__ void kernel() {
char s1,s2,s3,s4;
char password[7];
password[6] = '\0';
int i = blockIdx.x+65;
int j = threadIdx.x+65;
char firstMatch = i;
char secondMatch = j;
password[0] = firstMatch;
password[1] = secondMatch;
for(s1='0'; s1<='9'; s1++){
for(s2='0'; s2<='9'; s2++){
for(s3='0'; s3<='9'; s3++){
for(s4='0'; s4<='9'; s4++){
password[2] = s1;
password[3] = s2;
password[4] = s3;
password[5] = s4;
if(is_a_match(password)) {
}
else {
//printf("tried: %s\n", password);
}
}
}
}
}
}
int time_difference(struct timespec *start,
struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26,26>>>();
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
return 0;
}
|
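The launch `kernel<<<26,26>>>()` enumerates the two leading letters of the candidate: each of the 26 blocks fixes the first character (`'A' + blockIdx.x`, since 65 is ASCII `'A'`), each of its 26 threads fixes the second, and the four nested loops sweep the digits, so every thread tests 10^4 of the 26^2 * 10^4 candidates. A sketch of that index-to-candidate mapping in isolation (a hypothetical helper, not taken from the original):

```cpp
// Maps (blockIdx.x, threadIdx.x, loop counters) to a candidate like "SA9780".
__device__ void make_candidate(char out[7], int block, int thread,
                               char d0, char d1, char d2, char d3) {
    out[0] = (char)('A' + block);   // first letter: one per block
    out[1] = (char)('A' + thread);  // second letter: one per thread
    out[2] = d0; out[3] = d1; out[4] = d2; out[5] = d3;  // four digit positions
    out[6] = '\0';
}
```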
d3e5fed459d92b90b4c55071976a11e800533f93.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "non_max_supp_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
unsigned char *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
unsigned char *theta = NULL;
hipMalloc(&theta, XSIZE*YSIZE);
int rows = XSIZE;
int cols = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((non_max_supp_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, data, out, theta, rows, cols);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((non_max_supp_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, data, out, theta, rows, cols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((non_max_supp_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, data, out, theta, rows, cols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d3e5fed459d92b90b4c55071976a11e800533f93.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "non_max_supp_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE);
unsigned char *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
unsigned char *theta = NULL;
cudaMalloc(&theta, XSIZE*YSIZE);
int rows = XSIZE;
int cols = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
non_max_supp_kernel<<<gridBlock,threadBlock>>>(data,out,theta,rows,cols);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
non_max_supp_kernel<<<gridBlock,threadBlock>>>(data,out,theta,rows,cols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
non_max_supp_kernel<<<gridBlock,threadBlock>>>(data,out,theta,rows,cols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
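One caveat worth noting in this benchmark harness: neither the 10-launch warm-up loop nor the timed 1000-launch loop is followed by a device synchronize before `steady_clock::now()` is read, so `end - start` mostly measures kernel enqueue overhead (and can include leftover warm-up execution) rather than the kernels' run time. A sketch of the timed section with the missing synchronization, plus the event-based alternative CUDA provides:

```cpp
// Wall-clock timing: synchronize so the 1000 kernels have actually finished.
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < 1000; i++) {
    non_max_supp_kernel<<<gridBlock, threadBlock>>>(data, out, theta, rows, cols);
}
cudaDeviceSynchronize();  // without this, only launch overhead is measured
auto end = std::chrono::steady_clock::now();

// Equivalent measurement with CUDA events, timed on the GPU's own clock.
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);
for (int i = 0; i < 1000; i++) {
    non_max_supp_kernel<<<gridBlock, threadBlock>>>(data, out, theta, rows, cols);
}
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);
float ms = 0.0f;
cudaEventElapsedTime(&ms, evStart, evStop);  // milliseconds between the two events
```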
9d846a8eaf68409ab8a8d3cda9d14fdcb9a76a3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LBMconsts.cuh"
#include "structs.cuh"
#include "phys.h"
#include "data.cuh"
#include "compact-steps.cuh"
#include "lbm-steps.cuh"
template<int parity>__global__ __launch_bounds__(CompStep::Nblk) void compactStep_base(){
const int ix = blockIdx.x*CompStep::Nb.x+threadIdx.x;
const int iy = blockIdx.y*CompStep::Nb.y+threadIdx.y;
const int iz = blockIdx.z*CompStep::Nb.z+threadIdx.z;
Cell c = pars.data.get_cell_compact<parity>(ix,iy,iz);
c.collision();
pars.data.set_cell_compact<parity>(c, ix,iy,iz);
}
template<int parity> __global__ __launch_bounds__(CompStep::Nblk) void compactStep(){
const int ix = (blockIdx.x*CompStep::Nb.x+threadIdx.x + parity)%Nx;
const int iy = (blockIdx.y*CompStep::Nb.y+threadIdx.y + parity)%Ny;
const int iz = (blockIdx.z*CompStep::Nb.z+threadIdx.z + parity)%Nz;
const int thid = threadIdx.x + threadIdx.y*CompStep::Nb.x + threadIdx.z*CompStep::Nb.x*CompStep::Nb.y;
const int warp_id = thid/32;
const int lane_id = thid%32;
const int ix0 = blockIdx.x*CompStep::Nb.x + parity;
const int iy0 = blockIdx.y*CompStep::Nb.y + parity;
const int iz0 = blockIdx.z*CompStep::Nb.z + parity;
const int Qn = Cell::Qn;
//__shared__ ftype fi_sh[Nbs*Qn];
extern __shared__ ftype fi_sh[];
load_store_datablock(thid, make_int3(ix0,iy0,iz0), fi_sh, RW::Load);
//for(int iq=0; iq<Cell::Qn; iq++) fi_sh[thid*Qn+iq] = pars.data.tiles[ix+iy*Nx+iz*Nx*Ny].f[iq];
__syncthreads();
Cell c;
const char3 pbits3 = make_char3(threadIdx.x&1,threadIdx.y&1,threadIdx.z&1);
const char pbits = pbits3.x|pbits3.y<<1|pbits3.z<<2;
for(int iq=0; iq<Cell::Qn; iq++) {
const char4 shifts = LBMconsts::compact_access[iq+(Cell::Qn)*pbits];
const int3 shCrd = make_int3(threadIdx.x+shifts.x, threadIdx.y+shifts.y, threadIdx.z+shifts.z);
const int shid = shCrd.x+shCrd.y*CompStep::Nb.x + shCrd.z*CompStep::Nb.x*CompStep::Nb.y;
const int rev_iq = shifts.w;
c.f[iq] = fi_sh[ shid*Qn + rev_iq ];
}
//for(int iq=0; iq<Cell::Qn; iq++) c.f[iq]++;
c.collision();
for(int iq=0; iq<Cell::Qn; iq++) {
const char4 shifts = LBMconsts::compact_access[iq+(Cell::Qn)*pbits];
const int3 shCrd = make_int3(threadIdx.x+shifts.x, threadIdx.y+shifts.y, threadIdx.z+shifts.z);
const int shid = shCrd.x+shCrd.y*CompStep::Nb.x + shCrd.z*CompStep::Nb.x*CompStep::Nb.y;
const int rev_iq = shifts.w;
fi_sh[ shid*Qn + rev_iq ] = c.f[iq];
}
__syncthreads();
load_store_datablock(thid, make_int3(ix0,iy0,iz0), fi_sh, RW::Store);
//for(int iq=0; iq<Cell::Qn; iq++) pars.data.tiles[ix+iy*Nx+iz*Nx*Ny].f[iq] = fi_sh[thid*Qn+iq];
}
template<int parity>__global__ __launch_bounds__(CompStep::Nblk) void compactStep_4tests(){
const int ix = blockIdx.x*CompStep::Nb.x+threadIdx.x;
const int iy = blockIdx.y*CompStep::Nb.y+threadIdx.y;
const int iz = blockIdx.z*CompStep::Nb.z+threadIdx.z;
Cell c;
const char3 pbits3 = make_char3(ix&1,iy&1,iz&1)^parity;
const char pbits = pbits3.x|pbits3.y<<1|pbits3.z<<2;
for(int iq=0; iq<Cell::Qn; iq++) {
const int3 gCrd = make_int3(ix,iy,iz);
c.f[iq] = pars.data.tiles[ gCrd.x + gCrd.y*Nx+ gCrd.z*Nx*Ny ].f[iq];
}
for(int i=0;i<LBMconsts::Qn;i++) c.f[i]++;
for(int iq=0; iq<Cell::Qn; iq++) {
const int3 gCrd = make_int3(ix,iy,iz);
pars.data.tiles[ gCrd.x + gCrd.y*Nx+ gCrd.z*Nx*Ny ].f[iq] = c.f[iq];
}
}
template __global__ void compactStep<0>();
template __global__ void compactStep<1>();
| 9d846a8eaf68409ab8a8d3cda9d14fdcb9a76a3a.cu | #include "LBMconsts.cuh"
#include "structs.cuh"
#include "phys.h"
#include "data.cuh"
#include "compact-steps.cuh"
#include "lbm-steps.cuh"
template<int parity>__global__ __launch_bounds__(CompStep::Nblk) void compactStep_base(){
const int ix = blockIdx.x*CompStep::Nb.x+threadIdx.x;
const int iy = blockIdx.y*CompStep::Nb.y+threadIdx.y;
const int iz = blockIdx.z*CompStep::Nb.z+threadIdx.z;
Cell c = pars.data.get_cell_compact<parity>(ix,iy,iz);
c.collision();
pars.data.set_cell_compact<parity>(c, ix,iy,iz);
}
template<int parity> __global__ __launch_bounds__(CompStep::Nblk) void compactStep(){
const int ix = (blockIdx.x*CompStep::Nb.x+threadIdx.x + parity)%Nx;
const int iy = (blockIdx.y*CompStep::Nb.y+threadIdx.y + parity)%Ny;
const int iz = (blockIdx.z*CompStep::Nb.z+threadIdx.z + parity)%Nz;
const int thid = threadIdx.x + threadIdx.y*CompStep::Nb.x + threadIdx.z*CompStep::Nb.x*CompStep::Nb.y;
const int warp_id = thid/32;
const int lane_id = thid%32;
const int ix0 = blockIdx.x*CompStep::Nb.x + parity;
const int iy0 = blockIdx.y*CompStep::Nb.y + parity;
const int iz0 = blockIdx.z*CompStep::Nb.z + parity;
const int Qn = Cell::Qn;
//__shared__ ftype fi_sh[Nbs*Qn];
extern __shared__ ftype fi_sh[];
load_store_datablock(thid, make_int3(ix0,iy0,iz0), fi_sh, RW::Load);
//for(int iq=0; iq<Cell::Qn; iq++) fi_sh[thid*Qn+iq] = pars.data.tiles[ix+iy*Nx+iz*Nx*Ny].f[iq];
__syncthreads();
Cell c;
const char3 pbits3 = make_char3(threadIdx.x&1,threadIdx.y&1,threadIdx.z&1);
const char pbits = pbits3.x|pbits3.y<<1|pbits3.z<<2;
for(int iq=0; iq<Cell::Qn; iq++) {
const char4 shifts = LBMconsts::compact_access[iq+(Cell::Qn)*pbits];
const int3 shCrd = make_int3(threadIdx.x+shifts.x, threadIdx.y+shifts.y, threadIdx.z+shifts.z);
const int shid = shCrd.x+shCrd.y*CompStep::Nb.x + shCrd.z*CompStep::Nb.x*CompStep::Nb.y;
const int rev_iq = shifts.w;
c.f[iq] = fi_sh[ shid*Qn + rev_iq ];
}
//for(int iq=0; iq<Cell::Qn; iq++) c.f[iq]++;
c.collision();
for(int iq=0; iq<Cell::Qn; iq++) {
const char4 shifts = LBMconsts::compact_access[iq+(Cell::Qn)*pbits];
const int3 shCrd = make_int3(threadIdx.x+shifts.x, threadIdx.y+shifts.y, threadIdx.z+shifts.z);
const int shid = shCrd.x+shCrd.y*CompStep::Nb.x + shCrd.z*CompStep::Nb.x*CompStep::Nb.y;
const int rev_iq = shifts.w;
fi_sh[ shid*Qn + rev_iq ] = c.f[iq];
}
__syncthreads();
load_store_datablock(thid, make_int3(ix0,iy0,iz0), fi_sh, RW::Store);
//for(int iq=0; iq<Cell::Qn; iq++) pars.data.tiles[ix+iy*Nx+iz*Nx*Ny].f[iq] = fi_sh[thid*Qn+iq];
}
template<int parity>__global__ __launch_bounds__(CompStep::Nblk) void compactStep_4tests(){
const int ix = blockIdx.x*CompStep::Nb.x+threadIdx.x;
const int iy = blockIdx.y*CompStep::Nb.y+threadIdx.y;
const int iz = blockIdx.z*CompStep::Nb.z+threadIdx.z;
Cell c;
const char3 pbits3 = make_char3(ix&1,iy&1,iz&1)^parity;
const char pbits = pbits3.x|pbits3.y<<1|pbits3.z<<2;
for(int iq=0; iq<Cell::Qn; iq++) {
const int3 gCrd = make_int3(ix,iy,iz);
c.f[iq] = pars.data.tiles[ gCrd.x + gCrd.y*Nx+ gCrd.z*Nx*Ny ].f[iq];
}
for(int i=0;i<LBMconsts::Qn;i++) c.f[i]++;
for(int iq=0; iq<Cell::Qn; iq++) {
const int3 gCrd = make_int3(ix,iy,iz);
pars.data.tiles[ gCrd.x + gCrd.y*Nx+ gCrd.z*Nx*Ny ].f[iq] = c.f[iq];
}
}
template __global__ void compactStep<0>();
template __global__ void compactStep<1>();
|
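The kernel pair above performs an in-place ("compact") LBM update: the low bit of each thread coordinate forms a 3-bit parity code `pbits`, which selects a row of `LBMconsts::compact_access`; each entry supplies a shared-memory neighbor offset plus the reversed population index `rev_iq`, so the loads before `c.collision()` and the stores after it touch the same slots with roles swapped between the even and odd steps (`compactStep<0>` / `compactStep<1>`). A standalone sketch of just the parity encoding, mirroring the `^parity` variant used in `compactStep_4tests`:

```cpp
// 3-bit cell parity code, one bit per axis; values 0..7 index the rows of
// LBMconsts::compact_access (Qn entries per row).
__device__ inline char cell_parity(int x, int y, int z, int step_parity) {
    const char px = (char)((x & 1) ^ step_parity);
    const char py = (char)((y & 1) ^ step_parity);
    const char pz = (char)((z & 1) ^ step_parity);
    return (char)(px | (py << 1) | (pz << 2));
}
```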
ea48c71eb6b4c5a79c6acf1ff621a187b120a374.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "util.h"
__global__ void histo_prescan_kernel (
unsigned int* input,
int size,
unsigned int* minmax);
__global__ void histo_main_kernel (
uchar4 *sm_mappings,
unsigned int num_elements,
unsigned int sm_range_min,
unsigned int sm_range_max,
unsigned int histo_height,
unsigned int histo_width,
unsigned int *global_subhisto,
unsigned int *global_histo,
unsigned int *global_overflow);
__global__ void histo_intermediates_kernel (
uint2 *input,
unsigned int height,
unsigned int width,
unsigned int input_pitch,
uchar4 *sm_mappings);
__global__ void histo_final_kernel (
unsigned int sm_range_min,
unsigned int sm_range_max,
unsigned int histo_height,
unsigned int histo_width,
unsigned int *global_subhisto,
unsigned int *global_histo,
unsigned int *global_overflow,
unsigned int *final_histo);
/******************************************************************************
* Implementation: GPU
* Details:
* in the GPU implementation of histogram, we begin by computing the span of the
* input values into the histogram. Then the histogramming computation is carried
* out by a (BLOCK_X, BLOCK_Y) sized grid, where every group of Y (same X)
* computes its own partial histogram for a part of the input, and every Y in the
* group exclusively writes to a portion of the span computed in the beginning.
* Finally, a reduction is performed to combine all the partial histograms into
* the final result.
******************************************************************************/
int main(int argc, char* argv[]) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
parameters = pb_ReadParameters(&argc, argv);
if (!parameters)
return -1;
if(!parameters->inpFiles[0]){
fputs("Input file expected\n", stderr);
return -1;
}
char *prescans = "PreScanKernel";
char *postpremems = "PostPreMems";
char *intermediates = "IntermediatesKernel";
char *mains = "MainKernel";
char *finals = "FinalKernel";
pb_InitializeTimerSet(&timers);
pb_AddSubTimer(&timers, prescans, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, postpremems, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, intermediates, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, mains, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, finals, pb_TimerID_KERNEL);
pb_SwitchToTimer(&timers, pb_TimerID_IO);
int numIterations;
if (argc >= 2){
numIterations = atoi(argv[1]);
} else {
fputs("Expected at least one command line argument\n", stderr);
return -1;
}
unsigned int img_width, img_height;
unsigned int histo_width, histo_height;
FILE* f = fopen(parameters->inpFiles[0],"rb");
int result = 0;
result += fread(&img_width, sizeof(unsigned int), 1, f);
result += fread(&img_height, sizeof(unsigned int), 1, f);
result += fread(&histo_width, sizeof(unsigned int), 1, f);
result += fread(&histo_height, sizeof(unsigned int), 1, f);
if (result != 4){
fputs("Error reading input and output dimensions from file\n", stderr);
return -1;
}
unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int));
unsigned char* histo = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char));
result = fread(img, sizeof(unsigned int), img_width*img_height, f);
fclose(f);
if (result != img_width*img_height){
fputs("Error reading input array from file\n", stderr);
return -1;
}
int even_width = ((img_width+1)/2)*2;
unsigned int* input;
unsigned int* ranges;
uchar4* sm_mappings;
unsigned int* global_subhisto;
unsigned short* global_histo;
unsigned int* global_overflow;
unsigned char* final_histo;
hipMalloc((void**)&input , even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int));
hipMalloc((void**)&ranges , 2*sizeof(unsigned int));
hipMalloc((void**)&sm_mappings , img_width*img_height*sizeof(uchar4));
hipMalloc((void**)&global_subhisto , BLOCK_X*img_width*histo_height*sizeof(unsigned int));
hipMalloc((void**)&global_histo , img_width*histo_height*sizeof(unsigned short));
hipMalloc((void**)&global_overflow , img_width*histo_height*sizeof(unsigned int));
hipMalloc((void**)&final_histo , img_width*histo_height*sizeof(unsigned char));
hipMemset(final_histo , 0 , img_width*histo_height*sizeof(unsigned char));
for (int y=0; y < img_height; y++){
hipMemcpy(&(((unsigned int*)input)[y*even_width]),&img[y*img_width],img_width*sizeof(unsigned int), hipMemcpyHostToDevice);
}
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
for (int iter = 0; iter < numIterations; iter++) {
unsigned int ranges_h[2] = {UINT32_MAX, 0};
hipMemcpy(ranges,ranges_h, 2*sizeof(unsigned int), hipMemcpyHostToDevice);
pb_SwitchToSubTimer(&timers, prescans , pb_TimerID_KERNEL);
/*
std::cout memory footprint
*/
int memory_footprint = 0;
memory_footprint += even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int); //input
memory_footprint += 2*sizeof(unsigned int); //ranges
printf("\n#### histo_prescan_kernel memory_footprint:%d ####\n", memory_footprint);
/*
std::cout memory footprint
*/
hipLaunchKernelGGL(( histo_prescan_kernel), dim3(dim3(PRESCAN_BLOCKS_X)),dim3(dim3(PRESCAN_THREADS)), 0, 0, (unsigned int*)input, img_height*img_width, ranges);
pb_SwitchToSubTimer(&timers, postpremems , pb_TimerID_KERNEL);
hipMemcpy(ranges_h,ranges, 2*sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemset(global_subhisto,0,img_width*histo_height*sizeof(unsigned int));
pb_SwitchToSubTimer(&timers, intermediates, pb_TimerID_KERNEL);
/*
std::cout memory footprint
*/
memory_footprint = 0;
memory_footprint += even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int); //input
memory_footprint += img_width*img_height*sizeof(uchar4); //sm_mappings
printf("\n#### histo_intermediates_kernel memory_footprint:%d ####\n", memory_footprint);
/*
std::cout memory footprint
*/
hipLaunchKernelGGL(( histo_intermediates_kernel), dim3(dim3((img_height + UNROLL-1)/UNROLL)), dim3(dim3((img_width+1)/2)), 0, 0,
(uint2*)(input),
(unsigned int)img_height,
(unsigned int)img_width,
(img_width+1)/2,
(uchar4*)(sm_mappings)
);
pb_SwitchToSubTimer(&timers, mains, pb_TimerID_KERNEL);
/*
std::cout memory footprint
*/
memory_footprint = 0;
memory_footprint += img_width*img_height*sizeof(uchar4); //sm_mappings
memory_footprint += BLOCK_X*img_width*histo_height*sizeof(unsigned int); //global_subhisto
memory_footprint += img_width*histo_height*sizeof(unsigned short); //global_histo
memory_footprint += img_width*histo_height*sizeof(unsigned int); //global_overflow
printf("\n#### histo_main_kernel memory_footprint:%d ####\n", memory_footprint);
/*
std::cout memory footprint
*/
hipLaunchKernelGGL(( histo_main_kernel), dim3(dim3(BLOCK_X, ranges_h[1]-ranges_h[0]+1)), dim3(dim3(THREADS)), 0, 0,
(uchar4*)(sm_mappings),
img_height*img_width,
ranges_h[0], ranges_h[1],
histo_height, histo_width,
(unsigned int*)(global_subhisto),
(unsigned int*)(global_histo),
(unsigned int*)(global_overflow)
);
pb_SwitchToSubTimer(&timers, finals, pb_TimerID_KERNEL);
/*
std::cout memory footprint
*/
memory_footprint = 0;
memory_footprint += BLOCK_X*img_width*histo_height*sizeof(unsigned int); //global_subhisto
memory_footprint += img_width*histo_height*sizeof(unsigned short); //global_histo
memory_footprint += img_width*histo_height*sizeof(unsigned int); //global_overflow
memory_footprint += img_width*histo_height*sizeof(unsigned char); //final_histo
printf("\n#### histo_final_kernel memory_footprint:%d ####\n", memory_footprint);
/*
std::cout memory footprint
*/
hipLaunchKernelGGL(( histo_final_kernel), dim3(dim3(BLOCK_X*3)), dim3(dim3(512)), 0, 0,
ranges_h[0], ranges_h[1],
histo_height, histo_width,
(unsigned int*)(global_subhisto),
(unsigned int*)(global_histo),
(unsigned int*)(global_overflow),
(unsigned int*)(final_histo)
);
}
pb_SwitchToTimer(&timers, pb_TimerID_IO);
hipMemcpy(histo,final_histo, histo_height*histo_width*sizeof(unsigned char), hipMemcpyDeviceToHost);
hipFree(input);
hipFree(ranges);
hipFree(sm_mappings);
hipFree(global_subhisto);
hipFree(global_histo);
hipFree(global_overflow);
hipFree(final_histo);
if (parameters->outFile) {
dump_histo_img(histo, histo_height, histo_width, parameters->outFile);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
free(img);
free(histo);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
printf("\n");
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
pb_DestroyTimerSet(&timers);
return 0;
}
| ea48c71eb6b4c5a79c6acf1ff621a187b120a374.cu | /***************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include "util.h"
__global__ void histo_prescan_kernel (
unsigned int* input,
int size,
unsigned int* minmax);
__global__ void histo_main_kernel (
uchar4 *sm_mappings,
unsigned int num_elements,
unsigned int sm_range_min,
unsigned int sm_range_max,
unsigned int histo_height,
unsigned int histo_width,
unsigned int *global_subhisto,
unsigned int *global_histo,
unsigned int *global_overflow);
__global__ void histo_intermediates_kernel (
uint2 *input,
unsigned int height,
unsigned int width,
unsigned int input_pitch,
uchar4 *sm_mappings);
__global__ void histo_final_kernel (
unsigned int sm_range_min,
unsigned int sm_range_max,
unsigned int histo_height,
unsigned int histo_width,
unsigned int *global_subhisto,
unsigned int *global_histo,
unsigned int *global_overflow,
unsigned int *final_histo);
/******************************************************************************
* Implementation: GPU
* Details:
* in the GPU implementation of histogram, we begin by computing the span of the
* input values into the histogram. Then the histogramming computation is carried
* out by a (BLOCK_X, BLOCK_Y) sized grid, where every group of Y (same X)
* computes its own partial histogram for a part of the input, and every Y in the
* group exclusively writes to a portion of the span computed in the beginning.
* Finally, a reduction is performed to combine all the partial histograms into
* the final result.
******************************************************************************/
int main(int argc, char* argv[]) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
parameters = pb_ReadParameters(&argc, argv);
if (!parameters)
return -1;
if(!parameters->inpFiles[0]){
fputs("Input file expected\n", stderr);
return -1;
}
char *prescans = "PreScanKernel";
char *postpremems = "PostPreMems";
char *intermediates = "IntermediatesKernel";
char *mains = "MainKernel";
char *finals = "FinalKernel";
pb_InitializeTimerSet(&timers);
pb_AddSubTimer(&timers, prescans, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, postpremems, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, intermediates, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, mains, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, finals, pb_TimerID_KERNEL);
pb_SwitchToTimer(&timers, pb_TimerID_IO);
int numIterations;
if (argc >= 2){
numIterations = atoi(argv[1]);
} else {
fputs("Expected at least one command line argument\n", stderr);
return -1;
}
unsigned int img_width, img_height;
unsigned int histo_width, histo_height;
FILE* f = fopen(parameters->inpFiles[0],"rb");
int result = 0;
result += fread(&img_width, sizeof(unsigned int), 1, f);
result += fread(&img_height, sizeof(unsigned int), 1, f);
result += fread(&histo_width, sizeof(unsigned int), 1, f);
result += fread(&histo_height, sizeof(unsigned int), 1, f);
if (result != 4){
fputs("Error reading input and output dimensions from file\n", stderr);
return -1;
}
unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int));
unsigned char* histo = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char));
result = fread(img, sizeof(unsigned int), img_width*img_height, f);
fclose(f);
if (result != img_width*img_height){
fputs("Error reading input array from file\n", stderr);
return -1;
}
int even_width = ((img_width+1)/2)*2;
unsigned int* input;
unsigned int* ranges;
uchar4* sm_mappings;
unsigned int* global_subhisto;
unsigned short* global_histo;
unsigned int* global_overflow;
unsigned char* final_histo;
cudaMalloc((void**)&input , even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int));
cudaMalloc((void**)&ranges , 2*sizeof(unsigned int));
cudaMalloc((void**)&sm_mappings , img_width*img_height*sizeof(uchar4));
cudaMalloc((void**)&global_subhisto , BLOCK_X*img_width*histo_height*sizeof(unsigned int));
cudaMalloc((void**)&global_histo , img_width*histo_height*sizeof(unsigned short));
cudaMalloc((void**)&global_overflow , img_width*histo_height*sizeof(unsigned int));
cudaMalloc((void**)&final_histo , img_width*histo_height*sizeof(unsigned char));
cudaMemset(final_histo , 0 , img_width*histo_height*sizeof(unsigned char));
for (int y=0; y < img_height; y++){
cudaMemcpy(&(((unsigned int*)input)[y*even_width]),&img[y*img_width],img_width*sizeof(unsigned int), cudaMemcpyHostToDevice);
}
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
for (int iter = 0; iter < numIterations; iter++) {
unsigned int ranges_h[2] = {UINT32_MAX, 0};
cudaMemcpy(ranges,ranges_h, 2*sizeof(unsigned int), cudaMemcpyHostToDevice);
pb_SwitchToSubTimer(&timers, prescans , pb_TimerID_KERNEL);
/*
std::cout memory footprint
*/
int memory_footprint = 0;
memory_footprint += even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int); //input
memory_footprint += 2*sizeof(unsigned int); //ranges
printf("\n#### histo_prescan_kernel memory_footprint:%d ####\n", memory_footprint);
/*
std::cout memory footprint
*/
histo_prescan_kernel<<<dim3(PRESCAN_BLOCKS_X),dim3(PRESCAN_THREADS)>>>((unsigned int*)input, img_height*img_width, ranges);
pb_SwitchToSubTimer(&timers, postpremems , pb_TimerID_KERNEL);
cudaMemcpy(ranges_h,ranges, 2*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemset(global_subhisto,0,img_width*histo_height*sizeof(unsigned int));
pb_SwitchToSubTimer(&timers, intermediates, pb_TimerID_KERNEL);
/*
std::cout memory footprint
*/
memory_footprint = 0;
memory_footprint += even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int); //input
memory_footprint += img_width*img_height*sizeof(uchar4); //sm_mappings
printf("\n#### histo_intermediates_kernel memory_footprint:%d ####\n", memory_footprint);
/*
std::cout memory footprint
*/
histo_intermediates_kernel<<<dim3((img_height + UNROLL-1)/UNROLL), dim3((img_width+1)/2)>>>(
(uint2*)(input),
(unsigned int)img_height,
(unsigned int)img_width,
(img_width+1)/2,
(uchar4*)(sm_mappings)
);
pb_SwitchToSubTimer(&timers, mains, pb_TimerID_KERNEL);
/*
std::cout memory footprint
*/
memory_footprint = 0;
memory_footprint += img_width*img_height*sizeof(uchar4); //sm_mappings
memory_footprint += BLOCK_X*img_width*histo_height*sizeof(unsigned int); //global_subhisto
memory_footprint += img_width*histo_height*sizeof(unsigned short); //global_histo
memory_footprint += img_width*histo_height*sizeof(unsigned int); //global_overflow
printf("\n#### histo_main_kernel memory_footprint:%d ####\n", memory_footprint);
/*
std::cout memory footprint
*/
histo_main_kernel<<<dim3(BLOCK_X, ranges_h[1]-ranges_h[0]+1), dim3(THREADS)>>>(
(uchar4*)(sm_mappings),
img_height*img_width,
ranges_h[0], ranges_h[1],
histo_height, histo_width,
(unsigned int*)(global_subhisto),
(unsigned int*)(global_histo),
(unsigned int*)(global_overflow)
);
pb_SwitchToSubTimer(&timers, finals, pb_TimerID_KERNEL);
/*
std::cout memory footprint
*/
memory_footprint = 0;
memory_footprint += BLOCK_X*img_width*histo_height*sizeof(unsigned int); //global_subhisto
memory_footprint += img_width*histo_height*sizeof(unsigned short); //global_histo
memory_footprint += img_width*histo_height*sizeof(unsigned int); //global_overflow
memory_footprint += img_width*histo_height*sizeof(unsigned char); //final_histo
printf("\n#### histo_final_kernel memory_footprint:%d ####\n", memory_footprint);
/*
std::cout memory footprint
*/
histo_final_kernel<<<dim3(BLOCK_X*3), dim3(512)>>>(
ranges_h[0], ranges_h[1],
histo_height, histo_width,
(unsigned int*)(global_subhisto),
(unsigned int*)(global_histo),
(unsigned int*)(global_overflow),
(unsigned int*)(final_histo)
);
}
pb_SwitchToTimer(&timers, pb_TimerID_IO);
cudaMemcpy(histo,final_histo, histo_height*histo_width*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaFree(input);
cudaFree(ranges);
cudaFree(sm_mappings);
cudaFree(global_subhisto);
cudaFree(global_histo);
cudaFree(global_overflow);
cudaFree(final_histo);
if (parameters->outFile) {
dump_histo_img(histo, histo_height, histo_width, parameters->outFile);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
free(img);
free(histo);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
printf("\n");
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
pb_DestroyTimerSet(&timers);
return 0;
}
|
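The header comment describes the strategy: compute the active span of histogram bins, have each (BLOCK_X, BLOCK_Y) group accumulate a partial histogram over its slice of the input, then reduce the partials into the final result. The Parboil kernels themselves are only declared here and defined elsewhere; as a minimal, self-contained sketch of the partial-histogram-plus-reduction idea (my construction, not the Parboil code; `HISTO_BINS` is an assumed fixed bin count, whereas Parboil computes the span at runtime):

```cpp
#define HISTO_BINS 256  // assumption for this sketch only

__global__ void partial_histogram(const unsigned int* input, int n,
                                  unsigned int* global_histo) {
    __shared__ unsigned int local[HISTO_BINS];
    for (int i = threadIdx.x; i < HISTO_BINS; i += blockDim.x)
        local[i] = 0;
    __syncthreads();

    // Each block accumulates its slice of the input into a private histogram.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x)
        atomicAdd(&local[input[i] % HISTO_BINS], 1u);  // modulo keeps bins in range
    __syncthreads();

    // Reduction: merge the block-private histogram into the global one.
    for (int i = threadIdx.x; i < HISTO_BINS; i += blockDim.x)
        atomicAdd(&global_histo[i], local[i]);
}
```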
1cfe187078201542148d5c1da282a5aac78f44c4.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "sieve.cpp"
#include "parallelSieve.cu"
#include "utils.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
using namespace std;
int main()
{
int choice = -1;
int bound = 0;
bool *primeArray, *findArray;
bool *cpuArray;
clock_t t;
float total_time;
while (1)
{
cout << "Which algorithm would you like to run?" << endl;
cout << "0. Sieve of Eratosthenes (serial)" << endl;
cout << "1. Sieve of Sundaram (serial)" << endl;
//cout << "2. Sieve of Sundaram (serial, optimized)" << endl;
cout << "3. Sieve of Sundaram (GPU - PerRow, 1D)" << endl;
cout << "4. Sieve of Sundaram (GPU - PerRow, 2D)" << endl;
cout << "5. Sieve of Sundaram (GPU - PerElement, 1D)" << endl;
cout << "6. Sieve of Sundaram (GPU - PerRow, 2D)" << endl;
cout << "7. Sieve of Eratosthenes (GPU - Per Element 1D)" << endl;
cout << "8. Sieve of Eratosthenes (GPU - Per Element 2D)" << endl;
cout << "9. Sieve of Eratosthenes (GPU - Per Element 1D loop launch)" << endl;
cout << "10. Exit" << endl;
cin >> choice;
//Process exit
if (choice == 10)
return 0;
if (choice < 0 || choice > 10)
continue;
cout << "What number should we find primes up to?" << endl;
cin >> bound;
cout << "Creating reference prime array...";
bool * goldArray = new bool[bound + 1];
sundaramSieve(bound, goldArray);
//sundPartOneSerial(bound, goldArray);
cout << "done." << endl;
const dim3 a_gridSize(bound / 1024, 1, 1);
const dim3 a_blockSize(512, 1, 1);
int b_bound = (int)sqrt((double)(bound)) + 1;
const dim3 b_gridSize(b_bound/32, b_bound/32, 1);
const dim3 b_blockSize(32, 32, 1);
const dim3 c_gridSize(bound / 16384*8+1,1,1);
const dim3 c_blockSize(1024, 1, 1);
const dim3 d_gridSize(1+bound / (1024*1024) / 2, 1+bound / (1024*1024) / 2,1);
const dim3 d_blockSize(1024, 1024, 1);
const dim3 t_gridSize(bound / (32*32),1,1);
const dim3 t_blockSize(1024,1,1);
if (choice >= 3) //If we've run a GPU algorithm, allocate some memory
{
cout << "Allocating " << 2 * sizeof(bool) * (bound + 1) / 1024.0 / 1024.0 << "MB of memory" << endl;
checkCudaErrors(hipMalloc(&primeArray, sizeof(bool) * (bound +1)));
checkCudaErrors(hipMalloc(&findArray, sizeof(bool) * (bound + 1)));
checkCudaErrors(hipMemset(findArray, 0, sizeof(bool) * (bound + 1)));
checkCudaErrors(hipMemset(primeArray, 1, sizeof(bool) * (bound + 1)));
}
bool *loopPrimeArray = new bool[bound + 1];
switch (choice)
{
case 0:
{
t = clock();
//for (int i = 0; i < 10000; i++)
{
cpuArray = new bool[bound + 1];
eratosthenesSieve(bound, cpuArray);
break;
}
}
case 1:
{
t = clock();
//for (int i = 0; i < 10000; i++)
{
cpuArray = new bool[bound + 1];
sundaramSieve(bound, cpuArray);
break;
}
}
case 2:
//not yet implemented
break;
case 3:
t = clock();
//for (int i = 0; i < 10000; i++)
{
hipLaunchKernelGGL(( sundPartOnePerRow), dim3(t_gridSize), dim3(t_blockSize), 0, 0, bound, findArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( sundPartTwoPerElementOneD), dim3(t_gridSize), dim3(t_blockSize), 0, 0, bound, findArray, primeArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
break;
case 4:
t = clock();
//for (int i = 0; i < 10000; i++)
{
hipLaunchKernelGGL(( sundPartOnePerRow), dim3(t_gridSize), dim3(t_blockSize), 0, 0, bound, findArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( sundPartTwoPerElementTwoD), dim3(b_gridSize), dim3(b_blockSize), 0, 0, bound, findArray, primeArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
break;
case 5:
t = clock();
//for (int i = 0; i < 10000; i++)
{
cout << "part one" << endl;
hipLaunchKernelGGL(( sundPartOnePerElement), dim3(b_gridSize), dim3(b_blockSize), 0, 0, bound, findArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
cout << "part two" << endl;
hipLaunchKernelGGL(( sundPartTwoPerElementOneD), dim3(t_gridSize), dim3(t_blockSize), 0, 0, bound, findArray, primeArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
cout << "done" << endl;
}
break;
case 6:
t = clock();
//for (int i = 0; i < 10000; i++)
{
hipLaunchKernelGGL(( sundPartOnePerElement), dim3(b_gridSize), dim3(b_blockSize), 0, 0, bound, findArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( sundPartTwoPerElementTwoD), dim3(b_gridSize), dim3(b_blockSize), 0, 0, bound, findArray, primeArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
break;
case 7:
t = clock();
//for (int i = 0; i < 10000; i++)
{
checkCudaErrors(hipMemset(primeArray, 0, sizeof(bool) * (bound + 1)));
hipLaunchKernelGGL(( eratosPerElement), dim3(c_gridSize), dim3(c_blockSize), 0, 0, bound, primeArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
break;
case 8:
t = clock();
//for (int i = 0; i < 10000; i++)
{
checkCudaErrors(hipMemset(primeArray, 0, sizeof(bool) * (bound + 1)));
hipLaunchKernelGGL(( eratosPerElement2D), dim3(b_gridSize), dim3(b_blockSize), 0, 0, bound, primeArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
break;
case 9:
t = clock();
//for (int i = 0; i < 10000; i++)
{
checkCudaErrors(hipMemset(primeArray, 0, sizeof(bool) * (bound + 1)));
for( int j = 2; j < (bound /2); j++)
{
checkCudaErrors(hipMemcpy(loopPrimeArray, primeArray, sizeof(bool) * (bound + 1), hipMemcpyDeviceToHost));
if(!loopPrimeArray[j])
{
hipLaunchKernelGGL(( eratosParallelMult), dim3(c_gridSize), dim3(c_blockSize), 0, 0, j, bound, primeArray);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
}
}
break;
default:
break;
}
t = clock() - t;
total_time = ((float)t) / CLOCKS_PER_SEC;
cout << "Time taken to run: " << (total_time / 100) << " sec\n" << endl;
if (choice >= 3) //If we've run a GPU algorithm, copy then free the memory
{
bool *validatePrimeArray = new bool[bound + 1];
checkCudaErrors(hipMemcpy(validatePrimeArray, primeArray, sizeof(bool) * (bound + 1), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(findArray));
checkCudaErrors(hipFree(primeArray));
validatePrimes(bound, goldArray, validatePrimeArray);
delete [] validatePrimeArray;
}
else
{
validatePrimes(bound, goldArray, cpuArray);
}
delete [] cpuArray;
return 0;
}
}
| 1cfe187078201542148d5c1da282a5aac78f44c4.cu | #include <iostream>
#include "sieve.cpp"
#include "parallelSieve.cu"
#include "utils.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <time.h>
using namespace std;
int main()
{
int choice = -1;
int bound = 0;
bool *primeArray, *findArray;
bool *cpuArray;
clock_t t;
float total_time;
while (1)
{
cout << "Which algorithm would you like to run?" << endl;
cout << "0. Sieve of Eratosthenes (serial)" << endl;
cout << "1. Sieve of Sundaram (serial)" << endl;
//cout << "2. Sieve of Sundaram (serial, optimized)" << endl;
cout << "3. Sieve of Sundaram (GPU - PerRow, 1D)" << endl;
cout << "4. Sieve of Sundaram (GPU - PerRow, 2D)" << endl;
cout << "5. Sieve of Sundaram (GPU - PerElement, 1D)" << endl;
cout << "6. Sieve of Sundaram (GPU - PerRow, 2D)" << endl;
cout << "7. Sieve of Eratosthenes (GPU - Per Element 1D)" << endl;
cout << "8. Sieve of Eratosthenes (GPU - Per Element 2D)" << endl;
cout << "9. Sieve of Eratosthenes (GPU - Per Element 1D loop launch)" << endl;
cout << "10. Exit" << endl;
cin >> choice;
//Process exit
if (choice == 10)
return 0;
if (choice < 0 || choice > 10)
continue;
cout << "What number should we find primes up to?" << endl;
cin >> bound;
cout << "Creating reference prime array...";
bool * goldArray = new bool[bound + 1];
sundaramSieve(bound, goldArray);
//sundPartOneSerial(bound, goldArray);
cout << "done." << endl;
const dim3 a_gridSize(bound / 1024, 1, 1);
const dim3 a_blockSize(512, 1, 1);
int b_bound = (int)sqrt((double)(bound)) + 1;
const dim3 b_gridSize(b_bound/32, b_bound/32, 1);
const dim3 b_blockSize(32, 32, 1);
const dim3 c_gridSize(bound / 16384*8+1,1,1);
const dim3 c_blockSize(1024, 1, 1);
const dim3 d_gridSize(1+bound / (1024*1024) / 2, 1+bound / (1024*1024) / 2,1);
const dim3 d_blockSize(1024, 1024, 1);
const dim3 t_gridSize(bound / (32*32),1,1);
const dim3 t_blockSize(1024,1,1);
if (choice >= 3) //If we've run a GPU algorithm, allocate some memory
{
cout << "Allocating " << 2 * sizeof(bool) * (bound + 1) / 1024.0 / 1024.0 << "MB of memory" << endl;
checkCudaErrors(cudaMalloc(&primeArray, sizeof(bool) * (bound +1)));
checkCudaErrors(cudaMalloc(&findArray, sizeof(bool) * (bound + 1)));
checkCudaErrors(cudaMemset(findArray, 0, sizeof(bool) * (bound + 1)));
checkCudaErrors(cudaMemset(primeArray, 1, sizeof(bool) * (bound + 1)));
}
bool *loopPrimeArray = new bool[bound + 1];
switch (choice)
{
case 0:
{
t = clock();
//for (int i = 0; i < 10000; i++)
{
cpuArray = new bool[bound + 1];
eratosthenesSieve(bound, cpuArray);
break;
}
}
case 1:
{
t = clock();
//for (int i = 0; i < 10000; i++)
{
cpuArray = new bool[bound + 1];
sundaramSieve(bound, cpuArray);
break;
}
}
case 2:
//not yet implemented
break;
case 3:
t = clock();
//for (int i = 0; i < 10000; i++)
{
sundPartOnePerRow<<<t_gridSize, t_blockSize>>>(bound, findArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
sundPartTwoPerElementOneD<<<t_gridSize, t_blockSize>>>(bound, findArray, primeArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
break;
case 4:
t = clock();
//for (int i = 0; i < 10000; i++)
{
sundPartOnePerRow<<<t_gridSize, t_blockSize>>>(bound, findArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
sundPartTwoPerElementTwoD<<<b_gridSize, b_blockSize>>>(bound, findArray, primeArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
break;
case 5:
t = clock();
//for (int i = 0; i < 10000; i++)
{
cout << "part one" << endl;
sundPartOnePerElement<<<b_gridSize, b_blockSize>>>(bound, findArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cout << "part two" << endl;
sundPartTwoPerElementOneD<<<t_gridSize, t_blockSize>>>(bound, findArray, primeArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cout << "done" << endl;
}
break;
case 6:
t = clock();
//for (int i = 0; i < 10000; i++)
{
sundPartOnePerElement<<<b_gridSize, b_blockSize>>>(bound, findArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
sundPartTwoPerElementTwoD<<<b_gridSize, b_blockSize>>>(bound, findArray, primeArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
break;
case 7:
t = clock();
//for (int i = 0; i < 10000; i++)
{
checkCudaErrors(cudaMemset(primeArray, 0, sizeof(bool) * (bound + 1)));
eratosPerElement<<<c_gridSize, c_blockSize>>>(bound, primeArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
break;
case 8:
t = clock();
//for (int i = 0; i < 10000; i++)
{
checkCudaErrors(cudaMemset(primeArray, 0, sizeof(bool) * (bound + 1)));
eratosPerElement2D<<<b_gridSize, b_blockSize>>>(bound, primeArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
break;
case 9:
t = clock();
//for (int i = 0; i < 10000; i++)
{
checkCudaErrors(cudaMemset(primeArray, 0, sizeof(bool) * (bound + 1)));
for( int j = 2; j < (bound /2); j++)
{
checkCudaErrors(cudaMemcpy(loopPrimeArray, primeArray, sizeof(bool) * (bound + 1), cudaMemcpyDeviceToHost));
if(!loopPrimeArray[j])
{
eratosParallelMult<<<c_gridSize, c_blockSize>>>(j, bound, primeArray);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
}
}
break;
default:
break;
}
t = clock() - t;
total_time = ((float)t) / CLOCKS_PER_SEC;
cout << "Time taken to run: " << (total_time / 100) << " sec\n" << endl;
if (choice >= 3) //If we've run a GPU algorithm, copy then free the memory
{
bool *validatePrimeArray = new bool[bound + 1];
checkCudaErrors(cudaMemcpy(validatePrimeArray, primeArray, sizeof(bool) * (bound + 1), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(findArray));
checkCudaErrors(cudaFree(primeArray));
validatePrimes(bound, goldArray, validatePrimeArray);
delete [] validatePrimeArray;
}
else
{
validatePrimes(bound, goldArray, cpuArray);
}
delete [] cpuArray;
return 0;
}
}
|
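Both menus validate every GPU variant against a serial Sundaram reference (`goldArray`). For readers unfamiliar with the algorithm: Sundaram marks every integer of the form i + j + 2ij (1 <= i <= j) up to (n-1)/2; each unmarked k then yields the odd prime 2k + 1, with 2 handled separately. A sketch of the serial reference under the assumption that `sundaramSieve(bound, isPrime)` fills one flag per integer up to `bound`:

```cpp
#include <vector>

// Serial Sieve of Sundaram sketch: isPrime[p] == true iff p <= bound is prime.
// Assumed to match the interface of sundaramSieve(bound, array) used above.
void sundaram_reference(int bound, bool* isPrime) {
    const int half = (bound - 1) / 2;
    std::vector<bool> marked(half + 1, false);
    for (int i = 1; i + i + 2 * i * i <= half; ++i)
        for (int j = i; i + j + 2 * i * j <= half; ++j)
            marked[i + j + 2 * i * j] = true;  // 2*(i+j+2ij)+1 is composite
    for (int p = 0; p <= bound; ++p) isPrime[p] = false;
    if (bound >= 2) isPrime[2] = true;         // Sundaram only yields odd primes
    for (int k = 1; k <= half; ++k)
        if (!marked[k]) isPrime[2 * k + 1] = true;
}
```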
564b371113ed171b4f2d52af6a81985c4ff30f2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <numeric>
#include <iostream>
#include "ext_cuda_chunk.hpp"
#include "kernels/pack_kernel.cuknl"
using std::ceil;
using std::accumulate;
#define CELL_DATA 1
#define VERTEX_DATA 2
#define X_FACE_DATA 3
#define Y_FACE_DATA 4
#define Z_FACE_DATA 5
#define WARP_SIZE 32.0
#define PREPARE_INC(fieldType) \
int xInc = 0; \
int yInc = 0; \
int zInc = 0; \
switch(fieldType) \
{ \
case CELL_DATA:\
break; \
case VERTEX_DATA: \
xInc = yInc = zInc = 1; \
break; \
case X_FACE_DATA: \
xInc = 1; \
break; \
case Y_FACE_DATA: \
yInc = 1; \
break; \
case Z_FACE_DATA: \
zInc = 1; \
break; \
}
// Entry point for packing messages
extern "C"
void ext_pack_message_(
const int* chunk,
const int* fields,
const int* offsets,
const int* depth,
const int* face,
const int* fieldType,
double* buffer)
{
Chunks[*chunk-1]->PackUnpackKernel(fields, offsets, *depth, *face, *fieldType, buffer, true);
}
// Entry point for unpacking messages
extern "C"
void ext_unpack_message_(
const int* chunk,
const int* fields,
const int* offsets,
const int* depth,
const int* face,
const int* fieldType,
double* buffer)
{
Chunks[*chunk-1]->PackUnpackKernel(fields, offsets, *depth, *face, *fieldType, buffer, false);
}
// Performs buffer packing and unpacking
void TeaLeafCudaChunk::PackUnpackKernel(
const int* fields,
const int* offsets,
const int depth,
const int face,
const int fieldType,
double* buffer,
const bool pack)
{
const int exchanges = accumulate(fields, fields+NUM_FIELDS, 0);
if(exchanges < 1) return;
PREPARE_INC(fieldType);
std::string kernelName;
double* deviceBuffer = NULL;
CuKnlPackType packKernel = NULL;
int bufferLength = 0;
int innerX = xCells-2*HALO_PAD;
int innerY = yCells-2*HALO_PAD;
int innerZ = zCells-2*HALO_PAD;
switch(face)
{
case CHUNK_LEFT:
kernelName = (pack) ? "Pack Left" : "Unpack Left";
packKernel = (pack) ? CuKnlPackLeft : CuKnlUnpackLeft;
deviceBuffer = dLeftBuffer;
bufferLength = innerY*innerZ*depth;
break;
case CHUNK_RIGHT:
kernelName = (pack) ? "Pack Right" : "Unpack Right";
packKernel = (pack) ? CuKnlPackRight : CuKnlUnpackRight;
deviceBuffer = dRightBuffer;
bufferLength = innerY*innerZ*depth;
break;
case CHUNK_TOP:
kernelName = (pack) ? "Pack Top" : "Unpack Top";
packKernel = (pack) ? CuKnlPackTop : CuKnlUnpackTop;
deviceBuffer = dTopBuffer;
bufferLength = innerX*innerZ*depth;
break;
case CHUNK_BOTTOM:
kernelName = (pack) ? "Pack Bottom" : "Unpack Bottom";
packKernel = (pack) ? CuKnlPackBottom : CuKnlUnpackBottom;
deviceBuffer = dBottomBuffer;
bufferLength = innerX*innerZ*depth;
break;
case CHUNK_FRONT:
kernelName = (pack) ? "Pack Front" : "Unpack Front";
packKernel = (pack) ? CuKnlPackFront : CuKnlUnpackFront;
deviceBuffer = dFrontBuffer;
bufferLength = innerX*innerY*depth;
break;
case CHUNK_BACK:
kernelName = (pack) ? "Pack Back" : "Unpack Back";
packKernel = (pack) ? CuKnlPackBack : CuKnlUnpackBack;
deviceBuffer = dBackBuffer;
bufferLength = innerX*innerY*depth;
break;
default:
TeaLeafCudaChunk::Abort(__LINE__, __FILE__,
"Incorrect face provided: %d.\n", face);
}
if(!pack)
{
hipMemcpy(deviceBuffer, buffer, exchanges*bufferLength*sizeof(double),
hipMemcpyHostToDevice);
TeaLeafCudaChunk::CheckErrors(__LINE__,__FILE__);
}
int offset = 0;
int numBlocks = ceil(bufferLength/(float)BLOCK_SIZE);
for(int ii = 0; ii != NUM_FIELDS; ++ii)
{
if(fields[ii])
{
double* deviceField = NULL;
switch(ii+1)
{
case FIELD_DENSITY:
deviceField = dDensity;
break;
case FIELD_ENERGY0:
deviceField = dEnergy0;
break;
case FIELD_ENERGY1:
deviceField = dEnergy1;
break;
case FIELD_U:
deviceField = dU;
break;
case FIELD_P:
deviceField = dP;
break;
case FIELD_SD:
deviceField = dSd;
break;
default:
TeaLeafCudaChunk::Abort(__LINE__,__FILE__,
"Incorrect field provided: %d.\n", ii+1);
}
START_PROFILING();
int bufferOffset = bufferLength*offset++;
hipLaunchKernelGGL(( packKernel), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0,
xCells, yCells, zCells, innerX, innerY, innerZ,
deviceField, deviceBuffer+bufferOffset, depth);
POST_KERNEL(kernelName.c_str());
}
}
if(pack)
{
hipMemcpy(buffer, deviceBuffer, exchanges*bufferLength*sizeof(double),
hipMemcpyDeviceToHost);
TeaLeafCudaChunk::CheckErrors(__LINE__,__FILE__);
}
}
| 564b371113ed171b4f2d52af6a81985c4ff30f2a.cu | #include <cstdio>
#include <numeric>
#include <iostream>
#include "ext_cuda_chunk.hpp"
#include "kernels/pack_kernel.cuknl"
using std::ceil;
using std::accumulate;
#define CELL_DATA 1
#define VERTEX_DATA 2
#define X_FACE_DATA 3
#define Y_FACE_DATA 4
#define Z_FACE_DATA 5
#define WARP_SIZE 32.0
#define PREPARE_INC(fieldType) \
int xInc = 0; \
int yInc = 0; \
int zInc = 0; \
switch(fieldType) \
{ \
case CELL_DATA:\
break; \
case VERTEX_DATA: \
xInc = yInc = zInc = 1; \
break; \
case X_FACE_DATA: \
xInc = 1; \
break; \
case Y_FACE_DATA: \
yInc = 1; \
break; \
case Z_FACE_DATA: \
zInc = 1; \
break; \
} \
// Entry point for packing messages
extern "C"
void ext_pack_message_(
const int* chunk,
const int* fields,
const int* offsets,
const int* depth,
const int* face,
const int* fieldType,
double* buffer)
{
Chunks[*chunk-1]->PackUnpackKernel(fields, offsets, *depth, *face, *fieldType, buffer, true);
}
// Entry point for unpacking messages
extern "C"
void ext_unpack_message_(
const int* chunk,
const int* fields,
const int* offsets,
const int* depth,
const int* face,
const int* fieldType,
double* buffer)
{
Chunks[*chunk-1]->PackUnpackKernel(fields, offsets, *depth, *face, *fieldType, buffer, false);
}
// Performs buffer packing and unpacking
void TeaLeafCudaChunk::PackUnpackKernel(
const int* fields,
const int* offsets,
const int depth,
const int face,
const int fieldType,
double* buffer,
const bool pack)
{
const int exchanges = accumulate(fields, fields+NUM_FIELDS, 0);
if(exchanges < 1) return;
PREPARE_INC(fieldType);
std::string kernelName;
double* deviceBuffer = NULL;
CuKnlPackType packKernel = NULL;
int bufferLength = 0;
int innerX = xCells-2*HALO_PAD;
int innerY = yCells-2*HALO_PAD;
int innerZ = zCells-2*HALO_PAD;
switch(face)
{
case CHUNK_LEFT:
kernelName = (pack) ? "Pack Left" : "Unpack Left";
packKernel = (pack) ? CuKnlPackLeft : CuKnlUnpackLeft;
deviceBuffer = dLeftBuffer;
bufferLength = innerY*innerZ*depth;
break;
case CHUNK_RIGHT:
kernelName = (pack) ? "Pack Right" : "Unpack Right";
packKernel = (pack) ? CuKnlPackRight : CuKnlUnpackRight;
deviceBuffer = dRightBuffer;
bufferLength = innerY*innerZ*depth;
break;
case CHUNK_TOP:
kernelName = (pack) ? "Pack Top" : "Unpack Top";
packKernel = (pack) ? CuKnlPackTop : CuKnlUnpackTop;
deviceBuffer = dTopBuffer;
bufferLength = innerX*innerZ*depth;
break;
case CHUNK_BOTTOM:
kernelName = (pack) ? "Pack Bottom" : "Unpack Bottom";
packKernel = (pack) ? CuKnlPackBottom : CuKnlUnpackBottom;
deviceBuffer = dBottomBuffer;
bufferLength = innerX*innerZ*depth;
break;
case CHUNK_FRONT:
kernelName = (pack) ? "Pack Front" : "Unpack Front";
packKernel = (pack) ? CuKnlPackFront : CuKnlUnpackFront;
deviceBuffer = dFrontBuffer;
bufferLength = innerX*innerY*depth;
break;
case CHUNK_BACK:
kernelName = (pack) ? "Pack Back" : "Unpack Back";
packKernel = (pack) ? CuKnlPackBack : CuKnlUnpackBack;
deviceBuffer = dBackBuffer;
bufferLength = innerX*innerY*depth;
break;
default:
TeaLeafCudaChunk::Abort(__LINE__, __FILE__,
"Incorrect face provided: %d.\n", face);
}
if(!pack)
{
cudaMemcpy(deviceBuffer, buffer, exchanges*bufferLength*sizeof(double),
cudaMemcpyHostToDevice);
TeaLeafCudaChunk::CheckErrors(__LINE__,__FILE__);
}
int offset = 0;
int numBlocks = ceil(bufferLength/(float)BLOCK_SIZE);
for(int ii = 0; ii != NUM_FIELDS; ++ii)
{
if(fields[ii])
{
double* deviceField = NULL;
switch(ii+1)
{
case FIELD_DENSITY:
deviceField = dDensity;
break;
case FIELD_ENERGY0:
deviceField = dEnergy0;
break;
case FIELD_ENERGY1:
deviceField = dEnergy1;
break;
case FIELD_U:
deviceField = dU;
break;
case FIELD_P:
deviceField = dP;
break;
case FIELD_SD:
deviceField = dSd;
break;
default:
TeaLeafCudaChunk::Abort(__LINE__,__FILE__,
"Incorrect field provided: %d.\n", ii+1);
}
START_PROFILING();
int bufferOffset = bufferLength*offset++;
packKernel<<<numBlocks, BLOCK_SIZE>>>(
xCells, yCells, zCells, innerX, innerY, innerZ,
deviceField, deviceBuffer+bufferOffset, depth);
POST_KERNEL(kernelName.c_str());
}
}
if(pack)
{
cudaMemcpy(buffer, deviceBuffer, exchanges*bufferLength*sizeof(double),
cudaMemcpyDeviceToHost);
TeaLeafCudaChunk::CheckErrors(__LINE__,__FILE__);
}
}
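// A sketch of one halo-exchange round as the Fortran driver presumably
// performs it (the MPI side is assumed, not shown in this file): for each
// face, pack into a send buffer, exchange with the neighbouring rank, then
// unpack the received data:
// ext_pack_message_(&chunk, fields, offsets, &depth, &face, &ft, send_buf);
// /* MPI_Sendrecv(send_buf, ..., recv_buf, ...) on the host */
// ext_unpack_message_(&chunk, fields, offsets, &depth, &face, &ft, recv_buf);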
|
8ed7b50dcb88b7f2886507cdb781395ed26fe05f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Config.h>
#include <ATen/Dispatch.h>
#include <ATen/Utils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/native/Resize.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/SpectralOpsUtils.h>
#include <ATen/native/hip/CuFFTUtils.h>
#include <ATen/native/hip/CuFFTPlanCache.h>
#include <c10/util/accumulate.h>
#include <cmath>
#include <vector>
namespace at { namespace native {
using namespace at::native::detail;
// Offset calculator for indexing in Hermitian mirrored order.
// In mirrored dims, maps linear index i to (n - i) % n
template <typename index_t>
struct HermitianSymmetryOffsetCalculator {
using offset_type = at::detail::Array<index_t, 1>;
using dim_type = std::remove_cv_t<decltype(MAX_DIMS)>;
dim_type dims;
at::cuda::detail::IntDivider<index_t> sizes_[MAX_DIMS];
index_t strides_[MAX_DIMS];
uint32_t mirror_dim_; // bit mask
static_assert(MAX_DIMS < 32, "Need a bigger mask type");
HermitianSymmetryOffsetCalculator(
IntArrayRef sizes, IntArrayRef strides, IntArrayRef dim,
const int64_t element_size){
TORCH_INTERNAL_ASSERT(sizes.size() == strides.size());
TORCH_INTERNAL_ASSERT(sizes.size() <= MAX_DIMS);
dims = sizes.size();
using at::cuda::detail::IntDivider;
for (dim_type i = 0; i < MAX_DIMS; ++i) {
if (i < dims) {
sizes_[i] = IntDivider<index_t>(sizes[i]);
strides_[i] = strides[i] / element_size;
} else {
sizes_[i] = IntDivider<index_t>(1);
strides_[i] = 0;
}
}
mirror_dim_ = 0;
for (int64_t i = 0; i < dim.size(); ++i) {
mirror_dim_ |= (uint32_t{1} << dim[i]);
}
}
C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
index_t offset = 0;
for (dim_type dim = 0; dim < dims; ++dim) {
auto divmod = sizes_[dim].divmod(linear_idx);
linear_idx = divmod.div;
if ((mirror_dim_ & (uint32_t{1} << dim)) == 0) {
offset += divmod.mod * strides_[dim];
} else if (divmod.mod != 0) {
offset += (sizes_[dim].divisor - divmod.mod) * strides_[dim];
}
}
offset_type offsets;
offsets[0] = offset;
return offsets;
}
};
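// Worked example: for a mirrored dim of size n = 8 with stride s, residue 0
// stays at offset 0 and residue k != 0 maps to (8 - k) * s, so indices
// 1..7 read from 7..1 -- exactly the (n - i) % n map described above.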
// out[:] = conj(in[:]) where in and out ordering is generalized by offset calculators
template <typename scalar_t, typename inp_calc_t, typename out_calc_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void _fft_conjugate_copy_kernel(
int64_t numel, scalar_t * out_data, const scalar_t * in_data,
inp_calc_t ic, out_calc_t oc) {
CUDA_KERNEL_LOOP_TYPE(index, numel, int64_t) {
auto in_offset = ic.get(index)[0];
auto out_offset = oc.get(index)[0];
out_data[out_offset] = std::conj(in_data[in_offset]);
}
}
// In real-to-complex transform, cuFFT only fills half of the values due to
// conjugate symmetry. See native/SpectralUtils.h for more details.
// The following function fills in the other half with symmetry in
// case of real-to-complex transform with onesided=False flag.
// See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h.
// input should be a tensor of same size as full (twosided)
// signals, but only contains half (onesided) of the values.
// This function modifies inplace.
void _fft_fill_with_conjugate_symmetry_cuda_(
ScalarType dtype, IntArrayRef mirror_dims, IntArrayRef signal_half_sizes,
IntArrayRef in_strides, const void * in_data,
IntArrayRef out_strides, void * out_data) {
// Do the actual conjugate mirroring.
// TODO: consider adding a 32bit indexed kernel for improved performance
auto* in_strides_ptr = in_strides.data();
const int ndim = in_strides.size();
const int64_t element_size = scalarTypeToTypeMeta(dtype).itemsize();
OffsetCalculator<1, int64_t> input_offset_calculator(
ndim, signal_half_sizes.data(), &in_strides_ptr, &element_size);
HermitianSymmetryOffsetCalculator<int64_t> output_offset_calculator(
signal_half_sizes, out_strides, mirror_dims, element_size);
const auto numel = c10::multiply_integers(signal_half_sizes);
AT_DISPATCH_COMPLEX_TYPES(dtype, "_fft_fill_with_conjugate_symmetry", [&] {
using namespace cuda::detail;
hipLaunchKernelGGL(( _fft_conjugate_copy_kernel),
dim3(GET_BLOCKS(numel)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
numel,
static_cast<scalar_t*>(out_data),
static_cast<const scalar_t*>(in_data),
input_offset_calculator,
output_offset_calculator);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
REGISTER_DISPATCH(fft_fill_with_conjugate_symmetry_stub, &_fft_fill_with_conjugate_symmetry_cuda_);
}} // at::native
| 8ed7b50dcb88b7f2886507cdb781395ed26fe05f.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Config.h>
#include <ATen/Dispatch.h>
#include <ATen/Utils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/native/Resize.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/SpectralOpsUtils.h>
#include <ATen/native/cuda/CuFFTUtils.h>
#include <ATen/native/cuda/CuFFTPlanCache.h>
#include <c10/util/accumulate.h>
#include <cmath>
#include <vector>
namespace at { namespace native {
using namespace at::native::detail;
// Offset calculator for indexing in Hermitian mirrored order.
// In mirrored dims, maps linear index i to (n - i) % n
template <typename index_t>
struct HermitianSymmetryOffsetCalculator {
using offset_type = at::detail::Array<index_t, 1>;
using dim_type = std::remove_cv_t<decltype(MAX_DIMS)>;
dim_type dims;
at::cuda::detail::IntDivider<index_t> sizes_[MAX_DIMS];
index_t strides_[MAX_DIMS];
uint32_t mirror_dim_; // bit mask
static_assert(MAX_DIMS < 32, "Need a bigger mask type");
HermitianSymmetryOffsetCalculator(
IntArrayRef sizes, IntArrayRef strides, IntArrayRef dim,
const int64_t element_size){
TORCH_INTERNAL_ASSERT(sizes.size() == strides.size());
TORCH_INTERNAL_ASSERT(sizes.size() <= MAX_DIMS);
dims = sizes.size();
using at::cuda::detail::IntDivider;
for (dim_type i = 0; i < MAX_DIMS; ++i) {
if (i < dims) {
sizes_[i] = IntDivider<index_t>(sizes[i]);
strides_[i] = strides[i] / element_size;
} else {
sizes_[i] = IntDivider<index_t>(1);
strides_[i] = 0;
}
}
mirror_dim_ = 0;
for (int64_t i = 0; i < dim.size(); ++i) {
mirror_dim_ |= (uint32_t{1} << dim[i]);
}
}
C10_HOST_DEVICE offset_type get(index_t linear_idx) const {
index_t offset = 0;
for (dim_type dim = 0; dim < dims; ++dim) {
auto divmod = sizes_[dim].divmod(linear_idx);
linear_idx = divmod.div;
if ((mirror_dim_ & (uint32_t{1} << dim)) == 0) {
offset += divmod.mod * strides_[dim];
} else if (divmod.mod != 0) {
offset += (sizes_[dim].divisor - divmod.mod) * strides_[dim];
}
}
offset_type offsets;
offsets[0] = offset;
return offsets;
}
};
// out[:] = conj(in[:]) where in and out ordering is generalized by offset calculators
template <typename scalar_t, typename inp_calc_t, typename out_calc_t>
C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS)
__global__ void _fft_conjugate_copy_kernel(
int64_t numel, scalar_t * out_data, const scalar_t * in_data,
inp_calc_t ic, out_calc_t oc) {
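// CUDA_KERNEL_LOOP_TYPE expands to a grid-stride loop over [0, numel), so the
// kernel covers every element even when the grid sized by GET_BLOCKS is capped.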
CUDA_KERNEL_LOOP_TYPE(index, numel, int64_t) {
auto in_offset = ic.get(index)[0];
auto out_offset = oc.get(index)[0];
out_data[out_offset] = std::conj(in_data[in_offset]);
}
}
// In real-to-complex transform, cuFFT only fills half of the values due to
// conjugate symmetry. See native/SpectralUtils.h for more details.
// The following function fills in the other half with symmetry in
// case of real-to-complex transform with onesided=False flag.
// See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h.
// input should be a tensor of same size as full (twosided)
// signals, but only contains half (onesided) of the values.
// This function modifies inplace.
void _fft_fill_with_conjugate_symmetry_cuda_(
ScalarType dtype, IntArrayRef mirror_dims, IntArrayRef signal_half_sizes,
IntArrayRef in_strides, const void * in_data,
IntArrayRef out_strides, void * out_data) {
// Do the actual conjugate mirroring.
// TODO: consider adding a 32bit indexed kernel for improved performance
auto* in_strides_ptr = in_strides.data();
const int ndim = in_strides.size();
const int64_t element_size = scalarTypeToTypeMeta(dtype).itemsize();
OffsetCalculator<1, int64_t> input_offset_calculator(
ndim, signal_half_sizes.data(), &in_strides_ptr, &element_size);
HermitianSymmetryOffsetCalculator<int64_t> output_offset_calculator(
signal_half_sizes, out_strides, mirror_dims, element_size);
const auto numel = c10::multiply_integers(signal_half_sizes);
AT_DISPATCH_COMPLEX_TYPES(dtype, "_fft_fill_with_conjugate_symmetry", [&] {
using namespace cuda::detail;
_fft_conjugate_copy_kernel<<<
GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
numel,
static_cast<scalar_t*>(out_data),
static_cast<const scalar_t*>(in_data),
input_offset_calculator,
output_offset_calculator);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
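// Registered below as the CUDA backend of fft_fill_with_conjugate_symmetry_stub;
// the device-generic rfft path dispatches here when a two-sided output is requested.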
REGISTER_DISPATCH(fft_fill_with_conjugate_symmetry_stub, &_fft_fill_with_conjugate_symmetry_cuda_);
}} // at::native
|
66baf669e871dc4b5d3c791ff1da0bb397ac30d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _CONVOLUTION_KERNEL_CU
#define _CONVOLUTION_KERNEL_CU
using namespace std;
#include <stdio.h>
#define IDX_1D(col, row, stride) ((col * stride) + row)
#define COL_2D(index, stride) (index / stride)
#define ROW_2D(index, stride) (index % stride)
#define ROUNDUP32(integer) ( ((integer-1)/32 + 1) * 32 )
#define SHMEM 8192
#define FLOAT_SZ sizeof(float)
texture<float, 3, hipReadModeElementType> texPsf;
textureReference* texPsfRef;
// Use tex3D(texPsf, x, y, z) to access texture data
////////////////////////////////////////////////////////////////////////////////
//
// This macro is defined because EVERY convolution-like function has the same
// variables. Mainly, the pixel identifiers for this thread based on block
// size, and the size of the padded rectangle that each block will work with
//
////////////////////////////////////////////////////////////////////////////////
#define CREATE_CONVOLUTION_VARIABLES(psfColRad, psfRowRad) \
\
const int cornerCol = blockDim.x*blockIdx.x; \
const int cornerRow = blockDim.y*blockIdx.y; \
const int globalCol = cornerCol + threadIdx.x; \
const int globalRow = cornerRow + threadIdx.y; \
const int globalIdx = IDX_1D(globalCol, globalRow, imgRows); \
\
const int localCol = threadIdx.x; \
const int localRow = threadIdx.y; \
const int localIdx = IDX_1D(localCol, localRow, blockDim.y); \
const int localPixels = blockDim.x*blockDim.y; \
\
const int padRectStride = blockDim.y + 2*psfRowRad; \
const int padRectCol = localCol + psfColRad; \
const int padRectRow = localRow + psfRowRad; \
/*const int padRectIdx = IDX_1D(padRectCol, padRectRow, padRectStride); */ \
const int padRectPixels = padRectStride * (blockDim.x + 2*psfColRad); \
\
__shared__ char sharedMem[SHMEM]; \
float* shmPadRect = (float*)sharedMem; \
float* shmOutput = (float*)&shmPadRect[ROUNDUP32(padRectPixels)]; \
int nLoop;
////////////////////////////////////////////////////////////////////////////////
//
// Every block will need a buffered copy of the input data in its shared memory,
// so it doesn't do multiple global memory reads to find the energy contributing
// to its own value.
//
// This copy is very much like COPY_LIN_ARRAY_TO_SHMEM except that this isn't
// a linear array, and this needs to accommodate pixels that fall out of bounds
// from the image. Threads are temporarily reassigned to execute this copy in
// parallel.
//
////////////////////////////////////////////////////////////////////////////////
#define PREPARE_PADDED_RECTANGLE(psfColRad, psfRowRad) \
\
nLoop = (padRectPixels/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < padRectPixels) \
{ \
int prCol = COL_2D(prIndex, padRectStride); \
int prRow = ROW_2D(prIndex, padRectStride); \
int glCol = cornerCol + prCol - psfColRad; \
int glRow = cornerRow + prRow - psfRowRad; \
int glIdx = IDX_1D(glCol, glRow, imgRows); \
if(glRow >= 0 && \
glRow < imgRows && \
glCol >= 0 && \
glCol < imgCols) \
shmPadRect[prIndex] = imgInPtr[glIdx]; \
else \
shmPadRect[prIndex] = 0.0f; \
} \
} \
////////////////////////////////////////////////////////////////////////////////
//
// Same as above, except for binary images, using -1 as "OFF" and +1 as "ON"
// The user is not expected to do this him/herself, and it's easy enough to
// manipulate the data on the way in and out (just don't forget to convert back
// before copying out the result)
//
////////////////////////////////////////////////////////////////////////////////
#define PREPARE_PADDED_RECTANGLE_BINARY(psfColRad, psfRowRad) \
\
nLoop = (padRectPixels/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < padRectPixels) \
{ \
int prCol = COL_2D(prIndex, padRectStride); \
int prRow = ROW_2D(prIndex, padRectStride); \
int glCol = cornerCol + prCol - psfColRad; \
int glRow = cornerRow + prRow - psfRowRad; \
int glIdx = IDX_1D(glCol, glRow, imgRows); \
if(glRow >= 0 && \
glRow < imgRows && \
glCol >= 0 && \
glCol < imgCols) \
shmPadRect[prIndex] = imgInPtr[glIdx]*2 - 1; \
else \
shmPadRect[prIndex] = -1; \
} \
} \
////////////////////////////////////////////////////////////////////////////////
//
// Frequently, we want to pull some linear arrays into shared memory (usually
// PSFs) which will be queried often, and we want them close to the threads.
//
// This macro temporarily reassigns all the threads to do the memory copy from
// global memory to shared memory in parallel. Since the array may be bigger
// than the blocksize, some threads may be doing multiple mem copies
//
////////////////////////////////////////////////////////////////////////////////
#define COPY_LIN_ARRAY_TO_SHMEM(srcPtr, dstPtr, nValues) \
nLoop = (nValues/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < nValues) \
{ \
dstPtr[prIndex] = srcPtr[prIndex]; \
} \
}
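// Example: a 16x16 block (localPixels = 256) copying a 33x33 PSF
// (nValues = 1089) runs nLoop = 1089/256 + 1 = 5 passes, with the
// out-of-range indices of the last pass masked by the bounds check.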
__global__ void convolveBasic(
float* imgInPtr,
float* imgOutPtr,
float* imgPsfPtr,
int imgCols,
int imgRows,
int psfColRad,
int psfRowRad)
{
CREATE_CONVOLUTION_VARIABLES(psfColRad, psfRowRad);
shmOutput[localIdx] = 0.0f;
const int psfStride = psfRowRad*2+1;
const int psfPixels = psfStride*(psfColRad*2+1);
float* shmPsf = (float*)&shmOutput[ROUNDUP32(localPixels)];
COPY_LIN_ARRAY_TO_SHMEM(imgPsfPtr, shmPsf, psfPixels);
PREPARE_PADDED_RECTANGLE(psfColRad, psfRowRad);
__syncthreads();
float accumFloat = 0.0f;
for(int coff=-psfColRad; coff<=psfColRad; coff++)
{
for(int roff=-psfRowRad; roff<=psfRowRad; roff++)
{
int psfCol = psfColRad - coff;
int psfRow = psfRowRad - roff;
int psfIdx = IDX_1D(psfCol, psfRow, psfStride);
float psfVal = shmPsf[psfIdx];
int shmPRCol = padRectCol + coff;
int shmPRRow = padRectRow + roff;
int shmPRIdx = IDX_1D(shmPRCol, shmPRRow, padRectStride);
accumFloat += psfVal * shmPadRect[shmPRIdx];
}
}
shmOutput[localIdx] = accumFloat;
__syncthreads();
imgOutPtr[globalIdx] = shmOutput[localIdx];
}
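// Shared-memory budget: shmPadRect, shmOutput and shmPsf all live inside the
// 8 KiB sharedMem array, so ROUNDUP32(padRectPixels) + ROUNDUP32(localPixels)
// + psfPixels floats must fit in SHMEM/4 = 2048 entries. For a 16x16 block
// and a 5x5 PSF (radii 2): ROUNDUP32(20*20) + ROUNDUP32(256) + 25
// = 416 + 256 + 25 = 697, comfortably within budget.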
/*
// TODO: Still need to debug this function
__global__ void convolveBilateral(
float* imgInPtr,
float* imgOutPtr,
float* imgPsfPtr,
float* intPsfPtr,
int imgCols,
int imgRows,
int psfColRad,
int psfRowRad,
int intPsfRad)
{
CREATE_CONVOLUTION_VARIABLES(psfColRad, psfRowRad);
shmOutput[localIdx] = 0.0f;
const int psfStride = psfRowRad*2+1;
const int psfPixels = psfStride*(psfColRad*2+1);
float* shmPsf = (float*)&shmOutput[ROUNDUP32(localPixels)];
float* shmPsfI = (float*)&shmPsf[ROUNDUP32(psfPixels)];
COPY_LIN_ARRAY_TO_SHMEM(imgPsfPtr, shmPsf, psfPixels);
COPY_LIN_ARRAY_TO_SHMEM(intPsfPtr, shmPsfI, 2*intPsfRad+1);
PREPARE_PADDED_RECTANGLE(psfColRad, psfRowRad);
__syncthreads();
float accumFloat = 0.0f;
float myVal = shmPadRect[padRectIdx];
for(int coff=-psfColRad; coff<=psfColRad; coff++)
{
for(int roff=-psfRowRad; roff<=psfRowRad; roff++)
{
int psfCol = psfColRad - coff;
int psfRow = psfRowRad - roff;
int psfIdx = IDX_1D(psfCol, psfRow, psfStride);
float psfVal = shmPsf[psfIdx];
int shmPRCol = padRectCol + coff;
int shmPRRow = padRectRow + roff;
int shmPRIdx = IDX_1D(shmPRCol, shmPRRow, padRectStride);
float thatVal = shmPadRect[shmPRIdx];
float intVal = shmPsfI[(int)(thatVal-myVal+intPsfRad)];
accumFloat += psfVal * intVal *shmPadRect[shmPRIdx];
}
}
shmOutput[localIdx] = accumFloat;
__syncthreads();
imgOutPtr[globalIdx] = shmOutput[localIdx];
}
*/
#endif
| 66baf669e871dc4b5d3c791ff1da0bb397ac30d7.cu | #ifndef _CONVOLUTION_KERNEL_CU
#define _CONVOLUTION_KERNEL_CU
using namespace std;
#include <stdio.h>
#define IDX_1D(col, row, stride) ((col * stride) + row)
#define COL_2D(index, stride) (index / stride)
#define ROW_2D(index, stride) (index % stride)
#define ROUNDUP32(integer) ( ((integer-1)/32 + 1) * 32 )
#define SHMEM 8192
#define FLOAT_SZ sizeof(float)
texture<float, 3, cudaReadModeElementType> texPsf;
textureReference* texPsfRef;
// Use tex3D(texPsf, x, y, z) to access texture data
////////////////////////////////////////////////////////////////////////////////
//
// This macro is defined because EVERY convolution-like function has the same
// variables. Mainly, the pixel identifiers for this thread based on block
// size, and the size of the padded rectangle that each block will work with
//
////////////////////////////////////////////////////////////////////////////////
#define CREATE_CONVOLUTION_VARIABLES(psfColRad, psfRowRad) \
\
const int cornerCol = blockDim.x*blockIdx.x; \
const int cornerRow = blockDim.y*blockIdx.y; \
const int globalCol = cornerCol + threadIdx.x; \
const int globalRow = cornerRow + threadIdx.y; \
const int globalIdx = IDX_1D(globalCol, globalRow, imgRows); \
\
const int localCol = threadIdx.x; \
const int localRow = threadIdx.y; \
const int localIdx = IDX_1D(localCol, localRow, blockDim.y); \
const int localPixels = blockDim.x*blockDim.y; \
\
const int padRectStride = blockDim.y + 2*psfRowRad; \
const int padRectCol = localCol + psfColRad; \
const int padRectRow = localRow + psfRowRad; \
/*const int padRectIdx = IDX_1D(padRectCol, padRectRow, padRectStride); */ \
const int padRectPixels = padRectStride * (blockDim.x + 2*psfColRad); \
\
__shared__ char sharedMem[SHMEM]; \
float* shmPadRect = (float*)sharedMem; \
float* shmOutput = (float*)&shmPadRect[ROUNDUP32(padRectPixels)]; \
int nLoop;
////////////////////////////////////////////////////////////////////////////////
//
// Every block will need a buffered copy of the input data in its shared memory,
// so it doesn't do multiple global memory reads to find the energy contributing
// to its own value.
//
// This copy is very much like COPY_LIN_ARRAY_TO_SHMEM except that this isn't
// a linear array, and this needs to accommodate pixels that fall out of bounds
// from the image. Threads are temporarily reassigned to execute this copy in
// parallel.
//
////////////////////////////////////////////////////////////////////////////////
#define PREPARE_PADDED_RECTANGLE(psfColRad, psfRowRad) \
\
nLoop = (padRectPixels/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < padRectPixels) \
{ \
int prCol = COL_2D(prIndex, padRectStride); \
int prRow = ROW_2D(prIndex, padRectStride); \
int glCol = cornerCol + prCol - psfColRad; \
int glRow = cornerRow + prRow - psfRowRad; \
int glIdx = IDX_1D(glCol, glRow, imgRows); \
if(glRow >= 0 && \
glRow < imgRows && \
glCol >= 0 && \
glCol < imgCols) \
shmPadRect[prIndex] = imgInPtr[glIdx]; \
else \
shmPadRect[prIndex] = 0.0f; \
} \
} \
////////////////////////////////////////////////////////////////////////////////
//
// Same as above, except for binary images, using -1 as "OFF" and +1 as "ON"
// The user is not expected to do this him/herself, and it's easy enough to
// manipulate the data on the way in and out (just don't forget to convert back
// before copying out the result)
//
////////////////////////////////////////////////////////////////////////////////
#define PREPARE_PADDED_RECTANGLE_BINARY(psfColRad, psfRowRad) \
\
nLoop = (padRectPixels/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < padRectPixels) \
{ \
int prCol = COL_2D(prIndex, padRectStride); \
int prRow = ROW_2D(prIndex, padRectStride); \
int glCol = cornerCol + prCol - psfColRad; \
int glRow = cornerRow + prRow - psfRowRad; \
int glIdx = IDX_1D(glCol, glRow, imgRows); \
if(glRow >= 0 && \
glRow < imgRows && \
glCol >= 0 && \
glCol < imgCols) \
shmPadRect[prIndex] = imgInPtr[glIdx]*2 - 1; \
else \
shmPadRect[prIndex] = -1; \
} \
} \
////////////////////////////////////////////////////////////////////////////////
//
// Frequently, we want to pull some linear arrays into shared memory (usually
// PSFs) which will be queried often, and we want them close to the threads.
//
// This macro temporarily reassigns all the threads to do the memory copy from
// global memory to shared memory in parallel. Since the array may be bigger
// than the blocksize, some threads may be doing multiple mem copies
//
////////////////////////////////////////////////////////////////////////////////
#define COPY_LIN_ARRAY_TO_SHMEM(srcPtr, dstPtr, nValues) \
nLoop = (nValues/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < nValues) \
{ \
dstPtr[prIndex] = srcPtr[prIndex]; \
} \
}
__global__ void convolveBasic(
float* imgInPtr,
float* imgOutPtr,
float* imgPsfPtr,
int imgCols,
int imgRows,
int psfColRad,
int psfRowRad)
{
CREATE_CONVOLUTION_VARIABLES(psfColRad, psfRowRad);
shmOutput[localIdx] = 0.0f;
const int psfStride = psfRowRad*2+1;
const int psfPixels = psfStride*(psfColRad*2+1);
float* shmPsf = (float*)&shmOutput[ROUNDUP32(localPixels)];
COPY_LIN_ARRAY_TO_SHMEM(imgPsfPtr, shmPsf, psfPixels);
PREPARE_PADDED_RECTANGLE(psfColRad, psfRowRad);
__syncthreads();
float accumFloat = 0.0f;
for(int coff=-psfColRad; coff<=psfColRad; coff++)
{
for(int roff=-psfRowRad; roff<=psfRowRad; roff++)
{
int psfCol = psfColRad - coff;
int psfRow = psfRowRad - roff;
int psfIdx = IDX_1D(psfCol, psfRow, psfStride);
float psfVal = shmPsf[psfIdx];
int shmPRCol = padRectCol + coff;
int shmPRRow = padRectRow + roff;
int shmPRIdx = IDX_1D(shmPRCol, shmPRRow, padRectStride);
accumFloat += psfVal * shmPadRect[shmPRIdx];
}
}
shmOutput[localIdx] = accumFloat;
__syncthreads();
imgOutPtr[globalIdx] = shmOutput[localIdx];
}
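// Minimal launch sketch (assumptions: the image dimensions are exact
// multiples of the block dimensions, since the kernel has no edge guard,
// and the shared-memory budget of SHMEM bytes holds; dImgIn/dImgOut/dPsf
// are hypothetical device pointers):
// dim3 block(16, 16);
// dim3 grid(imgCols / block.x, imgRows / block.y);
// convolveBasic<<<grid, block>>>(dImgIn, dImgOut, dPsf, imgCols, imgRows, 2, 2);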
/*
// TODO: Still need to debug this function
__global__ void convolveBilateral(
float* imgInPtr,
float* imgOutPtr,
float* imgPsfPtr,
float* intPsfPtr,
int imgCols,
int imgRows,
int psfColRad,
int psfRowRad,
int intPsfRad)
{
CREATE_CONVOLUTION_VARIABLES(psfColRad, psfRowRad);
shmOutput[localIdx] = 0.0f;
const int psfStride = psfRowRad*2+1;
const int psfPixels = psfStride*(psfColRad*2+1);
float* shmPsf = (float*)&shmOutput[ROUNDUP32(localPixels)];
float* shmPsfI = (float*)&shmPsf[ROUNDUP32(psfPixels)];
COPY_LIN_ARRAY_TO_SHMEM(imgPsfPtr, shmPsf, psfPixels);
COPY_LIN_ARRAY_TO_SHMEM(intPsfPtr, shmPsfI, 2*intPsfRad+1);
PREPARE_PADDED_RECTANGLE(psfColRad, psfRowRad);
__syncthreads();
float accumFloat = 0.0f;
float myVal = shmPadRect[padRectIdx];
for(int coff=-psfColRad; coff<=psfColRad; coff++)
{
for(int roff=-psfRowRad; roff<=psfRowRad; roff++)
{
int psfCol = psfColRad - coff;
int psfRow = psfRowRad - roff;
int psfIdx = IDX_1D(psfCol, psfRow, psfStride);
float psfVal = shmPsf[psfIdx];
int shmPRCol = padRectCol + coff;
int shmPRRow = padRectRow + roff;
int shmPRIdx = IDX_1D(shmPRCol, shmPRRow, padRectStride);
float thatVal = shmPadRect[shmPRIdx];
float intVal = shmPsfI[(int)(thatVal-myVal+intPsfRad)];
accumFloat += psfVal * intVal *shmPadRect[shmPRIdx];
}
}
shmOutput[localIdx] = accumFloat;
__syncthreads();
imgOutPtr[globalIdx] = shmOutput[localIdx];
}
*/
#endif
|
a05996f524aaec3f72ec51bc22337a3710987e9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define mrg32k3a_CUDA_CALL(x) do { if((x) != hipSuccess) { printf("Error: %s at %s:%d\n",hipGetErrorString(hipGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define mrg32k3a_BLOCKS 64
#define mrg32k3a_THREADS 1024
#define mrg32k3a_ARRAY_SECTIONS (mrg32k3a_BLOCKS*mrg32k3a_THREADS)
#define mrg32k3a_a12 1403580U
#define mrg32k3a_a13 4294156359U
#define mrg32k3a_a21 527612U
#define mrg32k3a_a23 4293573854U
#define mrg32k3a_sse_mask 4294967295UL
#define mrg32k3a_sse_m1 4294967087UL
#define mrg32k3a_sse_m2 4294944443UL
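// m1 = 2^32 - 209 and m2 = 2^32 - 22853, the two MRG32k3a moduli.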
#define mrg32k3a_sse_x4ap 0
#define mrg32k3a_sse_x4bm 810728UL
#define mrg32k3a_sse_x4cp 1403580UL
#define mrg32k3a_sse_x4addl 4125525144UL
#define mrg32k3a_sse_x4addh 810727UL
#define mrg32k3a_sse_x6ap 149925673UL
#define mrg32k3a_sse_x6bp 489343630UL
#define mrg32k3a_sse_x6cm 1353076533UL
#define mrg32k3a_sse_x6addl 674846139UL
#define mrg32k3a_sse_x6addh 1353076467UL
#define mrg32k3a_sse_y4ap 2706407399UL
#define mrg32k3a_sse_y4bm 1370589UL
#define mrg32k3a_sse_y4cm 796966251UL
#define mrg32k3a_sse_y4addl 629268888UL
#define mrg32k3a_sse_y4addh 798332592UL
#define mrg32k3a_sse_y6ap 97673890UL
#define mrg32k3a_sse_y6bp 1431525864UL
#define mrg32k3a_sse_y6cp 1673476130UL
#define mrg32k3a_sse_y6addl 0
#define mrg32k3a_sse_y6addh 0
typedef struct{
unsigned x0,x1,x2,y0,y1,y2;
} mrg32k3a_state;
typedef struct{
unsigned s[20] __attribute__ ((aligned(16)));
// { x0,0,x1,0, x2,0,x3,0, y0,0,y1,0, y2,0,y3,0 , arrri}
} mrg32k3a_sse_state;
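// s[0..15] hold four consecutive x values and four consecutive y values as
// 64-bit SSE lanes; s[16] is the refill countdown consumed by
// mrg32k3a_sse_generate_(), which regenerates four outputs when it hits zero.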
unsigned mrg32k3a_sse_consts[72] __attribute__ ((aligned(16))) =
{ mrg32k3a_sse_x4ap,0,mrg32k3a_sse_x4ap,0,
mrg32k3a_sse_x4bm,0,mrg32k3a_sse_x4bm,0,
mrg32k3a_sse_x4cp,0,mrg32k3a_sse_x4cp,0,
mrg32k3a_sse_x4addl,mrg32k3a_sse_x4addh,mrg32k3a_sse_x4addl,mrg32k3a_sse_x4addh,
mrg32k3a_sse_x6ap,0,mrg32k3a_sse_x6ap,0,
mrg32k3a_sse_x6bp,0,mrg32k3a_sse_x6bp,0,
mrg32k3a_sse_x6cm,0,mrg32k3a_sse_x6cm,0,
mrg32k3a_sse_x6addl,mrg32k3a_sse_x6addh,mrg32k3a_sse_x6addl,mrg32k3a_sse_x6addh,
mrg32k3a_sse_m1,0,mrg32k3a_sse_m1,0,
mrg32k3a_sse_y4ap,0,mrg32k3a_sse_y4ap,0,
mrg32k3a_sse_y4bm,0,mrg32k3a_sse_y4bm,0,
mrg32k3a_sse_y4cm,0,mrg32k3a_sse_y4cm,0,
mrg32k3a_sse_y4addl,mrg32k3a_sse_y4addh,mrg32k3a_sse_y4addl,mrg32k3a_sse_y4addh,
mrg32k3a_sse_y6ap,0,mrg32k3a_sse_y6ap,0,
mrg32k3a_sse_y6bp,0,mrg32k3a_sse_y6bp,0,
mrg32k3a_sse_y6cp,0,mrg32k3a_sse_y6cp,0,
mrg32k3a_sse_y6addl,mrg32k3a_sse_y6addh,mrg32k3a_sse_y6addl,mrg32k3a_sse_y6addh,
mrg32k3a_sse_m2,0,mrg32k3a_sse_m2,0
};
unsigned mrg32k3a_sse_arrr[4] __attribute__ ((aligned(16)));
extern "C" __host__ void mrg32k3a_sse_genRand4(unsigned* arr,mrg32k3a_sse_state* state){ // use unsigned arr[4] __attribute__ ((aligned(16)));
asm volatile("movaps (%1),%%xmm1\n"\
"movaps 16(%1),%%xmm3\n"\
"movaps %%xmm1,%%xmm0\n"\
"movaps %%xmm3,%%xmm6\n"\
"movaps 32(%1),%%xmm2\n"\
"psubq %%xmm2,%%xmm1\n"\
"psubq 48(%1),%%xmm3\n"\
"movaps %%xmm1,%%xmm4\n"\
"movaps %%xmm3,%%xmm7\n"\
"psrad $31,%%xmm4\n"\
"psrad $31,%%xmm7\n"\
"psrlq $32,%%xmm4\n"\
"psrlq $32,%%xmm7\n"\
"pand 128(%2),%%xmm4\n"\
"pand 128(%2),%%xmm7\n"\
"paddq %%xmm4,%%xmm1\n"\
"paddq %%xmm7,%%xmm3\n"\
"shufps $136,%%xmm3,%%xmm1\n"\
"movaps %%xmm1,(%0)\n"\
"movups 8(%1),%%xmm1\n"\
"movaps %%xmm1,%%xmm4\n"\
"movaps %%xmm6,%%xmm7\n"\
"pmuludq 16(%2),%%xmm1\n"\
"pmuludq 32(%2),%%xmm6\n"\
"pmuludq 64(%2),%%xmm0\n"\
"pmuludq 80(%2),%%xmm4\n"\
"pmuludq 96(%2),%%xmm7\n"\
"paddq 48(%2),%%xmm6\n"\
"paddq 112(%2),%%xmm0\n"\
"psubq %%xmm1,%%xmm6\n"\
"paddq %%xmm4,%%xmm0\n"\
"psubq %%xmm7,%%xmm0\n"\
"movaps %%xmm6,%%xmm3\n"\
"movaps %%xmm0,%%xmm5\n"\
"psrlq $32,%%xmm3\n"\
"psrlq $32,%%xmm5\n"\
"pmuludq 128(%2),%%xmm3\n"\
"pmuludq 128(%2),%%xmm5\n"\
"psubq %%xmm3,%%xmm6\n"\
"psubq %%xmm5,%%xmm0\n"\
"movaps %%xmm6,%%xmm3\n"\
"movaps %%xmm0,%%xmm5\n"\
"psrlq $32,%%xmm3\n"\
"psrlq $32,%%xmm5\n"\
"pmuludq 128(%2),%%xmm3\n"\
"pmuludq 128(%2),%%xmm5\n"\
"psubq %%xmm3,%%xmm6\n"\
"psubq %%xmm5,%%xmm0\n"\
"psubq 128(%2),%%xmm6\n"\
"psubq 128(%2),%%xmm0\n"\
"movaps %%xmm6,%%xmm3\n"\
"movaps %%xmm0,%%xmm5\n"\
"psrad $31,%%xmm3\n"\
"psrad $31,%%xmm5\n"\
"psrlq $32,%%xmm3\n"\
"psrlq $32,%%xmm5\n"\
"pand 128(%2),%%xmm3\n"\
"pand 128(%2),%%xmm5\n"\
"paddq %%xmm3,%%xmm6\n"\
"paddq %%xmm5,%%xmm0\n"\
"movaps %%xmm6,(%1)\n"\
"movaps %%xmm0,16(%1)\n"\
"movups 40(%1),%%xmm1\n"\
"movaps 48(%1),%%xmm5\n"\
"movaps %%xmm2,%%xmm3\n"\
"movaps %%xmm1,%%xmm4\n"\
"movaps %%xmm5,%%xmm6\n"\
"pmuludq 144(%2),%%xmm2\n"\
"pmuludq 160(%2),%%xmm1\n"\
"pmuludq 176(%2),%%xmm5\n"\
"pmuludq 208(%2),%%xmm3\n"\
"pmuludq 224(%2),%%xmm4\n"\
"pmuludq 240(%2),%%xmm6\n"\
"paddq 192(%2),%%xmm2\n"\
"psubq %%xmm1,%%xmm2\n"\
"paddq %%xmm3,%%xmm6\n"\
"psubq %%xmm5,%%xmm2\n"\
"paddq %%xmm4,%%xmm6\n"\
"movaps %%xmm2,%%xmm0\n"\
"movaps %%xmm6,%%xmm7\n"\
"psrlq $32,%%xmm0\n"\
"psrlq $32,%%xmm7\n"\
"pmuludq 272(%2),%%xmm0\n"\
"pmuludq 272(%2),%%xmm7\n"\
"psubq %%xmm0,%%xmm2\n"\
"psubq %%xmm7,%%xmm6\n"\
"movaps %%xmm2,%%xmm0\n"\
"movaps %%xmm6,%%xmm7\n"\
"psrlq $32,%%xmm0\n"\
"psrlq $32,%%xmm7\n"\
"pmuludq 272(%2),%%xmm0\n"\
"pmuludq 272(%2),%%xmm7\n"\
"psubq %%xmm0,%%xmm2\n"\
"psubq %%xmm7,%%xmm6\n"\
"psubq 272(%2),%%xmm2\n"\
"psubq 272(%2),%%xmm6\n"\
"movaps %%xmm2,%%xmm0\n"\
"movaps %%xmm6,%%xmm7\n"\
"psrad $31,%%xmm0\n"\
"psrad $31,%%xmm7\n"\
"psrlq $32,%%xmm0\n"\
"psrlq $32,%%xmm7\n"\
"pand 272(%2),%%xmm0\n"\
"pand 272(%2),%%xmm7\n"\
"paddq %%xmm0,%%xmm2\n"\
"paddq %%xmm7,%%xmm6\n"\
"movaps %%xmm2,32(%1)\n"\
"movaps %%xmm6,48(%1)\n"\
""::"r"(arr),"r"(state->s),"r"(mrg32k3a_sse_consts));
}
extern "C" __host__ unsigned int mrg32k3a_sse_generate_(mrg32k3a_sse_state* state){
state->s[16]--; if(state->s[16]==0) {mrg32k3a_sse_genRand4(mrg32k3a_sse_arrr,state); state->s[16]=4;}
return mrg32k3a_sse_arrr[4-state->s[16]];
}
extern "C" __device__ __host__ void mrg32k3a_get_sse_state_(mrg32k3a_state* state,mrg32k3a_sse_state* sse_state){
long long x0,x1,x2,x,y0,y1,y2,y; int i;
x0=state->x0; x1=state->x1; x2=state->x2; y0=state->y0; y1=state->y1; y2=state->y2;
for(i=0;i<8;i+=2){
sse_state->s[i] = x = (1403580*x1-810728*x0+9007203111403311U)%mrg32k3a_sse_m1; x0=x1; x1=x2; x2=x;
}
for(i=8;i<16;i+=2){
sse_state->s[i] = y = (527612*y2-1370589*y0+9007202867859652U)%mrg32k3a_sse_m2; y0=y1; y1=y2; y2=y;
}
for(i=1;i<16;i+=2) sse_state->s[i]=0;
sse_state->s[16]=1;
}
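// Minimal host-side usage sketch (12345 for every component is the customary
// MRG32k3a default seed; any state with the x's not all zero mod m1 and the
// y's not all zero mod m2 is valid):
// mrg32k3a_state st = {12345u, 12345u, 12345u, 12345u, 12345u, 12345u};
// mrg32k3a_sse_state sst;
// mrg32k3a_get_sse_state_(&st, &sst);
// unsigned r = mrg32k3a_sse_generate_(&sst); // one 32-bit draw per call
// Precomputed jump matrices: each 18-entry block is a pair of 3x3 matrices
// (x recurrence mod m1, then y recurrence mod m2); the first block is the
// one-step companion matrix, and the rest advance the state by larger
// strides so that independent subsequences can be handed to parallel threads.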
__constant__ unsigned matrix_array[48][15][18];
unsigned matrix__array[48][15][18]=
{{{0, 1403580, 4294156359, 1, 0, 0, 0, 1, 0, 527612, 0, 4293573854, 1,
0, 0, 0, 1, 0}, {1403580, 4294156359, 0, 0, 1403580, 4294156359,
1, 0, 0, 3497978192, 4293573854, 2706407399, 527612, 0, 4293573854,
1, 0, 0}, {4294156359, 2941890554, 244671815, 1403580, 4294156359,
0, 0, 1403580, 4294156359, 3281754271, 2706407399, 1431525864,
3497978192, 4293573854, 2706407399, 527612, 0,
4293573854}, {2941890554, 489343630, 149925673, 4294156359,
2941890554, 244671815, 1403580, 4294156359, 0, 1673476130,
1431525864, 97673890, 3281754271, 2706407399, 1431525864,
3497978192, 4293573854, 2706407399}, {489343630, 1831234280,
3782722441, 2941890554, 489343630, 149925673, 4294156359,
2941890554, 244671815, 1430724370, 97673890, 2680076935,
1673476130, 1431525864, 97673890, 3281754271, 2706407399,
1431525864}, {1831234280, 2758233149, 1527363550, 489343630,
1831234280, 3782722441, 2941890554, 489343630, 149925673,
893509979, 2680076935, 3405842137, 1430724370, 97673890,
2680076935, 1673476130, 1431525864, 97673890}, {2758233149,
939574583, 4072640363, 1831234280, 2758233149, 1527363550,
489343630, 1831234280, 3782722441, 3280220074, 3405842137,
4035147174, 893509979, 2680076935, 3405842137, 1430724370,
97673890, 2680076935}, {939574583, 3228066636, 2064391165,
2758233149, 939574583, 4072640363, 1831234280, 2758233149,
1527363550, 361718588, 4035147174, 2623373296, 3280220074,
3405842137, 4035147174, 893509979, 2680076935,
3405842137}, {3228066636, 513534955, 3055122635, 939574583,
3228066636, 2064391165, 2758233149, 939574583, 4072640363,
951529882, 2623373296, 2214191601, 361718588, 4035147174,
2623373296, 3280220074, 3405842137, 4035147174}, {513534955,
1849694388, 2647187398, 3228066636, 513534955, 3055122635,
939574583, 3228066636, 2064391165, 856588367, 2214191601,
3490676452, 951529882, 2623373296, 2214191601, 361718588,
4035147174, 2623373296}, {1849694388, 72851784, 4057515279,
513534955, 1849694388, 2647187398, 3228066636, 513534955,
3055122635, 101833201, 3490676452, 1060044773, 856588367,
2214191601, 3490676452, 951529882, 2623373296,
2214191601}, {72851784, 2171677081, 1611532847, 1849694388,
72851784, 4057515279, 513534955, 1849694388, 2647187398,
2154540534, 1060044773, 1344438782, 101833201, 3490676452,
1060044773, 856588367, 2214191601, 3490676452}, {2171677081,
342112271, 1406241672, 72851784, 2171677081, 1611532847,
1849694388, 72851784, 4057515279, 2374762999, 1344438782,
3790774567, 2154540534, 1060044773, 1344438782, 101833201,
3490676452, 1060044773}, {342112271, 2961816100, 736416029,
2171677081, 342112271, 1406241672, 72851784, 2171677081,
1611532847, 3542344109, 3790774567, 818368950, 2374762999,
1344438782, 3790774567, 2154540534, 1060044773,
1344438782}, {2961816100, 1062452522, 387300998, 342112271,
2961816100, 736416029, 2171677081, 342112271, 1406241672,
3321940838, 818368950, 1817134745, 3542344109, 3790774567,
818368950, 2374762999, 1344438782, 3790774567}}, {{1062452522,
340793741, 2955879160, 2961816100, 1062452522, 387300998,
342112271, 2961816100, 736416029, 2854655037, 1817134745,
3493477402, 3321940838, 818368950, 1817134745, 3542344109,
3790774567, 818368950}, {3847560959, 2305448679, 3866010231,
2218748291, 3847560959, 2019641772, 1709215645, 2218748291,
1243502014, 1528225099, 1777037472, 3058260025, 479207863,
498682467, 1777037472, 811441367, 2928649385,
498682467}, {1957999095, 2154014226, 3803905489, 671479916,
1957999095, 1066184965, 279477115, 671479916, 1402917279,
254897698, 3369502947, 3762770058, 1576432507, 1603012465,
3369502947, 1996495269, 493710616, 1603012465}, {3015754,
613405362, 919711945, 3453352062, 3015754, 4062454730, 3721871040,
3453352062, 3241775219, 1089381262, 82107183, 1674879036,
2655465224, 3893311647, 82107183, 64039185, 3140922085,
3893311647}, {1714457176, 543518825, 796397074, 927927799,
1714457176, 2642728633, 2119990689, 927927799, 4150866678,
1115863221, 38719673, 3034346413, 313384592, 2415675531, 38719673,
3774285549, 1547191440, 2415675531}, {2677461178, 1363680284,
2720671525, 2095557752, 2677461178, 2069614551, 1077844911,
2095557752, 3144136330, 3508261072, 1631290368, 1481396816,
985606644, 308987612, 1631290368, 2672875808, 347710755,
308987612}, {2274623300, 3565840334, 3986386399, 4158118130,
2274623300, 2506841580, 980976528, 4158118130, 3788203520,
2795879681, 2688797252, 4273420463, 840096763, 224021768,
2688797252, 1122210911, 1347945325, 224021768}, {1644962013,
2336229602, 3501544776, 1414472808, 1644962013, 3653507277,
1746037714, 1414472808, 1955221006, 3910194649, 3174683233,
3681140872, 2828785870, 28639152, 3174683233, 2231910770,
3496041927, 28639152}, {3959658012, 2382864318, 1292801423,
2278721057, 3959658012, 2582093244, 1636022382, 2278721057,
2637926654, 2455684739, 1628178936, 3809483955, 438833385,
560791363, 1628178936, 135381837, 578081674,
560791363}, {2095854275, 241074294, 3467574677, 4270228002,
2095854275, 3012067286, 3210223643, 4270228002, 3416313165,
2131206770, 2155841413, 2592491860, 1165637085, 3997142249,
2155841413, 3495511482, 2275086959, 3997142249}, {3095786772,
2103247720, 3864750503, 2026972742, 3095786772, 4260196856,
2339910750, 2026972742, 827586410, 4043247124, 3811543518,
977381276, 3729301337, 1329179255, 3811543518, 2878595121,
654253945, 1329179255}, {3093961248, 3967481377, 2551531030,
2415235089, 3093961248, 2873360987, 3754924652, 2415235089,
2883496440, 1465244270, 681409874, 3783909260, 751154769,
3488684910, 681409874, 763303055, 1250231333,
3488684910}, {3713503784, 3857718737, 3282798680, 3376559462,
3713503784, 1075293666, 222702005, 3376559462, 2747983570,
3728928570, 456229084, 1241038999, 3921422013, 824554232,
456229084, 3401146025, 2087442872, 824554232}, {2734108498,
3897674739, 1215897460, 3111826989, 2734108498, 3832105377,
1474606220, 3111826989, 2395197850, 3270171837, 2934415941,
1000116359, 3089143895, 873394952, 2934415941, 116531987,
1544120396, 873394952}, {2440817142, 2467391843, 197714708,
822685082, 2440817142, 41338233, 2126944437, 822685082, 2900830468,
3161757001, 3365547145, 639745865, 3193278438, 3354738099,
3365547145, 1246052214, 2533379923, 3354738099}}, {{1649398389,
3109147376, 333002869, 49135452, 1649398389, 1857945175,
3441537107, 49135452, 1170096663, 3509975160, 1799677538,
1882279394, 3174861078, 1463826069, 1799677538, 3313769518,
300842059, 1463826069}, {996706937, 1481993076, 3439056503,
2297111910, 996706937, 1399961132, 862649200, 2297111910,
2299034194, 2390202783, 3103629604, 4257445059, 3409560232,
2092194020, 3103629604, 2202401252, 184076987,
2092194020}, {3886299789, 2026523226, 1990826586, 2419622249,
3886299789, 2185172794, 2253148117, 2419622249, 417740769,
2486156587, 3228001680, 4017452330, 2192037609, 4073752362,
3228001680, 1612748752, 2400655659, 4073752362}, {3979619964,
2186897562, 553886495, 458782589, 3979619964, 4241015765,
1007330283, 458782589, 4146310528, 3692836406, 209817750,
3238802184, 2974870628, 812917091, 209817750, 4168802395,
2574011276, 812917091}, {3698271908, 3645403733, 2380380979,
3965730031, 3698271908, 1503705535, 1794005444, 3965730031,
1071146226, 3905721877, 3408344210, 735511225, 1835502855,
3236286143, 3408344210, 1718305577, 1541161386,
3236286143}, {1706820424, 4154877869, 2210423860, 3119708691,
1706820424, 1030264372, 3977084597, 3119708691, 1146235803,
1095692911, 3618177584, 414159965, 3295260066, 1621943577,
3618177584, 38864005, 2244624888, 1621943577}, {3124356342,
2772994830, 3960067960, 1806881043, 3124356342, 1434126824,
3050691641, 1806881043, 2263101647, 2075595331, 4070066790,
2585466858, 1576254748, 2722713860, 4070066790, 1249128943,
1086214539, 2722713860}, {1299285967, 1217405758, 2589171163,
2130448350, 1299285967, 1392525159, 292773857, 2130448350,
3630027893, 1033463096, 2755731404, 1606221490, 2782713347,
477309738, 2755731404, 3442242150, 3314523413,
477309738}, {2416226018, 4140620736, 2165948280, 585876933,
2416226018, 1983411790, 1705263630, 585876933, 1978871456,
1277188371, 3516521880, 581321315, 2497683676, 2611083463,
3516521880, 948007642, 2929615666, 2611083463}, {3741945962,
730788749, 1745368878, 948545149, 3741945962, 4218117763,
4067146304, 948545149, 3841954865, 3790308130, 3026123612,
1979145017, 3338202446, 3233499061, 3026123612, 1002517819,
2494617440, 3233499061}, {2259606462, 4074137306, 3423319201,
2140332768, 2259606462, 3421531277, 3879048317, 2140332768,
1305370935, 2499898328, 1883032937, 2574692244, 3736263139,
3127996843, 1883032937, 1619897586, 3095497735,
3127996843}, {359678770, 3314680006, 2175543957, 1393299668,
359678770, 1655556841, 3386176735, 1393299668, 2341737887,
2884691291, 402756912, 3190930010, 2817097718, 2567113113,
402756912, 3993869449, 781663248, 2567113113}, {2448853746,
626004497, 1950071360, 1158194776, 2448853746, 874933395,
2095824912, 1158194776, 1836491782, 3345651389, 1884683967,
565971536, 4175921785, 450638539, 1884683967, 2764657060,
4146690497, 450638539}, {2721675172, 241385543, 3182328265,
3210334684, 2721675172, 325732785, 1062918236, 3210334684,
3121396438, 3445807882, 1171605168, 2631005941, 3904649711,
2223683788, 1171605168, 2738363134, 4195752245,
2223683788}, {1956350629, 1808733384, 3228333573, 627552370,
1956350629, 2293384918, 561984297, 627552370, 4217130354,
1410838732, 1579802384, 1541285529, 4154765811, 2395917056,
1579802384, 3431422519, 977617859, 2395917056}}, {{2148702503,
103819730, 3922720782, 1999175811, 2148702503, 1996163538,
2979225418, 1999175811, 892409263, 2928213494, 288604458,
1501677614, 571673571, 2155469603, 288604458, 3843369786,
3326516116, 2155469603}, {1400478398, 535326008, 3018215420,
2114210471, 1400478398, 2777288607, 3240775579, 2114210471,
1586003016, 1782279859, 2764859700, 2840894706, 3576428059,
2082469029, 2764859700, 3963963316, 749754403,
2082469029}, {3677219860, 3962510930, 3805011027, 1098715579,
3677219860, 2452527982, 1378248654, 1098715579, 377225862,
1180559812, 2405645826, 1118910623, 1737043333, 1583407457,
2405645826, 55614242, 2056027805, 1583407457}, {379210133,
250053591, 554369329, 1783231160, 379210133, 1908318389,
3576659343, 1783231160, 2188531273, 3979423632, 1022129134,
276873446, 1332558840, 3760163766, 1022129134, 1799196192,
1041986082, 3760163766}, {3510732546, 3926709784, 2244264588,
2266741858, 3510732546, 3093925525, 2040546316, 2266741858,
2249717607, 1221779212, 3671597039, 3732297029, 907924984,
1438626566, 3671597039, 1569836243, 3619082489,
1438626566}, {443349145, 590074072, 2864061919, 2339070143,
443349145, 1360064932, 3651849809, 2339070143, 2349663769,
530787287, 117552025, 774331833, 4234241969, 483787924, 117552025,
2374703971, 3115606677, 483787924}, {352887003, 499779786,
2268496651, 4017647307, 352887003, 1014398637, 737449908,
4017647307, 299115015, 4186423122, 213414981, 1671634731,
927956770, 955925224, 213414981, 3644821859, 1961750426,
955925224}, {1591992468, 381333958, 1510277444, 3951951872,
1591992468, 1046219306, 2143424240, 3951951872, 4022841636,
3539882179, 3037868518, 2582787611, 199085085, 1021313167,
3037868518, 1716381787, 1312544548, 1021313167}, {2463901027,
2986987143, 4009648910, 3063328698, 2463901027, 3219337698,
2311802612, 3063328698, 2943340610, 464995138, 1830883303,
2996449399, 1967452885, 2425752074, 1830883303, 2764061706,
2523191906, 2425752074}, {156984152, 730296663, 1925208079,
512266145, 156984152, 1769630972, 2280389234, 512266145,
3988368920, 491634866, 2992461494, 1322776230, 1972129510,
307032584, 2992461494, 3508058001, 1896983419,
307032584}, {3130108188, 1790199485, 1307664725, 4157072496,
3130108188, 3778393784, 3197963896, 4157072496, 2053143729,
648323704, 175756085, 3996184920, 2324418614, 815641485, 175756085,
468123117, 25551168, 815641485}, {3475863656, 335238560, 56818880,
2822173170, 3475863656, 3877387059, 3603749184, 2822173170,
1574808140, 2654852128, 271326184, 4253341892, 3801488812,
530045083, 271326184, 2936222370, 4104998472,
530045083}, {1045458737, 2241703288, 2260695349, 4170336679,
1045458737, 1275277505, 1228755237, 4170336679, 1688596388,
3044418239, 1113299026, 1472599397, 3106112258, 2894865853,
1113299026, 3987050111, 4166739853, 2894865853}, {2598051681,
3420560058, 1682340310, 1263226655, 2598051681, 3475056324,
2888135505, 1263226655, 741601165, 4123606263, 2907473956,
3433095739, 1084985755, 4289524458, 2907473956, 3495266509,
2596796815, 4289524458}, {1724599202, 3394637846, 3481548547,
3222317638, 1724599202, 3535522460, 3763267661, 3222317638,
3808808423, 2710127434, 3608233946, 1063456347, 3619316472,
336282572, 3608233946, 1881966502, 3715040418,
336282572}}, {{3975008370, 754337639, 2405650329, 3715182130,
3975008370, 3615342722, 642697923, 3715182130, 2256493727,
3174552073, 1030618503, 1998739584, 3467650326, 2569413030,
1030618503, 2594942403, 1631336015, 2569413030}, {460768826,
2717717970, 1089892553, 627406673, 460768826, 1541344588,
963516608, 627406673, 1286664224, 2154948056, 2110199318,
1649523168, 678342865, 2334639309, 2110199318, 601680947,
3114094203, 2334639309}, {4195833089, 2105299796, 166509779,
2970279915, 4195833089, 2389311995, 803149880, 2970279915,
1918719231, 2718719935, 2454987531, 3631058630, 161781366,
2654502125, 2454987531, 3954383978, 654123598,
2654502125}, {420480221, 372736411, 2221681883, 3471097641,
420480221, 2996150472, 2353092905, 3471097641, 2956342842,
2084602709, 780563537, 2037330914, 3029522338, 563657176,
780563537, 1641595774, 191330473, 563657176}, {4070064465,
472484932, 681790190, 337365198, 4070064465, 1549217842,
3952301470, 337365198, 3672436891, 1811448870, 509499702,
4248280928, 2086248594, 1854845597, 509499702, 817515303,
3882723762, 1854845597}, {2477940169, 4030956062, 3992821540,
3734235881, 2477940169, 3893216227, 2432837582, 3734235881,
3822346350, 588292146, 1524625318, 575533487, 2663798398,
437787232, 1524625318, 2969641819, 1211920785,
437787232}, {3386547689, 2950929880, 3067610933, 929222926,
3386547689, 4006862829, 1640422986, 929222926, 1627725117,
3707178523, 156931133, 3141547991, 3896072542, 1030901935,
156931133, 510991445, 930369198, 1030901935}, {3278195133,
2510762118, 4086436419, 153526651, 3278195133, 2662640502,
3499730988, 153526651, 420492906, 1594768331, 832866376,
2165145850, 3754780168, 3414769923, 832866376, 2238126504,
1968799026, 3414769923}, {2398009046, 3478793062, 1368903286,
3944016141, 2398009046, 1432371712, 2232180879, 3944016141,
3305849029, 3658330788, 1977221419, 154984454, 980333681,
3708557063, 1977221419, 335242563, 1678180194,
3708557063}, {3518250549, 4292756547, 1669675151, 3260865699,
3518250549, 2846577720, 3665431194, 3260865699, 3928138979,
3635025025, 2376040259, 1109694695, 3138696202, 173271797,
2376040259, 1954542705, 588512257, 173271797}, {3657009175,
918958527, 3708402605, 721977397, 3657009175, 2418584047,
2326313910, 721977397, 3102728472, 2115466097, 3821276545,
1557445315, 849916466, 2548411649, 3821276545, 291376403,
404569572, 2548411649}, {1953220754, 163100603, 2074032202,
715341436, 1953220754, 4276221887, 3127996992, 715341436,
600928360, 873301213, 1778576621, 444536774, 367796024, 1457310151,
1778576621, 2480319255, 2262086849, 1457310151}, {807416367,
1224342039, 3433328646, 1372059769, 807416367, 96106427,
3651390471, 1372059769, 3186297981, 811649392, 2200531447,
12875283, 488040707, 3777283330, 2200531447, 1495168735,
1147053469, 3777283330}, {1498262489, 2714297147, 755846283,
2568504112, 1498262489, 2596328073, 1174505496, 2568504112,
521387497, 4244052116, 441543104, 3883522997, 779122511,
3285362357, 441543104, 1639913252, 901172852,
3285362357}, {4053337039, 3268044266, 736225600, 357528052,
4053337039, 3704296099, 1712397465, 357528052, 2276132974,
2247191270, 2259772727, 3858208837, 197357514, 687590794,
2259772727, 2320333603, 3898333084, 687590794}}, {{1693168425,
1401872772, 2295790366, 2228376089, 1693168425, 3992771814,
823220763, 2228376089, 3310184147, 1885870777, 2672536210,
2391283983, 359763062, 1646861218, 2672536210, 2301581548,
2317984620, 1646861218}, {630867741, 721725795, 627043435,
1497104068, 630867741, 3746310018, 4253248635, 1497104068,
2529428830, 400681955, 2013240130, 3743932305, 605925849,
841254072, 2013240130, 1635365181, 3765813448,
841254072}, {913070769, 1398710380, 2668104628, 2008257560,
913070769, 3792106984, 1077371294, 2008257560, 419196296,
835127542, 3388634549, 1420949605, 1940153687, 936677832,
3388634549, 1670398370, 2315064847, 936677832}, {661344445,
735391270, 4246010312, 3039669025, 661344445, 526054481,
1591031831, 3039669025, 2571072593, 1455652656, 3579956569,
1960229502, 2478539210, 1930213004, 3579956569, 3077694794,
2072952279, 1930213004}, {425950143, 1130486674, 2696137290,
3284243038, 425950143, 1267378907, 1755036096, 3284243038,
2285404443, 92087486, 840820555, 3494209711, 1661263707,
1097148356, 840820555, 822503830, 4251974910,
1097148356}, {3297773364, 3438979384, 2501878263, 2917363935,
3297773364, 2668364492, 2936154555, 2917363935, 3781620139,
1349151461, 1821354750, 3717764728, 2364275695, 490241598,
1821354750, 2341304300, 1155806426, 490241598}, {1767498496,
2298523806, 1601051497, 4274532010, 1767498496, 2773692480,
1279616291, 4274532010, 1500415854, 2534783090, 722180805,
3532438427, 1251210078, 3518720669, 722180805, 3300248610,
2562727375, 3518720669}, {2644799309, 1342278931, 2658402822,
4042890210, 2644799309, 606605705, 4241772463, 4042890210,
1847312821, 4288531000, 321747515, 74521379, 1225209584,
1097613522, 321747515, 1194440107, 1784540933,
1097613522}, {2159318679, 2541146449, 2497051754, 3638042686,
2159318679, 662697782, 3835520789, 3638042686, 3960987904,
4236150810, 1028604204, 43575698, 2103314379, 2370144645,
1028604204, 3967941183, 734109746, 2370144645}, {1175764916,
2471593729, 2865336247, 3704088248, 1175764916, 2932838910,
4011400538, 3704088248, 3502592220, 3269831184, 1615892324,
245018475, 2277651644, 3795899570, 1615892324, 2568537852,
3294470896, 3795899570}, {3908925760, 3899855852, 2805314855,
774817443, 3908925760, 1364407921, 2954226506, 774817443,
744436690, 2639467910, 621455156, 283035496, 3309449323,
2831618441, 621455156, 2424365668, 3539583210,
2831618441}, {287728234, 246229618, 162441370, 3775615386,
287728234, 1502779384, 3733878711, 3775615386, 3250474907,
2595646099, 1861018675, 4077631310, 3450880655, 2284610128,
1861018675, 2988405862, 1711688841, 2284610128}, {781051313,
1972894256, 225229222, 2711483505, 781051313, 1175638317,
647050317, 2711483505, 1867530951, 2580411292, 573353505,
179605964, 734715144, 2559502572, 573353505, 350062612, 3290699372,
2559502572}, {1910085226, 4230641571, 2453891386, 3227070913,
1910085226, 2853687796, 3120894575, 3227070913, 749636765,
1383222658, 2199059659, 954928333, 3613475430, 1338063869,
2199059659, 4005334159, 4236188627, 1338063869}, {3556232066,
3732499602, 1045575721, 3430126934, 3556232066, 2411549793,
451564875, 3430126934, 127041069, 2461101821, 1604355193,
495661134, 984654734, 3287411893, 1604355193, 1772365467,
769676548, 3287411893}}, {{138013862, 1975136163, 1617749306,
1096138313, 138013862, 1501878241, 1416249993, 1096138313,
2409846784, 522705580, 769295000, 1927161272, 2468210728,
143812745, 769295000, 3514348856, 3254530816,
143812745}, {3855242202, 3354399019, 3981734504, 73950522,
3855242202, 55354701, 2965395603, 73950522, 599453422, 3076435551,
1103432342, 4161111774, 1446182108, 2692035063, 1103432342,
1643240704, 2596905012, 2692035063}, {2872969148, 3657910297,
3200219335, 1941532786, 2872969148, 3557298699, 3590950415,
1941532786, 3515748818, 2986424418, 1332423877, 1366553339,
3101816103, 1809137988, 1332423877, 3993875038, 2412502608,
1809137988}, {3029005516, 3321539504, 3501837362, 813410089,
3029005516, 1044920137, 3461955319, 813410089, 4271076381,
1471526950, 1202100604, 2327872488, 4098434768, 2375319030,
1202100604, 3742334018, 1391532370, 2375319030}, {4207289909,
559813450, 2940543965, 312277958, 4207289909, 3444686334,
960113158, 312277958, 1749168476, 2689974385, 3674658855,
828283166, 1517070807, 953526753, 3674658855, 1558514368,
3517620599, 953526753}, {2903706877, 3713704391, 201947523,
3130396563, 2903706877, 1625744025, 3837877063, 3130396563,
316005085, 2055175984, 612058994, 1918225488, 4143597212,
3063334100, 612058994, 269715831, 3228801559,
3063334100}, {1295130289, 1951986222, 2596032611, 3806079268,
1295130289, 1110389513, 2159958180, 3806079268, 2725645318,
3510580843, 2499397071, 759749547, 2970642011, 2623568215,
2499397071, 191091208, 482061697, 2623568215}, {4116985007,
1460625377, 2247500745, 941408572, 4116985007, 1546486080,
1783998098, 941408572, 3058183515, 1473695507, 4245372460,
3873219634, 4094914553, 4269164791, 4245372460, 2507855960,
2795313144, 4269164791}, {1962495392, 136296227, 2260032938,
263471390, 1962495392, 2218844901, 2778787907, 263471390,
1853758858, 867552837, 2147092923, 3605684490, 3049241245,
2065032609, 2147092923, 4284554831, 89154078,
2065032609}, {4071005559, 2713466673, 2459865767, 2395946396,
4071005559, 3665517862, 1602910626, 2395946396, 1908912254,
1988463235, 1255176101, 166921281, 1941135434, 3796792964,
1255176101, 3862286918, 946070284, 3796792964}, {3012525297,
2493852519, 3916982040, 2076256875, 3012525297, 872341650,
178159470, 2076256875, 3517362011, 3250644175, 2798600046,
317587104, 4229400815, 1962645475, 2798600046, 3379450066,
3416800987, 1962645475}, {2920178076, 1509751053, 3032162980,
2446861569, 2920178076, 2900643495, 3687940915, 2446861569,
2913757298, 1047469708, 605362535, 584784141, 1689076962,
3270595249, 605362535, 2030588250, 4262735426,
3270595249}, {984385025, 152817586, 1971054150, 3476973063,
984385025, 2488227927, 4255554672, 3476973063, 285750165,
1175894603, 3365978223, 4159606843, 1086019329, 1473711771,
3365978223, 1892972053, 1285855999, 1473711771}, {2365859770,
2749587167, 2741363210, 3890635154, 2365859770, 3783033649,
1561530403, 3890635154, 4112214391, 1187553376, 2310209542,
4132098467, 2099466409, 2342917755, 2310209542, 147543030,
2494351424, 2342917755}, {4105268381, 2073524411, 644212621,
403994004, 4105268381, 2904084179, 3831719597, 403994004,
2539915825, 1465076633, 3862834394, 2259697480, 4051520963,
2890527364, 3862834394, 934050118, 2810885896,
2890527364}}, {{94451952, 3964593332, 1242898773, 3352801941,
94451952, 639029973, 2315095646, 3352801941, 4216782514,
2354363842, 1984873167, 1212627640, 1257532340, 513890845,
1984873167, 2870530442, 1208902926, 513890845}, {385258225,
3728744670, 3569882325, 1926285644, 385258225, 2390706911,
1108147171, 1926285644, 2264905138, 596424080, 4160778291,
3611019106, 141769900, 1848364568, 4160778291, 3496528455,
1552116673, 1848364568}, {377826974, 116744102, 3055477316,
2663175885, 377826974, 2578332381, 3195438389, 2663175885,
704130800, 3038511100, 1505642955, 1757204931, 4112978448,
1292986218, 1505642955, 997232463, 172755364,
1292986218}, {2886425156, 1849148167, 749134119, 1065683096,
2886425156, 4196917281, 2992662885, 1065683096, 270679073,
672947816, 2544671570, 163978331, 2188646679, 364070020,
2544671570, 837362349, 3520039729, 364070020}, {1530094734,
468421033, 934518204, 3125897940, 1530094734, 3251080204,
862053578, 3125897940, 1316966786, 2290377268, 186437207,
924446594, 1594149094, 1953699759, 186437207, 3030821019,
2064634669, 1953699759}, {1224972102, 2873015547, 1753647773,
3731309033, 1224972102, 517089293, 3283127918, 3731309033,
3153974267, 570598119, 458236864, 3364479146, 3444657749,
2201888168, 458236864, 3299709815, 1269604905,
2201888168}, {1389036848, 3153422993, 1263031874, 266142644,
1389036848, 1128582211, 635448484, 266142644, 61084722, 247090079,
217195116, 3379569640, 1843501131, 1521644902, 217195116,
1839661585, 1694823010, 1521644902}, {948843427, 927013635,
3808868984, 1378151623, 948843427, 673810920, 951629713,
1378151623, 35689930, 3531570992, 635565666, 2548654227,
2589432341, 1192700714, 635565666, 298357363, 3968150021,
1192700714}, {4136918261, 2157852912, 2069320717, 761987401,
4136918261, 3110726637, 819879023, 761987401, 1754337092,
4185051185, 2980729810, 704428564, 676055991, 4067518843,
2980729810, 371322662, 274480292, 4067518843}, {2368072759,
1396659393, 3856394323, 3441598214, 2368072759, 1048136347,
2544685440, 3441598214, 1637944237, 4169673563, 2473676687,
474110298, 892497275, 1016257897, 2473676687, 1340613848,
2749355083, 1016257897}, {388872857, 1490932548, 1362179029,
3023837855, 388872857, 3730443308, 2847406725, 3023837855,
2916493858, 2929086880, 3014452945, 1184181069, 1817239623,
304683038, 3014452945, 4060996566, 3116171970,
304683038}, {1820387182, 1448556483, 658508974, 3971013929,
1820387182, 341462694, 120796985, 3971013929, 1708907294,
3420061274, 2187600669, 2672427080, 958916489, 3438963520,
2187600669, 2575726025, 1845346034, 3438963520}, {3266061682,
2308387653, 3566378766, 2642863999, 3266061682, 4135404306,
3873850425, 2642863999, 819998702, 793345179, 3096228739,
1869564994, 61975940, 3250678901, 3096228739, 2642083507,
2331711837, 3250678901}, {2759230943, 625353568, 2334745039,
2581576807, 2759230943, 33532058, 2454038309, 2581576807,
2719213200, 2938208331, 3103878887, 3739081453, 4187175536,
2337287538, 3103878887, 210215425, 356712025,
2337287538}, {3848666830, 419617556, 3124883107, 2796173733,
3848666830, 1916212276, 514909692, 2796173733, 1014898960,
1750023152, 1403819179, 3120433504, 3857531812, 1358390162,
1403819179, 743168835, 1720139034, 1358390162}}, {{3362920032,
1401175590, 638998846, 1130489594, 3362920032, 1457450350,
3734864133, 1130489594, 1891490872, 2446760804, 1315499519,
3300994644, 3842690720, 2709640529, 1315499519, 875361870,
676525399, 2709640529}, {1807491073, 1570264155, 351423668,
2384691454, 1807491073, 2844861718, 1730098031, 2384691454,
2254459023, 3952463556, 3774935330, 4085935803, 597633965,
2742149264, 3774935330, 3032350755, 1410604392,
2742149264}, {562572981, 2453239846, 384935380, 2217666563,
562572981, 40870381, 2001728777, 2217666563, 1165449067,
3460887490, 3969822112, 3965770634, 2396790253, 1539107473,
3969822112, 1532517403, 2655486683, 1539107473}, {1237196477,
3663731096, 143400628, 4245359555, 1237196477, 1797081212,
2449575498, 4245359555, 3047429268, 3791239282, 2077922420,
2177255734, 3651360351, 3878579563, 2077922420, 1721916511,
845297523, 3878579563}, {1045832041, 1995794548, 4043681725,
3597635471, 1045832041, 4130758833, 1913125547, 3597635471,
2598267442, 384476417, 1730887034, 819810619, 1958401362,
2059165813, 1730887034, 1557795047, 1231287179,
2059165813}, {210670816, 839244126, 2509073771, 1349445168,
210670816, 1305907164, 2733446300, 1349445168, 2147359263,
1602633162, 2032494710, 2787706468, 3511906271, 2642777370,
2032494710, 4046131253, 1064863813, 2642777370}, {1530071777,
1336807579, 2754648167, 505738396, 1530071777, 3429286737,
1012141566, 505738396, 364429965, 2338325254, 4167233761,
2789929771, 2696421199, 4143866672, 4167233761, 3873099015,
795453989, 4143866672}, {3155049403, 4281524426, 1981313839,
4263819658, 3155049403, 3719941673, 1047529624, 4263819658,
3313321106, 1338343327, 2324624266, 1725087354, 3594939336,
1570315355, 2324624266, 3522351060, 4252790045,
1570315355}, {1387563109, 2624425852, 1644616294, 574275685,
1387563109, 1526657929, 2547323088, 574275685, 769083741,
2684392708, 382658421, 664207993, 227781306, 3669375462, 382658421,
1592689209, 1348893120, 3669375462}, {108747348, 1467875854,
256526168, 899246895, 108747348, 2783815531, 3248764453, 899246895,
1429567203, 1219288409, 26016991, 2629261386, 1182007239,
3128806513, 26016991, 3791370211, 3431512800,
3128806513}, {2444402736, 3471250149, 1262085311, 52683743,
2444402736, 4071587088, 1520706, 52683743, 2610517495, 2265707740,
2606867962, 2853611619, 4153458122, 1717139409, 2606867962,
3808111413, 3949668867, 1717139409}, {582760735, 785351190,
571933335, 1423127512, 582760735, 700110581, 1283194774,
1423127512, 2740000743, 2009721680, 1824515104, 1910382756,
783304238, 2323129699, 1824515104, 4032945011, 2040722667,
2323129699}, {1334296644, 3063286903, 1849376977, 639950612,
1334296644, 535101514, 868933300, 639950612, 4261942599,
2014255865, 3811884196, 1129689402, 2695899091, 118854584,
3811884196, 4200064180, 788793439, 118854584}, {2374838356,
2052902650, 1830234951, 852164586, 2374838356, 497540878,
412380392, 852164586, 448747464, 3574765936, 3001848199, 528121192,
2298546607, 495056704, 3001848199, 299029371, 1303223717,
495056704}, {2886574442, 1125389940, 1561785374, 2721738140,
2886574442, 3857216325, 3196750633, 2721738140, 3914325071,
68595631, 1596158191, 3113306279, 506975084, 3070486263,
1596158191, 3706657140, 902478796, 3070486263}}, {{1729281525,
2268963059, 1141249417, 3263186729, 1729281525, 2951515865,
1535805957, 3263186729, 2005252417, 3881593435, 1355307047,
992174375, 313617972, 2305761589, 1355307047, 3663579047,
381933244, 2305761589}, {1416676049, 1304732377, 1040867745,
83908466, 1416676049, 1352516724, 4294308508, 83908466, 2367065164,
615040705, 3791771273, 3347975047, 4196134923, 1667857811,
3791771273, 2263851601, 1564715297, 1667857811}, {2070059496,
3110356679, 1675108536, 4096493219, 2070059496, 1201774212,
1529477403, 4096493219, 2985917792, 763211059, 1723493827,
3116429712, 3721738282, 2699274746, 1723493827, 3314336764,
2208033721, 2699274746}, {4041536918, 338830578, 1317260239,
1434230503, 4041536918, 2753040912, 2944821434, 1434230503,
3214147257, 3757047667, 4261953004, 2979573134, 3973733876,
4093947334, 4261953004, 2815567716, 3454015638,
4093947334}, {3710830263, 3564440066, 2242696089, 2193226133,
3710830263, 1348686132, 1795377731, 2193226133, 3409339184,
3223762258, 3928412309, 868538065, 4232950837, 1497333242,
3928412309, 4043986454, 3837209858, 1497333242}, {2397146654,
2221537290, 403072156, 1475654090, 2397146654, 4286962883,
2785534584, 1475654090, 3189933295, 4119399398, 668310420,
3532851694, 551557198, 4178728130, 668310420, 3927272953,
2981026540, 4178728130}, {1027835471, 3867031443, 3142787072,
1898764790, 1027835471, 1315270526, 1822660758, 1898764790,
741855424, 752516370, 243696529, 3444486351, 168490644, 4121879899,
243696529, 3607008098, 2179415297, 4121879899}, {1422043015,
1207561803, 3581125669, 2054743463, 1422043015, 1762244284,
1499597869, 2054743463, 300628476, 4213855850, 3178644752,
4172515680, 1701869032, 250120061, 3178644752, 1513430926,
570149551, 250120061}, {3067618819, 2336627700, 329186307,
2372490954, 3067618819, 3955239394, 4220275697, 2372490954,
2557168436, 2136907462, 2281145558, 944820027, 75648724,
1881049173, 2281145558, 3137351012, 1981393124,
1881049173}, {1290262277, 3455952707, 1105918720, 3516590085,
1290262277, 2775532590, 2979076122, 3516590085, 3424428541,
2633790862, 278498267, 1105517535, 4167316392, 291113277,
278498267, 3905306360, 84855550, 291113277}, {838829814,
3831552648, 740094909, 1791601245, 838829814, 4140896091,
812513995, 1791601245, 2171437563, 4238387083, 982981782,
678813567, 838704912, 1588319229, 982981782, 2097021327,
3565340575, 1588319229}, {3207500987, 1677128173, 1544523985,
2454931751, 3207500987, 1750177474, 1448920400, 2454931751,
3018097661, 3564715153, 1293184102, 320925393, 1462382740,
2471905451, 1293184102, 3574085219, 929070406,
2471905451}, {1666308811, 1118308928, 3014019217, 1555428439,
1666308811, 3294923191, 1360357393, 1555428439, 1804838591,
2975870653, 2217292755, 2781461838, 877933526, 175583848,
2217292755, 2951199791, 1612169654, 175583848}, {2981538864,
771670630, 552010452, 3521528194, 2981538864, 1137098711, 54548676,
3521528194, 1634171646, 2762980999, 2257924506, 1956954494,
409055673, 1039498873, 2257924506, 1452566739, 2948111768,
1039498873}, {2088681401, 2557642094, 1386447212, 2982267031,
2088681401, 1351458792, 1889146436, 2982267031, 584673363,
3505836007, 644987823, 2972013230, 3668362949, 662437296,
644987823, 2985946835, 2268102873, 662437296}}, {{1744636487,
3952135907, 947753516, 4064983592, 1744636487, 3049723261,
1934508265, 4064983592, 4171745404, 2852219546, 1379176112,
2842564878, 3926509890, 4158106802, 1379176112, 1815738463,
3062358456, 4158106802}, {1049600974, 1569794605, 2089137344,
3577024659, 1049600974, 1729967818, 2778677259, 3577024659,
1625369148, 2017565947, 3284646837, 415258465, 2567084715,
931848746, 3284646837, 2633569246, 256263523,
931848746}, {2016775973, 1306292301, 3670445841, 2164320300,
2016775973, 480516705, 319591773, 2164320300, 53562000, 985910059,
3909587424, 1707582600, 3313512626, 507915211, 3909587424,
3733827320, 625612469, 507915211}, {3551255470, 1680625376,
869476379, 3958611830, 3551255470, 410042396, 569117280,
3958611830, 1373068765, 2917140265, 1831496020, 1821082272,
2829448427, 1648005210, 1831496020, 3987397422, 1032291296,
1648005210}, {3817533434, 3364632636, 2770290295, 1225458244,
3817533434, 1626685441, 3824823610, 1225458244, 465800240,
57148302, 2038610457, 4187472153, 3241574777, 427497251,
2038610457, 1274456574, 1161649569, 427497251}, {2770597834,
1202045798, 2685318428, 2961159523, 2770597834, 3683942097,
282037957, 2961159523, 2756353477, 688754137, 1310546639,
3814409618, 3898177863, 3905409277, 1310546639, 52591070,
1559654803, 3905409277}, {4285714048, 2970654039, 696328069,
3989370299, 4285714048, 2737310563, 2710182478, 3989370299,
4222922794, 2620741358, 2005632892, 1332202609, 3824338376,
700824646, 2005632892, 3680531612, 1590259194,
700824646}, {1012482542, 3141042890, 2545745615, 2543645250,
1012482542, 2111984988, 913717833, 2543645250, 2108618602,
2652286672, 1669447863, 1522417114, 4292947198, 4161327077,
1669447863, 3870847744, 489964129, 4161327077}, {2851133217,
2544498605, 3040329785, 728949951, 2851133217, 1441804991,
1704708043, 728949951, 540570281, 299000690, 3113608192,
3442591292, 1601950042, 2940267842, 3113608192, 2426519800,
3446468540, 2940267842}, {4203176538, 564446240, 3663951648,
954041761, 4203176538, 2877992546, 3194902955, 954041761,
139726772, 3238228424, 980251399, 4210976074, 842055339,
2379780762, 980251399, 538895986, 2470160950,
2379780762}, {3359984447, 2945106536, 94047956, 3251898692,
3359984447, 3157047859, 2551251946, 3251898692, 3012733147,
3350090744, 3074052371, 4119905453, 1046297227, 2176706496,
3074052371, 1284433356, 698354553, 2176706496}, {1939611745,
3454434256, 1721948148, 3500039413, 1939611745, 1489246316,
1380082835, 3500039413, 1200101967, 2005464907, 3658400031,
3338913609, 4093432727, 655280634, 3658400031, 3487203083,
3675619486, 655280634}, {4070536231, 1855978637, 1957763018,
4159333798, 4070536231, 3906589069, 2115390755, 4159333798,
3251810605, 822023968, 319975458, 3368913060, 1828575570, 73560661,
319975458, 3699817316, 3243967450, 73560661}, {597890444,
2963526988, 2113178661, 1074888759, 597890444, 3687820258,
1846158283, 1074888759, 708031970, 2091575838, 1768564041,
1548255156, 1282964788, 3487733040, 1768564041, 2449029780,
2113300421, 3487733040}, {3140731760, 3240303708, 2305798886,
1768199518, 3140731760, 1837348435, 2315952390, 1768199518,
3050136075, 1992669883, 2052021448, 255854126, 630458596,
3624083038, 2052021448, 2398355340, 1874273834,
3624083038}}, {{3013855247, 82049263, 3977310441, 584852249,
3013855247, 1631801979, 2272893205, 584852249, 1157293598,
1008985039, 2981474723, 625182639, 2528619024, 1270934555,
2981474723, 505612043, 3136631324, 1270934555}, {3670086228,
1764904195, 656744553, 3137526662, 3670086228, 3580869206,
2403875621, 3137526662, 3580234334, 2460119169, 1797675893,
1123794455, 3743985508, 280996820, 1797675893, 3013099060,
143706137, 280996820}, {3470835037, 642403019, 313815441,
3018859341, 3470835037, 2713143350, 2110936889, 3018859341,
537271019, 1317375832, 1403098721, 2544509098, 2026895397,
2379814210, 1403098721, 602189114, 2634194997,
2379814210}, {3464838365, 2842987937, 3136165138, 3634185196,
3464838365, 3601823850, 3887031679, 3634185196, 2792496861,
2263904321, 3933041881, 564891116, 474242849, 919218027,
3933041881, 1125672685, 4154920441, 919218027}, {176543705,
1822300329, 358859200, 4127602744, 176543705, 2136991891,
470244788, 4127602744, 3064097390, 2567458918, 2773253774,
3791882856, 938080042, 1088061648, 2773253774, 1418143232,
2412461974, 1088061648}, {3232580086, 2220460662, 1932329582,
981305692, 3232580086, 1287512071, 955067142, 981305692, 860869470,
3541844079, 552285455, 1558638678, 3367709189, 4046953169,
552285455, 59087677, 707039159, 4046953169}, {3619349168,
705697871, 1992221970, 2531593740, 3619349168, 1354657163,
106698643, 2531593740, 450396744, 2225758663, 609496683, 113761526,
1608016081, 4085666982, 609496683, 4169742198, 3569371681,
4085666982}, {3848976950, 2441486578, 2109817045, 3230022138,
3848976950, 3427386258, 4278720212, 3230022138, 1362557480,
4267827987, 2135250851, 296035385, 969184056, 2920112852,
2135250851, 1177141043, 1965329198, 2920112852}, {3731859298,
2881137426, 833389250, 1677356938, 3731859298, 1618706784,
2828019868, 1677356938, 57798040, 978031664, 2982715660,
3530149035, 4134336104, 611127742, 2982715660, 1203193580,
3321547202, 611127742}, {1401050440, 574376174, 2610290298,
267497185, 1401050440, 13298153, 2662390285, 267497185, 2708545360,
2773111558, 3166912454, 3316963881, 3895260799, 3182682829,
3166912454, 2317042610, 216191227, 3182682829}, {3947337904,
912449363, 3158149160, 3331084441, 3947337904, 2749086652,
1858524196, 3331084441, 3471309457, 1338118568, 1601092380,
546044938, 4151360261, 3845466318, 1601092380, 4101622219,
2641032542, 3845466318}, {2583418592, 2950188629, 2277374582,
1054794505, 2583418592, 2518650890, 2873059524, 1054794505,
4064509494, 842864023, 155090437, 995757623, 3465811606,
4005961945, 155090437, 1596766252, 962333604,
4005961945}, {1111373825, 3159958443, 3781563883, 301480275,
1111373825, 222919357, 1488151509, 301480275, 2695022706,
3325437674, 4035263598, 1048085455, 1092686870, 3308111567,
4035263598, 3035036102, 2416022123, 3308111567}, {3646000753,
3244390048, 1152314996, 3585947086, 3646000753, 4188500293,
1551803386, 3585947086, 43539574, 746020360, 4257279454, 159699513,
3209952933, 3616509225, 4257279454, 2901642782, 3195052585,
3616509225}, {1255181092, 2606963172, 3665019301, 1625759465,
1255181092, 790664876, 4216115634, 1625759465, 2865685016,
561498600, 3464317143, 3268511563, 1977483496, 1623078380,
3464317143, 2236332137, 2033286498, 1623078380}}, {{2082683147,
888193479, 2341088247, 2007945401, 2082683147, 3335076429, 3868481,
2007945401, 1198519135, 1935975979, 265491023, 301796252,
2860005744, 1481142942, 265491023, 1088557292, 4120754772,
1481142942}, {782210925, 2180014252, 167617770, 3193380570,
782210925, 307060547, 565138859, 3193380570, 3473925387,
1933943506, 73849832, 3330206241, 3980799998, 2111859033, 73849832,
1001476468, 2813610100, 2111859033}, {1909350615, 2538479259,
1517357015, 938410993, 1909350615, 898527522, 2583257939,
938410993, 3946174395, 1800377045, 756158319, 3666294602,
1208877520, 4238802520, 756158319, 3659825373, 1791251057,
4238802520}, {2665400165, 3316180851, 2499994113, 3303532086,
2665400165, 908630605, 2766583698, 3303532086, 3811588895,
551079002, 2753158871, 3576525143, 3119883109, 1781286360,
2753158871, 3509383709, 3661231931, 1781286360}, {3884416364,
1705355356, 2902099262, 296464469, 3884416364, 3697213244,
3400652741, 296464469, 2828295511, 808249292, 3422735839,
3792794843, 2750435170, 1150902763, 3422735839, 946744850,
3730191199, 1150902763}, {3813545212, 3287843695, 2942299995,
91397022, 3813545212, 2332659451, 1472690314, 91397022, 3952581582,
186846111, 3244671951, 3397069741, 2795337511, 429107478,
3244671951, 689359610, 1467997203, 429107478}, {3356827989,
2041861019, 1590379239, 861234488, 3356827989, 435895955,
2817452481, 861234488, 1334460780, 209181640, 2340848640,
3239904192, 3699044308, 1453148331, 2340848640, 3494583787,
352897577, 1453148331}, {645804125, 2205365640, 2942690261,
3033075037, 645804125, 1531633406, 1505732852, 3033075037,
4288926968, 2352253462, 3763632860, 3811666068, 947424568,
1185024844, 3763632860, 1004942725, 587779104,
1185024844}, {4217730591, 2878362723, 3636389463, 3456071712,
4217730591, 3011838347, 1384409151, 3456071712, 4231070551,
804316663, 565106734, 68522955, 3901561803, 1015564368, 565106734,
1759674948, 3538307608, 1015564368}, {2958437310, 3337768466,
3831862262, 2889311147, 2958437310, 641080998, 1600837671,
2889311147, 2661805006, 3027478693, 1044930964, 2598967548,
3411488636, 3182211673, 1044930964, 688540199, 625836117,
3182211673}, {2699171944, 3184138742, 2121845320, 1884398776,
2699171944, 3906801567, 3710825596, 1884398776, 4271036859,
2542859208, 1092960720, 2604606733, 2852242016, 947085085,
1092960720, 3883689857, 818665845, 947085085}, {2493045579,
1911470121, 1463144542, 1548665231, 2493045579, 619339667,
370438611, 1548665231, 2393036245, 1048288005, 85981002,
1314775482, 4207495883, 3664720019, 85981002, 1691333553,
1230797000, 3664720019}, {1145118921, 870569133, 3115744119,
3524433451, 1145118921, 3694109804, 95035592, 3524433451,
4176687607, 3069357108, 1648927158, 1131329693, 1513911868,
1588589260, 1648927158, 80057254, 3124611325,
1588589260}, {1798887843, 897818198, 3719834407, 4138283993,
1798887843, 2955139057, 1841714444, 4138283993, 2900362665,
2598475067, 1498626910, 2640480870, 653044196, 1972185904,
1498626910, 2803386106, 2060054550, 1972185904}, {2122851837,
2135705707, 1497214115, 64062842, 2122851837, 4242864779,
906515675, 64062842, 1671390739, 3962150617, 2032722200,
3929697499, 1381143398, 2175511079, 2032722200, 2073079485,
384369059, 2175511079}}, {{1294990760, 3088484327, 209339647,
3651411522, 1294990760, 1690405883, 1652430357, 3651411522,
3976196483, 3853798112, 766129426, 2226659371, 1808643264,
1310227170, 766129426, 3172947233, 218138208,
1310227170}, {4093879780, 858124816, 3255666800, 2291131070,
4093879780, 2997812074, 2093793287, 2291131070, 3171589548,
3083355464, 3836629116, 2846140932, 3637515403, 2230902378,
3836629116, 2491962392, 4243560874, 2230902378}, {2875872016,
216187195, 1190772134, 2547678446, 2875872016, 3335475366,
3186955890, 2547678446, 3419357098, 3167919795, 2733679040,
2290142084, 965008581, 1544421101, 2733679040, 2364160468,
772024440, 1544421101}, {4007774239, 4278103786, 3322921863,
2999667479, 4007774239, 1333973326, 3995043314, 2999667479,
4113016361, 3231079441, 946166795, 3686999436, 340856814,
999448569, 946166795, 3344426626, 1464488480,
999448569}, {2050412833, 3073464912, 3722410867, 3880414744,
2050412833, 4079908614, 141813517, 3880414744, 1388901901,
1314467558, 2053642476, 529578422, 404422048, 523750865,
2053642476, 1815255389, 1014187765, 523750865}, {2121124144,
1335782505, 2508968191, 2643538107, 2121124144, 1891051658,
1732112321, 2643538107, 1439277140, 2616567252, 2111503197,
1343417573, 3329102794, 1397461933, 2111503197, 2800333485,
436473632, 1397461933}, {2189016727, 2865776175, 451498522,
3555189695, 2189016727, 1259587676, 2379307904, 3555189695,
564650170, 16002662, 257735782, 852853501, 3796032477, 3401185270,
257735782, 1291498655, 3723330339, 3401185270}, {1489702270,
158549605, 2719807628, 2109676036, 1489702270, 1701566570,
1879981040, 2109676036, 925786347, 4133888397, 2378667355,
260364836, 1493409040, 1226155368, 2378667355, 550006884,
3477563770, 1226155368}, {102714765, 1767614268, 275011730,
2660307979, 102714765, 885578421, 148391862, 2660307979, 386965783,
778237064, 3287566882, 2900226244, 693135908, 4179922375,
3287566882, 2238651166, 1260576417, 4179922375}, {3149192479,
3866462055, 3096741689, 1114069092, 3149192479, 322578046,
1698908478, 1114069092, 2029442378, 1217635756, 2211390378,
280409826, 277271300, 3950117622, 2211390378, 4045225498,
3273547172, 3950117622}, {2170437287, 1041115911, 2070764732,
4126477527, 2170437287, 139996317, 516135644, 4126477527,
896763144, 652265194, 2298338425, 4003371332, 1384035702,
1851549316, 2298338425, 1936637093, 1466780144,
1851549316}, {3210618179, 3763764745, 2000646801, 4224987734,
3210618179, 3781735882, 2705609303, 4224987734, 988998360,
1830518881, 2342461604, 1965981888, 17023679, 662024646,
2342461604, 3990280006, 2039234405, 662024646}, {1565503980,
219951997, 1499882985, 89586999, 1565503980, 1451301676,
2410542713, 89586999, 2411945299, 3819302515, 3512625613,
2398345471, 2833487279, 1895829764, 3512625613, 1829531228,
376895107, 1895829764}, {3907865583, 2413307390, 3140346149,
2529711652, 3907865583, 3318583217, 3374392672, 2529711652,
1551851116, 1625179677, 836796915, 1416778516, 2711576526,
189940394, 836796915, 3214135860, 1926084326,
189940394}, {2162683039, 1335298112, 30742342, 4145583431,
2162683039, 1543933426, 445966311, 4145583431, 1271149039,
688691120, 4030496350, 1676731953, 4164740116, 417702459,
4030496350, 659874536, 3227490550, 417702459}}, {{359516994,
3114762683, 3568862459, 3460246357, 359516994, 2135115875,
218033453, 3460246357, 2255405265, 3712928074, 3088910653,
2507911914, 3303406025, 1277901832, 3088910653, 2818511068,
310796286, 1277901832}, {186466108, 651334129, 2842197411,
4117539411, 186466108, 3807175775, 3073622315, 4117539411,
773148471, 508445538, 1623163429, 3146982514, 2209094694,
481918378, 1623163429, 1728801469, 339570348,
481918378}, {573692436, 3872444854, 35567330, 4052238530,
573692436, 3146236813, 1500692779, 4052238530, 256717243,
2023294295, 3097497909, 1346726126, 1855805319, 1121663435,
3097497909, 3234468457, 2119363369, 1121663435}, {3058812486,
1164226398, 568701600, 1475251263, 3058812486, 1693917167,
3586439101, 1475251263, 615242951, 941946604, 1024510915,
1453455184, 2122851650, 3138921230, 1024510915, 1992357430,
2381863183, 3138921230}, {2607406342, 4088734276, 3921498498,
2735954168, 2607406342, 3610099536, 908481890, 2735954168,
4079732485, 3909842681, 2441622355, 2428264750, 364883417,
3630145509, 2441622355, 2778435307, 2108137964,
3630145509}, {135488725, 2178143073, 508494460, 1041711449,
135488725, 3878048889, 2647679194, 1041711449, 3729302216,
2705683748, 801993191, 1201194185, 2207701640, 3235663883,
801993191, 3251827412, 499846706, 3235663883}, {2481747916,
391743554, 624918119, 1980187742, 2481747916, 3287199547,
1309944802, 1980187742, 1141995151, 1670622591, 1800949016,
2005458811, 2768357811, 3280476256, 1800949016, 2706872639,
682458318, 3280476256}, {1741164221, 3978412194, 2882176274,
15370275, 1741164221, 4187505695, 2061555515, 15370275, 1632636204,
1816599716, 1821933605, 3648970313, 1343489680, 2465372719,
1821933605, 3328905025, 1391015357, 2465372719}, {380080460,
4115908541, 140788931, 1599837029, 380080460, 3846137148,
846965088, 1599837029, 3882007802, 2313275318, 2166836870,
2745553930, 823423733, 585902501, 2166836870, 923832527, 351135393,
585902501}, {2185585470, 371506848, 2826850420, 4240821442,
2185585470, 2968278570, 3087593298, 4240821442, 4199667935,
2539713186, 4167642903, 3124784671, 284777447, 582796091,
4167642903, 1574617829, 1306170361, 582796091}, {1738986063,
2474731168, 2241459023, 4217920349, 1738986063, 758969635,
1809581504, 4217920349, 2543005498, 309254037, 567065147,
1873997200, 970991020, 3358259723, 567065147, 3310339072,
4164780893, 3358259723}, {2192287989, 2190852671, 3389390262,
1455254388, 2192287989, 2324442395, 1267013695, 1455254388,
4002434761, 2742038256, 381403852, 3221291545, 2932149608,
116486317, 381403852, 1696181092, 2122591885,
116486317}, {1584570018, 2824259519, 2717594912, 2335420231,
1584570018, 2642000548, 558307286, 2335420231, 2274010549,
83254296, 2981683361, 639282387, 3966380910, 1598419126,
2981683361, 683872783, 2762253144, 1598419126}, {2278658572,
4289100465, 2051186788, 3193070982, 2278658572, 3155996013,
1527096340, 3193070982, 3722528722, 479301469, 638141264,
478641834, 100617977, 3035480468, 638141264, 2351066479,
2182693760, 3035480468}, {1438975761, 2005515830, 407642856,
1935639927, 1438975761, 897329890, 2366350257, 1935639927,
2018139106, 2168302377, 36177786, 842625014, 1053303323,
2441760800, 36177786, 3842702503, 3689219738,
2441760800}}, {{2579620192, 1538453821, 3753911358, 344820524,
2579620192, 1008543583, 74213775, 344820524, 3446066703,
4043157504, 348833376, 3235582254, 2495544591, 118634664,
348833376, 2492792220, 3358712512, 118634664}, {2222725697,
690898125, 3208347646, 1269921024, 2222725697, 2050939727,
4069458760, 1269921024, 3600859892, 4093044926, 598630070,
2049250561, 1819012637, 2303067090, 598630070, 1967771133,
3371139074, 2303067090}, {250626667, 1038659713, 843125518,
2572090525, 250626667, 804558063, 3334144098, 2572090525,
2580978896, 3496861697, 294683328, 2268904495, 2496877097,
897071837, 294683328, 3837362577, 763331173,
897071837}, {764869161, 1845257036, 2729710367, 2806239788,
764869161, 975123999, 1742216102, 2806239788, 599407451,
3147308542, 3361614254, 326640887, 2807125404, 3035321857,
3361614254, 226779704, 3971176093, 3035321857}, {2650131939,
2064718263, 31199658, 1237821080, 2650131939, 4059416755,
3847187360, 1237821080, 1900612628, 3633977146, 1545732164,
2010134838, 2643845410, 4010547095, 1545732164, 511986932,
2725421511, 4010547095}, {68307108, 1313975073, 4060225449,
3034196764, 68307108, 2459581108, 3435152676, 3034196764,
1347324880, 3989342054, 2957620899, 2926759199, 433027378,
3118026103, 2957620899, 1600236290, 1037137281,
3118026103}, {2619838177, 3089879239, 3452165941, 4273872816,
2619838177, 1083671641, 2483412578, 4273872816, 832405527,
1868423350, 3613148280, 2857733472, 241254395, 2423025801,
3613148280, 995021703, 3089536821, 2423025801}, {1343714307,
4027221761, 1775329096, 3464884028, 1343714307, 580449578,
3444447102, 3464884028, 967330218, 2786835219, 1688753503,
2327946901, 94869516, 1774298149, 1688753503, 3145006948,
4179629947, 1774298149}, {499812782, 2468574300, 1328798463,
712942512, 499812782, 2114280209, 1275627961, 712942512,
1830837179, 478910498, 3423830432, 3437940333, 2143742238,
1378410006, 3423830432, 676267095, 2313761766,
1378410006}, {185092496, 3261096288, 1909413742, 826147645,
185092496, 2753609622, 3613397622, 826147645, 2255621338,
3296195515, 744399687, 1710847521, 774683716, 3554291861,
744399687, 1678181149, 828888636, 3554291861}, {3391553763,
176640707, 3923461410, 2131850586, 3391553763, 3591248161,
3300013089, 2131850586, 965662727, 2049874329, 1978721696,
3331360031, 3020599896, 857287772, 1978721696, 1435187727,
933605087, 857287772}, {3882290892, 2615791131, 3516358355,
1922354841, 3882290892, 3736488404, 4162881041, 1922354841,
665458130, 814480956, 1473201441, 673835828, 2279534654,
4268987998, 1473201441, 1719046530, 1889443262,
4268987998}, {3918254552, 977515238, 2676662991, 3837123498,
3918254552, 3811639831, 1044997226, 3837123498, 2266963059,
1536459522, 2664332345, 3749933100, 949699642, 2597331656,
2664332345, 3601983093, 4232841024, 2597331656}, {2268461137,
2202932955, 3150401156, 993647754, 2268461137, 4095831362,
3066697908, 993647754, 2303802679, 1362659482, 67690266,
2334627516, 2264460592, 3836058032, 67690266, 2795484088,
2479937809, 3836058032}, {3446131817, 3216882533, 2994862180,
4287991663, 3446131817, 1257542093, 1619913810, 4287991663,
3087142251, 1214446621, 1159228195, 1838633564, 146741160,
3781010662, 1159228195, 3140532484, 2792957909,
3781010662}}, {{1604068527, 320435440, 1818147893, 4123590610,
1604068527, 1284315665, 2477690850, 4123590610, 3426136514,
2782214191, 1198432931, 2880072915, 1527068783, 185429251,
1198432931, 3372328450, 88142322, 185429251}, {3383659282,
2265789281, 480221151, 89090276, 3383659282, 607972119, 2719996384,
89090276, 2678132557, 4036691926, 2180042365, 288658537,
1722814040, 127447080, 2180042365, 2942566616, 487724245,
127447080}, {1159516887, 4214998734, 359182081, 1236554533,
1159516887, 1498394381, 358687301, 1236554533, 2289583273,
948838482, 1989171734, 457585003, 3197042083, 3956830949,
1989171734, 1894752970, 61587123, 3956830949}, {2748163602,
1071954219, 2067064347, 2290099491, 2748163602, 3711385978,
614471834, 2290099491, 1827237091, 440737492, 3976346810,
3049605934, 2343603502, 1614979968, 3976346810, 1122661217,
1486547157, 1614979968}, {449599208, 1464260734, 760787290,
216048523, 449599208, 1934558942, 718668543, 216048523, 3331101001,
779962582, 1727662238, 2790878420, 3773409253, 504054738,
1727662238, 3640433646, 811041045, 504054738}, {3122969826,
3214748645, 4075360046, 1749803835, 3122969826, 282411468,
371323737, 1749803835, 1923297555, 425602637, 3693570622,
2527541886, 4000100726, 621090981, 3693570622, 435502401,
2940060371, 621090981}, {1238950442, 2789594859, 1922800567,
4102262073, 1238950442, 4140841154, 2033059421, 4102262073,
2944524801, 3223306859, 3184404995, 830110695, 3333612489,
2034224622, 3184404995, 3743382344, 1565165726,
2034224622}, {4279784147, 859801225, 1999065039, 921712152,
4279784147, 4038673596, 596236860, 921712152, 3894793123,
2070907998, 1308958254, 4058246217, 1338381534, 613698149,
1308958254, 3832821180, 3416334823, 613698149}, {1869986036,
2694004913, 1138713128, 351424643, 1869986036, 1293722401,
1077496222, 351424643, 2123319890, 1958561034, 1827226828,
931143480, 275738471, 1685517521, 1827226828, 2943337751,
200291141, 1685517521}, {2289719415, 2972341462, 2412008737,
3186305416, 2289719415, 1680133933, 1077008360, 3186305416,
101665309, 1002218026, 2968908216, 3857868158, 3240205613,
2209256919, 2968908216, 2571006180, 3305718358,
2209256919}, {3173538830, 2745831157, 451704758, 721933735,
3173538830, 1104060332, 1720363066, 721933735, 837410646,
134304003, 1696051246, 1593368519, 630655040, 1198172062,
1696051246, 1180998885, 1986732860, 1198172062}, {1951351916,
1461089983, 2852188423, 2398700699, 1951351916, 1998914732,
3703766159, 2398700699, 3518731582, 2660857604, 2924102885,
566720713, 561176530, 4069522778, 2924102885, 1555772973,
1558347771, 4069522778}, {212294868, 3758784734, 960139870,
4179444973, 212294868, 395117343, 4079971374, 4179444973,
3802581875, 3556775789, 1666157956, 15461087, 3188653628, 31622540,
1666157956, 303834229, 1109813012, 31622540}, {3998252514,
2487593373, 3704184254, 1332220978, 3998252514, 2113123888,
3475902759, 1332220978, 1057503638, 125794475, 2015618363,
456099605, 687136754, 3379065543, 2015618363, 3151085988,
1425999183, 3379065543}, {2881076070, 1216252469, 149475077,
575808555, 2881076070, 391320533, 349068103, 575808555, 1948549768,
2837457130, 3479195151, 3904215753, 280587190, 4108972617,
3479195151, 1116566633, 2380740889, 4108972617}}, {{2737282292,
2305990443, 2282864144, 3205297712, 2737282292, 2667672243,
1204204130, 3205297712, 7276915, 1522580506, 2091411644,
1198488263, 226649669, 2575546527, 2091411644, 125034191,
1033712257, 2575546527}, {92472822, 1307489118, 4146892220,
2428000499, 92472822, 1541887892, 510672020, 2428000499, 935922304,
1473179318, 3910426444, 3357426062, 2075080920, 1051614737,
3910426444, 3725579556, 227719572, 1051614737}, {1966900739,
651793589, 3658895407, 3516507471, 1966900739, 4257587330,
707837246, 3516507471, 3256757812, 1634112225, 2727381441,
292881953, 113387731, 1187730400, 2727381441, 2156029793,
3317527509, 1187730400}, {4173917861, 3090114656, 817556203,
3787391292, 4173917861, 2953871718, 1705516721, 3787391292,
690398653, 2034180250, 93938118, 3853931650, 4035265564,
2999155498, 93938118, 2685380188, 2971093563,
2999155498}, {3866930563, 1272807136, 3668513119, 2393910601,
3866930563, 3472891570, 2127104432, 2393910601, 3099450313,
1528950630, 2936071180, 98926204, 700790322, 3617732993,
2936071180, 1327756150, 421185366, 3617732993}, {3808361324,
2767640965, 3645860436, 523638114, 3808361324, 2436421546,
796925063, 523638114, 2596837368, 1226136476, 3600765500, 60922281,
1553393489, 3543842569, 3600765500, 519769416, 1469908890,
3543842569}, {2883601860, 778485352, 847589223, 688234786,
2883601860, 2129989903, 3421744687, 688234786, 2802045018,
3624578371, 3481369947, 3618947558, 1076042186, 2380880937,
3481369947, 2421869197, 4221233767, 2380880937}, {1286715218,
4173763431, 2579365599, 1917800003, 1286715218, 3167988201,
1740083735, 1917800003, 476867729, 3114448889, 1656084047,
3103367928, 646811031, 1253368368, 1656084047, 2836784419,
2860152458, 1253368368}, {1547913780, 1695960203, 1075571003,
3047751878, 1547913780, 3858853528, 2098168098, 3047751878,
2031334578, 1092231778, 2679074942, 576357308, 495112941,
674117032, 2679074942, 3267431613, 3527001669,
674117032}, {20494290, 672893019, 3417450723, 2902381003, 20494290,
1487116735, 3585549348, 2902381003, 875985265, 3814972479,
2362004551, 3847535541, 181736283, 2205916258, 2362004551,
3155610724, 1604698588, 2205916258}, {3598503294, 2476778816,
4051073147, 113222043, 3598503294, 63883876, 20025103, 113222043,
3029051882, 3576784848, 507031637, 2765152134, 2046029295,
2484472700, 507031637, 3522086026, 805832193,
2484472700}, {2096301120, 2089751145, 3927430411, 3782598365,
2096301120, 3291528625, 3927087723, 3782598365, 680890926,
2141296207, 1274240457, 1813716775, 2108223515, 7725939,
1274240457, 4264117811, 1654580658, 7725939}, {504139499,
3542846730, 2681551825, 1858879223, 504139499, 2974203715,
3291484606, 1858879223, 2708113221, 945751738, 1072981101,
3288357695, 719154310, 3368760853, 1072981101, 1302462697,
1176489189, 3368760853}, {3316228403, 4103225131, 3936394935,
3282095953, 3316228403, 2778786590, 1709670308, 3282095953,
2506371881, 868624934, 2069873554, 2528019064, 2003524657,
1828440339, 2069873554, 566806600, 726307104,
1828440339}, {3452335153, 106530348, 500725743, 3759949767,
3452335153, 3376077781, 2720130709, 3759949767, 3189476756,
931301677, 2397378290, 2635143024, 2329512, 206927031, 2397378290,
3183265148, 545609200, 206927031}}, {{217808460, 348848516,
2708923752, 3749431174, 217808460, 1208313783, 542781592,
3749431174, 4092801160, 1684552227, 299199825, 3634541206,
3624275162, 2962469315, 299199825, 2670244515, 4021086500,
2962469315}, {1114357536, 350550480, 2465372475, 1732869179,
1114357536, 2509789412, 3638540651, 1732869179, 381829350,
881729466, 1625976775, 3498104358, 1494982903, 804213223,
1625976775, 3143925885, 438999528, 804213223}, {2457049990,
3557973226, 1543332620, 3507668274, 2457049990, 1229010797,
103212933, 3507668274, 4088394360, 742292537, 1140571071,
2802821649, 2989367205, 2885386524, 1140571071, 4093772765,
2618720282, 2885386524}, {232741719, 170237153, 4114351745,
1698887908, 232741719, 2152325130, 3706277064, 1698887908,
3712059912, 3643511238, 1312079237, 3784344293, 1905431135,
1547173514, 1312079237, 918013965, 490999994,
1547173514}, {4247643355, 3352831267, 1732506223, 2798697140,
4247643355, 2098708615, 2813869207, 2798697140, 2230538189,
2655518143, 485137636, 4117358359, 1563107974, 3363084915,
485137636, 2840623993, 889964766, 3363084915}, {366288723,
3346018419, 901263071, 950363290, 366288723, 1842485244,
3526146168, 950363290, 141104167, 1568942409, 588313388,
4248816946, 2691287218, 2014523666, 588313388, 1550754572,
1476325540, 2014523666}, {1756558336, 3396822999, 983823623,
1252923554, 1756558336, 3523638916, 845609283, 1252923554,
1273880950, 2140373745, 1127328556, 1804600645, 3702106930,
2407332340, 1127328556, 1876171062, 3541076740,
2407332340}, {2713148271, 111258429, 2200585411, 905755330,
2713148271, 1712994855, 1717718779, 905755330, 993804379,
2193427908, 4115190113, 3088495692, 777098754, 3846994569,
4115190113, 1130633118, 2894966137, 3846994569}, {4164605063,
2001397872, 3307457992, 1332083728, 4164605063, 994764472,
3095955451, 1332083728, 3078061472, 307891156, 4277126258,
93837191, 3505944903, 2737996304, 4277126258, 2269237989,
2204801087, 2737996304}, {59975199, 3196013539, 4274647327,
3058110169, 59975199, 837447492, 2642581291, 3058110169,
3553314488, 2631834329, 730404862, 1954442101, 3087115728,
879138341, 730404862, 1377874203, 3894266887,
879138341}, {753862080, 1009449717, 881783401, 3645085278,
753862080, 1303955964, 379120643, 3645085278, 3843971092,
3999383335, 1524177551, 994115785, 298833416, 3313603273,
1524177551, 448963005, 1100704135, 3313603273}, {182182402,
3064534402, 3940978003, 1210559760, 182182402, 346013324,
2250978342, 1210559760, 1329720286, 2692497, 3750995369,
2634685097, 2232490889, 3828202622, 3750995369, 1121862301,
1988547722, 3828202622}, {1663970006, 2302549379, 1579968517,
3706516165, 1663970006, 896951691, 4102157385, 3706516165,
3924318383, 187157064, 1401539421, 2227875755, 721552325,
102342241, 1401539421, 1005947216, 2953512301,
102342241}, {2945796039, 3961175050, 584883917, 2733229985,
2945796039, 275640933, 3340851597, 2733229985, 1037589465,
1386834166, 2470436879, 4079095244, 336074580, 1186860461,
2470436879, 1174950378, 1813306535, 1186860461}, {1667519782,
2373698040, 424806096, 1202527536, 1667519782, 3530502594,
2335662921, 1202527536, 152830484, 2468190893, 1896248015,
1425047557, 2551600620, 1992474246, 1896248015, 261348052,
1374531311, 1992474246}}, {{69195019, 3528743235, 3672091415,
1871391091, 69195019, 3672831523, 4127413238, 1871391091, 82758667,
3708466080, 4292754251, 3859662829, 3889917532, 1511326704,
4292754251, 1610795712, 3759209742, 1511326704}, {2931758910,
2374097189, 1113529483, 2329303404, 2931758910, 2008671965,
99651939, 2329303404, 3361372532, 3196114006, 4248550197,
1448629089, 1926628839, 972103006, 4248550197, 878035866,
964807713, 972103006}, {3536018417, 620673032, 2847216174,
3210181465, 3536018417, 1011789944, 230171794, 3210181465,
2138712950, 3331097822, 840689212, 4278768404, 1249197731,
907238901, 840689212, 2865846472, 2926293232,
907238901}, {2508077280, 661437137, 1787615788, 1588259595,
2508077280, 2385989343, 1314332382, 1588259595, 1831590873,
2980208068, 3864816447, 2546884738, 3038399593, 3497384788,
3864816447, 3182508868, 3174249442, 3497384788}, {2106045662,
531179829, 2296845920, 401645099, 2106045662, 3998063768,
4010413858, 401645099, 2042818265, 564222425, 3462340161, 36909913,
1923026173, 784581211, 3462340161, 384279948, 2653422698,
784581211}, {2823695054, 61755986, 1786076983, 2703042396,
2823695054, 1517044321, 2307614257, 2703042396, 2184503434,
1365035178, 112779604, 3925268470, 3412123799, 1405813956,
112779604, 2384208331, 4174209611, 1405813956}, {3707259965,
2921683523, 1460744709, 4210727080, 3707259965, 66803473,
520437440, 4210727080, 3242480464, 4072194992, 3630467555,
3216768073, 2645232736, 2619474739, 3630467555, 804702670, 3132575,
2619474739}, {1453913233, 1805360390, 2311925423, 4183591379,
1453913233, 2604529491, 4049009082, 4183591379, 2326052247,
2145357457, 3813600746, 4119698302, 789475914, 1776335558,
3813600746, 4095757548, 1189944887, 1776335558}, {2097509046,
1602570228, 1673266895, 752526256, 2097509046, 1991110049,
2828141255, 752526256, 4140732895, 1412103825, 282674872,
3836717436, 84549310, 3769485546, 282674872, 2724043008,
1038695719, 3769485546}, {1825015381, 151012833, 2880492218,
3221815101, 1825015381, 1268177639, 2040707498, 3221815101,
1965424493, 4228411920, 480943335, 853742067, 2602801723,
341941264, 480943335, 3287341287, 2190935098,
341941264}, {3086679378, 3869978336, 1844125870, 3548682442,
3086679378, 318961528, 4073269919, 3548682442, 2432287668,
2652937834, 1875189386, 446965326, 2907702905, 683086802,
1875189386, 3536524881, 2785783425, 683086802}, {1982184590,
334885555, 2950459869, 2590401961, 1982184590, 3913458720,
4225456867, 2590401961, 664423898, 4282213129, 2539405616,
1676082014, 3870991887, 1736653518, 2539405616, 3568863651,
945282763, 1736653518}, {289016697, 420196906, 161587717,
1473487642, 289016697, 2432008743, 3895355757, 1473487642,
2712719680, 3422696266, 2665588127, 866331678, 4059421908,
2310863367, 2665588127, 953332332, 4014679340,
2310863367}, {1902693761, 1769522507, 931489105, 2820725828,
1902693761, 957556128, 209065955, 2820725828, 1850892875,
3030761701, 2736033223, 3655548782, 3000096196, 871287627,
2736033223, 1560288698, 845790159, 871287627}, {2290484832,
401518999, 2360845867, 527205029, 2290484832, 1551367322,
2459842680, 527205029, 2903982948, 707962386, 1432097408,
733539302, 1212329807, 2734783049, 1432097408, 571109565,
1664753021, 2734783049}}, {{1155867175, 429445904, 1777070788,
604461629, 1155867175, 794711716, 1257432102, 604461629,
3956367490, 116540512, 1675130777, 4089786548, 916677004,
4022832294, 1675130777, 1942923647, 4130146837,
4022832294}, {387798431, 1312672615, 235551485, 1257046062,
387798431, 490376081, 1427609439, 1257046062, 1686241617,
2006913311, 1444587280, 2617085459, 161923120, 165639584,
1444587280, 2037453462, 1205513289, 165639584}, {737112304,
1061526345, 1183825110, 963675979, 737112304, 1535762087,
326297318, 963675979, 1572901935, 1216784166, 1146366772,
3575639796, 44510871, 1503328438, 1146366772, 3214217109,
3635460156, 1503328438}, {4194339866, 4177893681, 1046144413,
3445363024, 4194339866, 2426867845, 3160262066, 3445363024,
2362447880, 2219267779, 1064270720, 4056228301, 230768332,
3458099202, 1064270720, 4052486999, 3062421748,
3458099202}, {3737365561, 4240734206, 622929198, 1050812700,
3737365561, 543911093, 878580341, 1050812700, 4007462960,
2522422296, 548555470, 1154892003, 2938490487, 134740506,
548555470, 1727589531, 555276190, 134740506}, {3265509251,
1726850927, 3616767886, 1723951785, 3265509251, 332901774,
2356709199, 1723951785, 2399569099, 2033981581, 3549040929,
3007893075, 624596665, 4130534548, 3549040929, 2978123129,
3958841381, 4130534548}, {3101765951, 1336753577, 1280437671,
3634584810, 3101765951, 3846059767, 2991529617, 3634584810,
1980497118, 1383863499, 847308857, 2011625723, 2460774518,
3154324725, 847308857, 4169594743, 617213161,
3154324725}, {1909472435, 375835695, 280754246, 3559576374,
1909472435, 697539134, 3107663662, 3559576374, 4251175413,
3447191459, 1789143993, 2097389984, 3463234943, 296275263,
1789143993, 2081462173, 3452455838, 296275263}, {3927878074,
1304967920, 4137456834, 1878918502, 3927878074, 1744558848,
1468256892, 1878918502, 3322056141, 2488124556, 53798688,
2539506574, 597744771, 2944457194, 53798688, 3937802461, 895604016,
2944457194}, {1473406035, 1954838782, 3884376669, 911930333,
1473406035, 3689446034, 4028966669, 911930333, 1441163739,
2818867049, 895650639, 3063493626, 202155710, 3690699991,
895650639, 3499022088, 194807645, 3690699991}, {3706828782,
3622823846, 786099080, 3752485002, 3706828782, 272570612,
1940026562, 3752485002, 293325213, 3875261308, 652020716,
1458595307, 1302343337, 2884197862, 652020716, 613179368,
1494747539, 2884197862}, {1862670973, 496211406, 402523958,
4203984455, 1862670973, 3937403282, 2167450892, 4203984455,
751906018, 1742271124, 2684216609, 4036938266, 721391189,
775854673, 2684216609, 2709062415, 2918396394,
775854673}, {1620921222, 3725091997, 2691314998, 3403610080,
1620921222, 745303566, 1413978227, 3403610080, 1337530783,
1276782621, 3630584202, 201529621, 1377329334, 1802078927,
3630584202, 360946677, 3710731073, 1802078927}, {3613797222,
4164228265, 3848675801, 2233062609, 3613797222, 149028817,
98234458, 2233062609, 726664456, 1393814954, 3083923483,
1576871217, 2299677089, 3150458758, 3083923483, 1386916196,
4126093705, 3150458758}, {3629944862, 512011875, 93012811,
1396449051, 3629944862, 2257997180, 2499923958, 1396449051,
2442173264, 431205976, 930303450, 1099910359, 366661165,
2783164045, 930303450, 1631317297, 356389158,
2783164045}}, {{1135109300, 2352874613, 3325312332, 712404985,
1135109300, 546519870, 1571467521, 712404985, 1099512970,
2627363449, 1587005542, 2766486018, 1469478670, 2828288883,
1587005542, 410553827, 3866690251, 2828288883}, {414410025,
1185848176, 1181583679, 1653045514, 414410025, 3733376326,
381988982, 1653045514, 1945425936, 4094554844, 4267121909,
420803572, 138566505, 3288027530, 4267121909, 2458742268,
412403981, 3288027530}, {65355284, 2858851708, 3052316027,
1301935019, 65355284, 3961455404, 1963289366, 1301935019, 80175856,
1765993677, 3790597750, 4056706273, 3281478755, 2136361676,
3790597750, 2068559481, 3398888999, 2136361676}, {4188191530,
293801056, 2964651598, 3019211015, 4188191530, 2683163472,
4215964965, 3019211015, 2526336124, 3960267499, 2486244684,
3560842909, 4252427633, 3844599430, 2486244684, 3283485436,
2430152838, 3844599430}, {1196758134, 1741700121, 237185264,
1387140872, 1196758134, 2971687592, 762351827, 1387140872,
1749670132, 2270459619, 1381214928, 767007913, 1111366755,
3419145577, 1381214928, 429885456, 107246070,
3419145577}, {759707097, 825174423, 749051338, 481737140,
759707097, 878719633, 1487069976, 481737140, 4238062407, 555083053,
3674972444, 62603161, 2085831739, 1494013447, 3674972444,
931794028, 1485743041, 1494013447}, {101202533, 1603656961,
2744191919, 1130524081, 101202533, 499306218, 2151646894,
1130524081, 1955913150, 349068991, 3432517708, 3096606227,
1550912558, 1677686741, 3432517708, 3063490072, 1049056456,
1677686741}, {2391992038, 3691889508, 987740265, 2253324417,
2391992038, 1880267534, 39719589, 2253324417, 1444052678, 86971645,
513233413, 44564058, 1379805031, 67933059, 513233413, 2657888382,
1294996291, 67933059}, {2107525880, 1592007351, 385198528,
175882074, 2107525880, 3148809017, 3291876418, 175882074,
4127799778, 2936899148, 1934222992, 3196093040, 2005250810,
3685899833, 1934222992, 1732298394, 1942243854,
3685899833}, {133176397, 1183299853, 1822497038, 3529992299,
133176397, 1379194676, 1457174623, 3529992299, 2302655457,
1694599040, 2337874210, 845389321, 2840999768, 325500910,
2337874210, 1468441745, 383109598, 325500910}, {961584130,
2694151944, 1915277623, 234795543, 961584130, 1190493514,
3739166168, 234795543, 4258561079, 1092670963, 1470575478,
246450419, 2204665831, 1337631967, 1470575478, 1833296230,
1847115544, 1337631967}, {1560567705, 140227043, 2367587269,
983599045, 1560567705, 1528189962, 1399037840, 983599045,
3276243992, 1139661541, 98262291, 1859723823, 1624143697,
2160778451, 98262291, 3458861828, 3690957433,
2160778451}, {384037582, 1340101783, 29577830, 3483671709,
384037582, 3272912887, 1674233603, 3483671709, 2105717500,
1386333424, 237541885, 755629466, 4212839162, 2308200298,
237541885, 3249123485, 3207761476, 2308200298}, {964726501,
595108512, 481622686, 3464133355, 964726501, 2805976441,
1938386458, 3464133355, 1761003707, 2359525461, 1489856126,
3847821866, 1157162783, 36778158, 1489856126, 4007738185,
395520753, 36778158}, {974765406, 3799832442, 3357976289,
3299292894, 974765406, 2907974061, 2764080733, 3299292894,
1949491670, 1912626269, 2519328192, 1524883318, 1842665800,
1538563035, 2519328192, 4268751569, 386085114,
1538563035}}, {{2570600780, 3460843234, 2700034538, 2335494420,
2570600780, 2227597731, 1232261118, 2335494420, 166599066,
1124501551, 2540507736, 1164400003, 3257104212, 2732588524,
2540507736, 818237694, 1866530072, 2732588524}, {292276982,
3093661552, 697844082, 1188954576, 292276982, 2511664974,
1251401239, 1188954576, 2511338360, 2343537248, 1135554501,
3251740389, 2056492193, 4199239222, 1135554501, 2121388468,
3155848463, 4199239222}, {3140636559, 3213333408, 1429177194,
4040666706, 3140636559, 902332253, 3671213347, 4040666706,
1029044592, 4000127015, 2226758301, 1886560387, 1590955204,
884974087, 2226758301, 2087168228, 1753139982,
884974087}, {3710319575, 3910069381, 2858628849, 51993077,
3710319575, 3252828938, 3540268009, 51993077, 3624650744,
1261552815, 356444753, 58733535, 1634965500, 550710036, 356444753,
1075236085, 500329021, 550710036}, {2982343032, 1169418972,
1500370974, 22047040, 2982343032, 1622877607, 1300220338, 22047040,
1300368901, 3771708494, 817854337, 1456938507, 1008316214,
1625784164, 817854337, 2897537078, 2560115442,
1625784164}, {1699174125, 2141085139, 1634036662, 989485658,
1699174125, 418481819, 759287627, 989485658, 3839868036,
1550455173, 877107862, 2913047937, 3738030270, 1821976647,
877107862, 357862042, 1482985289, 1821976647}, {814718474,
575686326, 2680605866, 4002649103, 814718474, 2635782110,
109096906, 4002649103, 3040947805, 3927428565, 789790646,
442232022, 3669602586, 4163172789, 789790646, 1968560673,
965617071, 4163172789}, {475572423, 3228514800, 3248619000,
754002362, 475572423, 1958331075, 1646581402, 754002362, 655966702,
76092226, 706488081, 585555005, 1113760995, 708689546, 706488081,
2012018174, 419139045, 708689546}, {1102669243, 2625661593,
3628247553, 155487353, 1102669243, 3849329584, 3641526883,
155487353, 842507409, 2338156799, 3880807303, 3855119703,
1262008396, 1096269180, 3880807303, 2401963969, 1210247935,
1096269180}, {459957859, 3188513729, 885773771, 667065261,
459957859, 784642827, 2388680063, 667065261, 4039816650,
3919385156, 3703890126, 1414124697, 2547175916, 1520818365,
3703890126, 2650161610, 3297719233, 1520818365}, {3970199468,
1659440263, 2074061500, 415059067, 3970199468, 2389861083,
2700180911, 415059067, 1659410671, 2798460732, 1938524173,
2690686596, 1018928982, 1836340058, 1938524173, 4098854961,
3560962982, 1836340058}, {4149545048, 1314604300, 2180788629,
1138672588, 4149545048, 626151178, 1020827900, 1138672588,
135820422, 2546254144, 831116151, 1036497162, 1919249397,
2584730290, 831116151, 2018833769, 103417098,
2584730290}, {678239872, 827239274, 2649878256, 94846164,
678239872, 3448200806, 2166582678, 94846164, 2699595416,
1973852572, 2244344958, 2435450197, 1945003835, 2866004314,
2244344958, 2746616382, 2797087795, 2866004314}, {628989796,
314784777, 1418331921, 1909399335, 628989796, 633947172,
1001703312, 1909399335, 255163547, 2060649849, 2281461798,
3416315430, 3526912856, 1029077486, 2281461798, 3286174256,
382606411, 1029077486}, {2270079132, 3076945394, 150707272,
2615860705, 2270079132, 3102811792, 521837577, 2615860705,
3695935017, 998617442, 1436679917, 3902334231, 3208436650,
723601662, 1436679917, 2678049604, 103406914,
723601662}}, {{3090782630, 1628442374, 3242468721, 4166372813,
3090782630, 2282679206, 741596417, 4166372813, 2760311307,
3912300371, 3319068849, 1675229290, 1085259665, 1293182265,
3319068849, 366230236, 3168473803, 1293182265}, {2603727025,
3149485478, 1428367254, 3532111852, 2603727025, 500404765,
1754687396, 3532111852, 4265279407, 1658665581, 756122322,
2518961174, 578262892, 3186089068, 756122322, 1211781402,
4188864734, 3186089068}, {3773430454, 2468235585, 2575088157,
2341623975, 3773430454, 733258443, 1118596215, 2341623975,
2601534865, 45514776, 2130477825, 1580291641, 995988350,
3977628955, 2130477825, 207693548, 600259676,
3977628955}, {1261269623, 1830023157, 3967600061, 2081104178,
1261269623, 4153800443, 596284397, 2081104178, 2873769531,
745646810, 4102191254, 1293500383, 878627148, 1347291439,
4102191254, 736113023, 2050427676, 1347291439}, {2843172339,
2231035373, 1806730448, 1650369421, 2843172339, 2792975338,
1789495539, 1650369421, 3741987406, 3181657906, 1520984655,
713062199, 568252202, 3687588783, 1520984655, 1438381249,
1453894028, 3687588783}, {926561185, 2950279312, 790563916,
2833805942, 926561185, 4136201738, 877956083, 2833805942,
1219416476, 2401469211, 2615508955, 1098162878, 3061138405,
1428398286, 2615508955, 59314928, 758558167,
1428398286}, {3992518614, 3914211973, 1730656997, 2840114304,
3992518614, 2736564746, 2258190816, 2840114304, 798256550,
3652744742, 854214045, 1115314385, 2093028962, 3981341272,
854214045, 569010442, 1972270158, 3981341272}, {77498444,
1070507239, 3904070810, 2229285304, 77498444, 3110641420,
3443204327, 2229285304, 278611533, 314631094, 3108887846,
1405263476, 2697923227, 4196897331, 3108887846, 1900167098,
3436564969, 4196897331}, {676854127, 568650466, 4272969986,
2797491436, 676854127, 3694482943, 4153837975, 2797491436,
2793822647, 2166847050, 2304632601, 1808184757, 2750552563,
700767207, 2304632601, 3385626848, 2856480388,
700767207}, {835628171, 165198836, 1001911068, 1438273012,
835628171, 2246148877, 1676406913, 1438273012, 1569490059,
3626831178, 956778663, 2211295748, 1815192601, 3004743607,
956778663, 4202297421, 2733058282, 3004743607}, {518172526,
1449851373, 739475642, 977556415, 518172526, 1592432867,
3955686626, 977556415, 3311851781, 2092785306, 3406476392,
3275119453, 2718052422, 2672882299, 3406476392, 3145190580,
110199372, 2672882299}, {2122146632, 2977102789, 2065383788,
236464123, 2122146632, 244990374, 3922106376, 236464123, 219341062,
301939378, 540077867, 2129238146, 2433069844, 3694919563,
540077867, 731922800, 2520419703, 3694919563}, {1011510006,
3830153744, 1670865059, 2772357215, 1011510006, 1691277696,
2154410806, 2772357215, 1849895224, 2930957650, 518291486,
640956515, 459502110, 832670351, 518291486, 3067258844, 67590653,
832670351}, {3276353542, 3933677451, 2230558099, 1729521343,
3276353542, 2059608998, 424414765, 1729521343, 2250560481,
1707617706, 2217587145, 135344313, 1363889163, 2475140271,
2217587145, 2778457406, 37335008, 2475140271}, {4218145290,
3722842911, 948415749, 1033822680, 4218145290, 1221024374,
2603155579, 1033822680, 4201417544, 14480722, 1981201988,
1721059867, 2281906062, 2114502981, 1981201988, 909430413,
2781069823, 2114502981}}, {{121482268, 3116647617, 876978915,
568528663, 121482268, 2475829068, 2177189807, 568528663, 544639534,
1513734026, 3770009830, 533700213, 793326651, 958383622,
3770009830, 1150087061, 3694638688, 958383622}, {2976916793,
1691884706, 168571747, 2404658587, 2976916793, 2158188804,
4191448009, 2404658587, 1547862823, 2684083587, 3765397477,
3380901602, 1458031003, 4119603367, 3765397477, 3534176399,
3479396923, 4119603367}, {62338927, 906126073, 2798158767,
2933510859, 62338927, 1177241360, 4240166566, 2933510859,
2707010111, 4091367000, 389885259, 1268575347, 3729279189,
178016378, 389885259, 789650986, 1184002529,
178016378}, {2951207765, 2395485231, 2075145516, 4212638780,
2951207765, 671148556, 3235157352, 4212638780, 3208213311,
338680738, 1421333909, 323724368, 3405683645, 980937351,
1421333909, 448446028, 2094378936, 980937351}, {2952700002,
2302905244, 1142378033, 3349220842, 2952700002, 224784515,
3722506196, 3349220842, 3757387996, 3589653882, 4185254045,
2006223188, 1244677534, 2381808660, 4185254045, 146194193,
341372255, 2381808660}, {1571592397, 3369375773, 2204145504,
145407649, 1571592397, 347432419, 659394903, 145407649, 1941283113,
2028886430, 2690000574, 3935039161, 3592133108, 1104593159,
2690000574, 4243190272, 2457034166, 1104593159}, {179198568,
3594781674, 3021644798, 386906095, 179198568, 1281474767,
3767619826, 386906095, 1094854803, 208124234, 3595149031,
775957906, 1556294726, 798595991, 3595149031, 1453032677,
3072704016, 798595991}, {867670560, 3613001199, 834432416,
2133433101, 867670560, 2044221845, 4043998180, 2133433101,
4080517315, 1091268872, 527851489, 4209198933, 3852871282,
2942968846, 527851489, 3549906544, 4293637338,
2942968846}, {2580561525, 862067022, 2212809055, 2677316398,
2580561525, 3025327788, 1725615982, 2677316398, 1815082440,
3827868466, 585125955, 1377490536, 641792185, 4284386809,
585125955, 1512141755, 1090940362, 4284386809}, {3486242248,
4131376232, 1673783206, 273325774, 3486242248, 296683239,
3615048227, 273325774, 3787865083, 4096418302, 568040678,
1833602093, 1110516758, 2744554909, 568040678, 3515890758,
115338549, 2744554909}, {3206287043, 761909254, 2253413915,
3624857093, 3206287043, 3199872731, 373953079, 3624857093,
891498145, 2019749517, 1489977120, 3200804515, 3759902928,
2530156636, 1489977120, 2646901097, 2523509558,
2530156636}, {643902568, 117953569, 3583402601, 2397841575,
643902568, 3914257878, 4225297936, 2397841575, 3355193419,
4283593734, 3621429915, 1589633023, 2362133832, 3926916518,
3621429915, 3323739459, 534546596, 3926916518}, {3213872118,
680556753, 2480164693, 956051037, 3213872118, 312904051,
1476930939, 956051037, 4120853570, 3727325982, 914858946,
3008893009, 135894748, 3830075648, 914858946, 3374985991,
993279056, 3830075648}, {3771960066, 4191415769, 1282291514,
3671186390, 3771960066, 1261426079, 3681543355, 3671186390,
2476625171, 266967475, 1769866689, 2602569321, 1327580909,
2252783708, 1769866689, 4146605623, 4191438403,
2252783708}, {4292532578, 3774138404, 2670391583, 3944009238,
4292532578, 2826695620, 3332928438, 3944009238, 3502046981,
1192443015, 2964738836, 4159361902, 2507987636, 3433059425,
2964738836, 747959045, 2720593265, 3433059425}}, {{607380970,
2610193258, 2198271844, 1319434267, 607380970, 740092580,
2678775073, 1319434267, 4102885735, 1875181995, 3982652344,
2055073597, 3001736262, 1975983015, 3982652344, 611187071,
2092556693, 1975983015}, {2507168618, 2687593413, 2988334517,
1317690360, 2507168618, 399240205, 1189150958, 1317690360,
1165218048, 2712717326, 2888742196, 3019977656, 3521651749,
2970221269, 2888742196, 2447465272, 880904779,
2970221269}, {16047360, 3691116188, 1346223401, 3528536028,
16047360, 3475527779, 2077936780, 3528536028, 2745374441,
1001260701, 2137129319, 1727032860, 2895847378, 710612185,
2137129319, 3956229929, 99734716, 710612185}, {1842080991,
3109935091, 3903826366, 4082006648, 1842080991, 1888486946,
338232527, 4082006648, 1028861702, 2542621682, 4101695717,
2657991148, 4264593116, 419134859, 4101695717, 747864206,
2976059897, 419134859}, {806395396, 1903753461, 2240078962,
2888571437, 806395396, 2362159826, 597224294, 2888571437,
1753159753, 679653554, 3492286586, 2825161141, 875918558,
149385744, 3492286586, 204565173, 1789714272,
149385744}, {2468977284, 1833271761, 1593946969, 3375115282,
2468977284, 3143513113, 375616616, 3375115282, 1709021254,
1811957225, 3587671931, 397670345, 321251974, 1102949689,
3587671931, 3445033624, 1203825901, 1102949689}, {2760537049,
3106631744, 1542713475, 3781287485, 2760537049, 1692479301,
3889645784, 3781287485, 884591943, 3312983533, 190837198,
2375945093, 2548729504, 1557133499, 190837198, 1115795021,
3740911662, 1557133499}, {2773979788, 2554371815, 1144301620,
2261996505, 2773979788, 710199359, 2888080641, 2261996505,
614134826, 2949176293, 1267010518, 3094232897, 3496325546,
4043135299, 1267010518, 1149778656, 1612983166,
4043135299}, {1887452538, 1950002858, 2959098349, 2798155265,
1887452538, 1051618307, 1449170252, 2798155265, 3198080186,
4177652829, 3578404586, 4256705333, 4214331358, 3773836820,
3578404586, 2684741515, 821087236, 3773836820}, {2188312410,
3390487176, 1896324706, 3025845012, 2188312410, 1628017861,
3475622581, 3025845012, 1657795857, 3640331752, 1182165056,
69336718, 3702644050, 3982666145, 1182165056, 1836627509,
2354595202, 3982666145}, {3423044419, 1758118748, 3469249742,
1137902837, 3423044419, 3440035369, 1998736700, 1137902837,
3167545079, 282834660, 2710548883, 801138460, 2258912019,
1097820760, 2710548883, 24888594, 383216742,
1097820760}, {3505214566, 478558438, 2741627244, 2988495416,
3505214566, 1788745968, 3084935324, 2988495416, 3872045348,
537227984, 1780972559, 1348023856, 1132838092, 3214297332,
1780972559, 4106231685, 2846434362, 3214297332}, {256760884,
3759178004, 2278775340, 565470180, 256760884, 976841595,
3313779653, 565470180, 2708862228, 4034322798, 1564505416,
899811882, 2875184702, 755543217, 1564505416, 759874199, 13278192,
755543217}, {2236758020, 469234254, 799823618, 1323939009,
2236758020, 2319876367, 495270742, 1323939009, 3436633871,
1597833968, 1156392497, 3132103937, 4147435046, 637552329,
1156392497, 1869389825, 4159699881, 637552329}, {3861027456,
304964505, 2185185122, 3519163420, 3861027456, 4105292342,
1062166603, 3519163420, 2675891460, 279886449, 1433999328,
3839122367, 608579980, 520981812, 1433999328, 4205230168,
353934977, 520981812}}, {{2309408315, 2760088020, 1533175115,
1285620078, 2309408315, 2423072612, 357420018, 1285620078,
4056173823, 513963325, 2182983404, 2820933455, 2355671350,
3949395794, 2182983404, 2123036003, 1774568686,
3949395794}, {2500813569, 829683767, 3378723563, 815015434,
2500813569, 180649975, 3142242173, 815015434, 4264130267,
3622293976, 4277866093, 2249371766, 3146977604, 3046911698,
4277866093, 2492729814, 2576744453, 3046911698}, {1646277780,
1227220999, 2122993239, 3002678034, 1646277780, 3270884334,
94686468, 3002678034, 780095381, 1423668157, 3712014795, 390814955,
2434706877, 87281746, 3712014795, 2157354608, 2431554641,
87281746}, {4044178729, 554748104, 467969301, 1030729435,
4044178729, 1752988797, 2812778314, 1030729435, 4174387531,
1015566287, 3536237833, 157623850, 985347517, 1391529818,
3536237833, 2587125255, 423458502, 1391529818}, {1598746056,
3303343260, 762241819, 2474459917, 1598746056, 218140428,
1196064637, 2474459917, 2980841286, 3250335592, 1487367860,
1521246973, 1406132870, 2197835200, 1487367860, 3663691762,
2133883764, 2197835200}, {1197428336, 3482088360, 310254483,
538480994, 1197428336, 571730491, 911775489, 538480994, 1224655671,
2502325941, 2258964805, 2454977948, 3036723158, 2768170623,
2258964805, 1038000683, 2671124421, 2768170623}, {1908218484,
3502680982, 2469895624, 3781005565, 1908218484, 2648830135,
3517907301, 3781005565, 3540579584, 18445794, 1915310420,
2639699364, 762348997, 1381472023, 1915310420, 2866068177,
1428725999, 1381472023}, {3069812185, 3208181168, 2542432347,
2928743274, 3069812185, 3607529209, 3776082629, 2928743274,
1348429235, 950730510, 902187690, 4019658974, 1716556555, 48329260,
902187690, 821961664, 2599277669, 48329260}, {2182176592,
1860603509, 2625050978, 1999664797, 2182176592, 321744270,
265698801, 1999664797, 356266135, 1560019161, 1171489446,
1517798425, 3843281147, 3837472901, 1171489446, 3436524397,
1784243620, 3837472901}, {3276234188, 3863982741, 1384068579,
2994139106, 3276234188, 3723068499, 1829200407, 2994139106,
2414375640, 4124897232, 1634016885, 378757639, 2161076681,
3100975771, 1634016885, 1844417430, 1019061132,
3100975771}, {1354152039, 1083036128, 1019985432, 227671285,
1354152039, 362042802, 3150490417, 227671285, 983454682,
3867285910, 1283633896, 3768959266, 714795054, 4195850249,
1283633896, 2048170658, 3679000702, 4195850249}, {3946110585,
260368160, 1853745554, 2897556757, 3946110585, 3421663444,
3145856482, 2897556757, 798763723, 465921502, 2328291482,
3863948457, 2884310858, 1045387495, 2328291482, 1236131839,
796030826, 1045387495}, {990300297, 2243839597, 1117676581,
1042468264, 990300297, 2550218202, 3310392503, 1042468264,
3049833332, 987744533, 3087581481, 3055290424, 2752836083,
1588498933, 3087581481, 2926606281, 4290908028,
1588498933}, {3932965485, 2044406932, 76660843, 3740645591,
3932965485, 3321952562, 3060595950, 3740645591, 95178102,
2513215163, 1105604243, 2484220821, 2420741811, 3483511399,
1105604243, 1920164372, 741205873, 3483511399}, {475729593,
1660827878, 3003742383, 1232366114, 475729593, 2210388529,
2053833955, 1232366114, 1992425875, 2074496556, 769497234,
3387899531, 2284204944, 2431121957, 769497234, 1281254384,
4171704364, 2431121957}}, {{1380634204, 1192551435, 3533700508,
668285756, 1380634204, 3713143233, 3816217625, 668285756,
4064845753, 1337075195, 4270158447, 2679964938, 1654940598,
1318489562, 4270158447, 3713577419, 1530977112,
1318489562}, {2057456462, 2897339640, 3410402985, 1706771705,
2057456462, 3174850469, 728123349, 1706771705, 1515684518,
522138188, 2710443459, 3098163705, 2990852339, 770600793,
2710443459, 3578552768, 3249576224, 770600793}, {1249105026,
3358422070, 3395543717, 4038063126, 1249105026, 363246278,
2168451262, 4038063126, 493252920, 1711417284, 3432253975,
201692417, 3480703284, 3299888517, 3432253975, 2474407987,
1806316064, 3299888517}, {2171402857, 3049703400, 4030688141,
531091457, 2171402857, 3895139973, 1390161328, 531091457,
3082272717, 3058519788, 477609731, 2252852611, 2140252218,
2803285489, 477609731, 3164022812, 1922250286,
2803285489}, {902295474, 591758943, 847382876, 2355871533,
902295474, 2931048320, 3949682718, 2355871533, 3935740675,
2796594703, 1613420642, 3412387271, 651730634, 3735324161,
1613420642, 2792496593, 860809210, 3735324161}, {3814732591,
3605659623, 1834617826, 956915353, 3814732591, 1282074175,
71119600, 956915353, 1096633558, 2164990937, 3213913829,
2561980091, 3655831787, 993539593, 3213913829, 3772074010,
3499265007, 993539593}, {2658882772, 2195730734, 3298597677,
883705085, 2658882772, 3547515338, 1819500595, 883705085,
1213485394, 1645121444, 2914546594, 1554324281, 3007787703,
76754721, 2914546594, 1258273773, 818311023,
76754721}, {4065669017, 3661081858, 1484817937, 3193892819,
4065669017, 65180262, 1244284192, 3193892819, 1241147206,
1638833719, 3892091460, 3440632377, 516833304, 208329741,
3892091460, 3548346666, 3633562083, 208329741}, {481582993,
1085004492, 92615274, 2738501771, 481582993, 2549334483,
2065816860, 2738501771, 2123592067, 1587534941, 4203644834,
1807063723, 3363710472, 3045996322, 4203644834, 3018954094,
2937231253, 3045996322}, {1744694238, 4063014065, 3026136269,
3203024096, 1744694238, 171128417, 3004026049, 3203024096,
1526982153, 2620744026, 2824889612, 2591924509, 973989398,
4229091400, 2824889612, 4248467238, 3067248771,
4229091400}, {1824377964, 4150561364, 2476308872, 2328554752,
1824377964, 2311173985, 617021739, 2328554752, 3910342111,
3678401252, 1733825396, 1498830772, 3595011575, 155765556,
1733825396, 1766411440, 896794637, 155765556}, {2096044974,
1990752347, 3594149900, 336736481, 2096044974, 758872195,
1251419294, 336736481, 448304887, 1869361317, 4150949449,
370948546, 1621800814, 2079664039, 4150949449, 161492691,
3542921599, 2079664039}, {4049251324, 4172483669, 3176703166,
686753864, 4049251324, 1057451785, 3522730141, 686753864,
1013007249, 1858040314, 2699994965, 1860661676, 33685366,
1534819456, 2699994965, 3381126640, 3694023951,
1534819456}, {1126833245, 2376805191, 3098917825, 1255326086,
1126833245, 1906919843, 3227066290, 1255326086, 1943662723,
1329889956, 1513259363, 1646202100, 2772391079, 936646890,
1513259363, 2750991489, 1650686446, 936646890}, {4273398097,
4142084221, 895459484, 1011704985, 4273398097, 1572508948,
901597436, 1011704985, 656756723, 4060574487, 3519120535,
680330904, 3844798444, 358645418, 3519120535, 1193359799,
3159983370, 358645418}}, {{272266053, 3202556526, 824109230,
3491341751, 272266053, 2805337292, 3414470157, 3491341751,
1438760812, 2576849579, 3482051486, 3119495098, 861657108,
1816075033, 3482051486, 959489356, 3570111203,
1816075033}, {2166856449, 3467845248, 1026946745, 3627115412,
2166856449, 1565169824, 2345042216, 3627115412, 135412706,
3558527186, 4064489450, 2441164913, 1427441010, 4240216888,
4064489450, 2102314945, 2891584407, 4240216888}, {1110795947,
3937816720, 3388382946, 2291501743, 1110795947, 1204345078,
3432007410, 2291501743, 1796100563, 2994150339, 2783614947,
4028058908, 666348656, 714331518, 2783614947, 3438751245,
510361535, 714331518}, {989966800, 3692472918, 1995297400,
3256876154, 989966800, 1254783743, 1240505488, 3256876154,
1889419951, 3837485479, 3579773554, 3266584309, 3494391959,
2918371295, 3579773554, 3469357011, 65155283,
2918371295}, {283408731, 2940099205, 122099302, 4090106148,
283408731, 2200320349, 3626838486, 4090106148, 521490557,
2470805834, 3370632392, 3635042205, 2653865633, 3134087640,
3370632392, 2001875743, 1726772477, 3134087640}, {3220365318,
345099279, 2354954698, 2792365660, 3220365318, 3837739111,
2932916238, 2792365660, 571041820, 4160828645, 3178785388,
1464413084, 2245897492, 3980269912, 3178785388, 3326058682,
827591035, 3980269912}, {1734943164, 3992671774, 3391219055,
1361586333, 1734943164, 1497398241, 3596854557, 1361586333,
433605297, 656756305, 3209395492, 475039019, 1456839203,
3838369007, 3209395492, 3736654382, 1817769266,
3838369007}, {2129675196, 2673457552, 1863853990, 285700890,
2129675196, 2515316194, 496017472, 285700890, 3206226875,
2547086826, 236489012, 2687043642, 3802558529, 2959420453,
236489012, 4082486022, 1365016881, 2959420453}, {2862445665,
1391694591, 1493776713, 1650194984, 2862445665, 1976795057,
1332249597, 1650194984, 973623943, 4146368994, 2425083298,
4207783604, 2343308094, 2268472775, 2425083298, 2017673156,
2998711045, 2268472775}, {3701118467, 736035597, 3941003137,
1526438461, 3701118467, 2147788862, 3655101605, 1526438461,
2706649805, 3430848085, 1750164979, 675214442, 3906274368,
4009083507, 1750164979, 2269130155, 3665365201,
4009083507}, {3044457518, 2139033800, 3072081745, 2171102052,
3044457518, 228850836, 144159836, 2171102052, 3376280295,
492641640, 2039626956, 3185748306, 2021802750, 4156472107,
2039626956, 2671841406, 1688870624, 4156472107}, {686457718,
755015447, 3473541724, 1141176790, 686457718, 2796763418,
2183048631, 1141176790, 2643396669, 2334756085, 969269805,
3829792024, 2232088910, 3501988208, 969269805, 3464182128,
1843500325, 3501988208}, {1380374946, 2734398888, 17136384,
3735957229, 1380374946, 318536107, 3113401503, 3735957229,
3266495269, 2392343948, 2880815392, 2994320379, 3315636301,
2418434623, 2880815392, 3150458627, 52024108,
2418434623}, {1274724480, 2699576266, 2113140763, 1801951193,
1274724480, 1607594103, 1301873229, 1801951193, 3205194636,
805980718, 1744876439, 2606047828, 3601318802, 2391580866,
1744876439, 4198050752, 3151709665, 2391580866}, {987811898,
4091718169, 1949508729, 1139329784, 987811898, 99059985,
3213093274, 1139329784, 3230719414, 2898427019, 1553680774,
1744324377, 4218240851, 1372522934, 1553680774, 3182958666,
3257212689, 1372522934}}, {{3237660221, 4216039401, 1394381051,
255160418, 3237660221, 1987092456, 772100749, 255160418,
4163770641, 2853212443, 3664909559, 4240262918, 3543921700,
4185325422, 3664909559, 3200044912, 2762854843,
4185325422}, {35690583, 2638220304, 2918902946, 2713747584,
35690583, 2300605925, 627765421, 2713747584, 2133915627,
3195556801, 1893990098, 3017062825, 2982567031, 2618500928,
1893990098, 1470046497, 4237264351, 2618500928}, {3392507624,
3007592186, 1734374648, 3355201178, 3392507624, 3710123662,
21747645, 3355201178, 3164025833, 318989836, 808588626, 2471149225,
1429672146, 641329688, 808588626, 450784177, 3426896833,
641329688}, {2820705344, 3831143549, 124590158, 998684270,
2820705344, 1791772791, 4292130625, 998684270, 2587549655,
4145063890, 1678569574, 1477016185, 4162480901, 1868464655,
1678569574, 1652841784, 3407681142, 1868464655}, {602601455,
4148291214, 1353949463, 4013304348, 602601455, 1500039745,
3896702514, 4013304348, 2530632658, 1180999153, 2419594038,
887698817, 316099898, 69281710, 2419594038, 4228064347, 4088711405,
69281710}, {1755670478, 2257707516, 1006007080, 1802646553,
1755670478, 129865302, 3446926966, 1802646553, 3910080826,
2779761666, 3168708136, 696413464, 2121517268, 346858981,
3168708136, 1550050752, 2885211332, 346858981}, {3270221196,
2874806771, 3806381796, 1121257151, 3270221196, 3487725992,
1343441704, 1121257151, 3353460660, 2034770009, 1049849374,
3174523216, 614261770, 2366501106, 1049849374, 504742298,
551117609, 2366501106}, {3737164414, 861393946, 4046686626,
3200877282, 3737164414, 3717741518, 497605289, 3200877282,
978482299, 2338974139, 3840340879, 2979958414, 3493367465,
792188465, 3840340879, 2219407026, 4251338402,
792188465}, {273136529, 2827300476, 1734661616, 1180106193,
273136529, 613899434, 1992165781, 1180106193, 1533791420,
972305202, 1875898471, 991370047, 3107469479, 1992388545,
1875898471, 2383614785, 3500603104, 1992388545}, {3717279042,
485566112, 976459397, 201453184, 3717279042, 3983740037,
3145469059, 201453184, 3183253558, 2135933046, 3833824001,
1909447704, 1333287789, 3859433262, 3833824001, 1297631730,
3764728773, 3859433262}, {1955228687, 2025327163, 1805580258,
2570033843, 1955228687, 2291064609, 3462716646, 2570033843,
4060753555, 4060835728, 4211749705, 3884788913, 3773211035,
993512225, 4211749705, 1584936902, 2022469554,
993512225}, {3994478979, 1296267255, 12048398, 1293997566,
3994478979, 2104529013, 1141681757, 1293997566, 1649247358,
3722753261, 709433989, 2997676666, 1807326569, 102264893,
709433989, 2717349223, 4038432252, 102264893}, {2339553121,
611844526, 3627910700, 3669986224, 2339553121, 2878718099,
1616065704, 3669986224, 36587589, 2621779190, 3373892447,
942431365, 497306228, 339178967, 3373892447, 2025011469,
3410598209, 339178967}, {1239287374, 1435662521, 684416427,
3409985649, 1239287374, 546146378, 2357026796, 3409985649,
1277127010, 2652264596, 3809824315, 3162683019, 576285090,
4020257258, 3809824315, 2346103599, 1217293203,
4020257258}, {1786554346, 2368337584, 635907087, 3285617675,
1786554346, 1532451420, 629763589, 3285617675, 3909645761,
2992575592, 2292821512, 3227293849, 1671163098, 3612594858,
2292821512, 2670550158, 4014948754, 3612594858}}, {{2031413512,
2413249929, 3806926970, 300934584, 2031413512, 893043137,
3179822945, 300934584, 2665561897, 1242316901, 1674533845,
3571222880, 3572905305, 478845700, 1674533845, 882114621,
2378167062, 478845700}, {1375339216, 3632503316, 847617977,
3071835354, 1375339216, 4101127251, 2575196237, 3071835354,
1417581911, 994061750, 1228103463, 3741735502, 1280685025,
2636090868, 1228103463, 71690719, 1972761498,
2636090868}, {849327659, 791369590, 1702248031, 22126256,
849327659, 1727979207, 3556899267, 22126256, 608673033, 913428082,
2592791249, 3195761319, 2079317731, 2765592972, 2592791249,
2089192298, 3759047976, 2765592972}, {219265369, 667779292,
823714885, 3296604805, 219265369, 1742777145, 898095468,
3296604805, 2747488994, 3689457190, 2892200461, 2287613563,
1505716533, 1156725261, 2892200461, 221922891, 1100755307,
1156725261}, {1605468910, 3890474462, 549943099, 471433423,
1605468910, 585977516, 2651735970, 471433423, 2021014596,
2031908929, 328778061, 2300190858, 2662750501, 716602832,
328778061, 2726490354, 851112058, 716602832}, {365568357,
3029426194, 3430128519, 1933183379, 365568357, 1024311233,
2250823873, 1933183379, 3574350911, 167534374, 3257415168,
1879184234, 1374937136, 131535614, 3257415168, 1837882588,
3548535605, 131535614}, {1034741107, 2229554511, 1441524697,
2265105869, 1034741107, 4125786414, 2758013402, 2265105869,
1074178830, 2645770575, 326215900, 934922655, 644217952,
3131954528, 326215900, 515553914, 4223897546,
3131954528}, {852668118, 3360242076, 3856381322, 3040506537,
852668118, 161827078, 3626115220, 3040506537, 2640209692,
2107521044, 1724967466, 1064364031, 3296353235, 1387244644,
1724967466, 1243609165, 3135090808, 1387244644}, {1100263009,
2979752371, 1821404425, 1699738363, 1100263009, 986094547,
1614499929, 1699738363, 1759590520, 2055866774, 904942395,
1740053234, 1082125969, 3553696508, 904942395, 3010078874,
1838778520, 3553696508}, {1879092096, 683526141, 2312908564,
139707225, 1879092096, 1433984713, 1240918485, 139707225,
2924772732, 3260026686, 4196923158, 2121157575, 1728334192,
3119621613, 4196923158, 1814247924, 1315612557,
3119621613}, {2874853246, 801552897, 3498217066, 3020672513,
2874853246, 560852972, 2792908235, 3020672513, 734281890,
626139314, 165819164, 1793847168, 4107093429, 2937371214,
165819164, 188698803, 1166433733, 2937371214}, {3966797569,
398444428, 148226011, 3695424616, 3966797569, 1908468872,
3842289671, 3695424616, 1273298591, 3252339857, 4271993187,
1015567999, 220391906, 3163881167, 4271993187, 3497026624,
782407274, 3163881167}, {2557053510, 879944047, 329222739,
1882986510, 2557053510, 3851092045, 1575791639, 1882986510,
2064408671, 1022102285, 1294483608, 475773092, 1976081659,
2979261113, 1294483608, 4198564999, 785084267,
2979261113}, {767606769, 1175453562, 3110077662, 1649912214,
767606769, 3513898624, 2308040905, 1649912214, 2589207489,
524249228, 924857915, 636732539, 2263148832, 3611909575, 924857915,
2750521871, 4105703164, 3611909575}, {533765646, 2982755296,
3821540954, 2261240934, 533765646, 786056531, 2690223275,
2261240934, 3033779718, 1829003645, 1911212005, 3313229424,
2267599794, 1575367126, 1911212005, 14632745, 4165853368,
1575367126}}, {{1731798531, 3864297722, 632196679, 4151553160,
1731798531, 266522866, 4177051283, 4151553160, 3734246393,
2656231333, 170903528, 2524982332, 1322162887, 2822471992,
170903528, 2071407475, 2034317853, 2822471992}, {505782858,
807915248, 3235634082, 1087914338, 505782858, 2764925057,
2384195794, 1087914338, 1694175127, 4015529545, 1313746234,
1397638018, 1705346273, 3653936868, 1313746234, 2484299328,
3893194049, 3653936868}, {1628360471, 1872576407, 745146577,
3134263190, 1628360471, 1502689349, 1846865568, 3134263190,
1144818794, 2947483756, 3981440524, 1358765607, 2447807956,
4185157227, 3981440524, 2907095532, 3109351418,
4185157227}, {1752665661, 3862435696, 3120241607, 2353776151,
1752665661, 890570951, 75909174, 2353776151, 2402749950,
3076201597, 3200005334, 2850728736, 3486207172, 4129760842,
3200005334, 1677834656, 1671665759, 4129760842}, {1513347386,
3705157080, 2714968311, 4185116610, 1513347386, 2614208527,
172833166, 4185116610, 374253953, 3231918599, 2298744567,
2330358092, 135093029, 1131733436, 2298744567, 1598967643,
81250788, 1131733436}, {4017988014, 2502864860, 2798362495,
694165767, 4017988014, 2894272037, 2991626149, 694165767,
550314318, 1974819238, 4056638835, 2507555368, 3282401178,
992465420, 4056638835, 149190055, 2265909379,
992465420}, {3404093702, 2907722371, 2867366571, 1322786865,
3404093702, 2682048205, 2845707467, 1322786865, 2974557971,
806288438, 3471100018, 2044779754, 3015781283, 3659578224,
3471100018, 1305362706, 2393789362, 3659578224}, {1230515664,
986791581, 1988835001, 3580155704, 1230515664, 226153695,
949770784, 3580155704, 2427906178, 2093834863, 32183930,
2824425944, 1022607788, 1464411153, 32183930, 1610723613,
277697599, 1464411153}, {2506125544, 3669791663, 775696386,
1634721384, 2506125544, 3041821170, 2616651789, 1634721384,
283819668, 80483238, 2597884302, 1395534074, 2915827764,
1987024055, 2597884302, 2484602355, 4135938407,
1987024055}, {3460687767, 605494422, 2565743611, 1879169431,
3460687767, 1033460664, 4043263051, 1879169431, 3398828375,
1338091799, 1573680955, 3157719367, 4011011631, 34724903,
1573680955, 949030629, 1570781952, 34724903}, {3276033022,
1203174334, 835672191, 3947147720, 3276033022, 1781397252,
3785816696, 3947147720, 1303193959, 1643267001, 1555067592,
2107733056, 2780635372, 2240826378, 1555067592, 2608610996,
442350942, 2240826378}, {2852643415, 3796282786, 3557895539,
4037183513, 2852643415, 2752767565, 346268022, 4037183513,
2162922488, 2040329321, 3161813725, 811227116, 3178255601,
4289888328, 3161813725, 546274137, 3225021158,
4289888328}, {1873372707, 3058850860, 91264285, 3881849593,
1873372707, 184531858, 3318066443, 3881849593, 426687651,
956040185, 1095411048, 3027703191, 2531249536, 595347446,
1095411048, 3288854391, 2647221661, 595347446}, {2203270828,
507148898, 630380767, 3154023630, 2203270828, 1676334486,
416023733, 3154023630, 1862419773, 1532439149, 3648214651,
755781963, 3530082916, 2967502836, 3648214651, 3871125195,
3663678340, 2967502836}, {2320752269, 4085123711, 496641720,
2378609553, 2320752269, 3399757898, 962889191, 2378609553,
3308347692, 1449798259, 3228258920, 3015230918, 4233660609,
3194255763, 3228258920, 1669702817, 2225459107,
3194255763}}, {{4290900039, 280623348, 4178980191, 3199155377,
4290900039, 1901920839, 3106427820, 3199155377, 1774047142,
698814233, 3674905362, 4270409313, 3572939265, 3492361727,
3674905362, 3167429889, 1027004383, 3492361727}, {826363896,
614326610, 262952343, 1934119675, 826363896, 2997767678,
3188270128, 1934119675, 3567524348, 3962062826, 2208329119,
3741227945, 1933248566, 880482061, 2208329119, 4070445105,
205175925, 880482061}, {56103715, 241829097, 2460038793,
1035961073, 56103715, 2811324520, 4075809725, 1035961073,
3698455822, 3869031993, 83639542, 3469185293, 1230302420,
2271948201, 83639542, 329763193, 2411903234,
2271948201}, {1177260757, 4017252267, 305959988, 4288164505,
1177260757, 4273461426, 2481284279, 4288164505, 1625613062,
3134382704, 107217966, 549462420, 784865788, 4184605179, 107217966,
567967482, 1189429800, 4184605179}, {2433373148, 2196772650,
4064227780, 408996081, 2433373148, 188316916, 920035025, 408996081,
1657229416, 2269902114, 3498738340, 569048495, 1620020757,
1066805703, 3498738340, 673478093, 135019560,
1066805703}, {3057838997, 4011682346, 2269698346, 429648601,
3057838997, 1272075175, 2955466274, 429648601, 3536417809,
2361530061, 3010784539, 3692171012, 3667065093, 1386364785,
3010784539, 3001857777, 4079260578, 1386364785}, {96395161,
539371694, 1092556550, 827335330, 96395161, 3763169571, 1578218079,
827335330, 4104962200, 2727718898, 4288838559, 3404279696,
692013694, 1225699600, 4288838559, 1444753071, 2669405325,
1225699600}, {379097734, 3625475947, 2084056909, 333342539,
379097734, 2944208672, 418300166, 333342539, 337929267, 3505648581,
409954030, 3398162498, 1044831206, 2732536445, 409954030,
3374588386, 1231107067, 2732536445}, {3902359972, 1884754759,
2552848969, 439909833, 3902359972, 2004746826, 2310151238,
439909833, 2935091495, 1630646333, 2061686186, 4161410671,
2141557557, 4148946889, 2061686186, 4175695071, 2900071714,
4148946889}, {2963456150, 2337231210, 2625600235, 1918117806,
2963456150, 1946098288, 635887182, 1918117806, 68058625,
1546467979, 151877124, 3634975465, 2638755179, 3249719425,
151877124, 1637240461, 4215633308, 3249719425}, {491901437,
1160228383, 1595599448, 4125397389, 491901437, 195821118,
4074509725, 4125397389, 3427241569, 2961422663, 2559990958,
3684816933, 1117957823, 1193593392, 2559990958, 2942528735,
2689560428, 1193593392}, {1170497671, 2906222607, 3094336698,
3627573759, 1170497671, 1921927973, 545164662, 3627573759,
1700493457, 3182943863, 4262743528, 2878757823, 4114669800,
2408251701, 4262743528, 4165007723, 89238831,
2408251701}, {2763247569, 2610183051, 1881736284, 690961809,
2763247569, 3182419779, 82242357, 690961809, 571239721, 963252233,
4084860538, 2153606867, 2588542673, 4169894927, 4084860538,
54942416, 890597888, 4169894927}, {1220844336, 817590918,
3926254763, 1216196496, 1220844336, 2113496301, 4089812320,
1216196496, 575329368, 1889914987, 2045407448, 1637651789,
3463310486, 1831049905, 2045407448, 325575207, 2380192587,
1831049905}, {1282026768, 1664481221, 564050910, 564995505,
1282026768, 3701549914, 3363706805, 564995505, 3665115444,
307883520, 1892548273, 3183946628, 2137982777, 3442398959,
1892548273, 4118662788, 3432627472, 3442398959}}, {{3514751918,
2021244538, 732435953, 1307754719, 3514751918, 3736721708,
1214919992, 1307754719, 1189899255, 1609425246, 3704346564,
632453145, 293694496, 2169560691, 3704346564, 637306236,
1076348534, 2169560691}, {1335684482, 3977541427, 2079007086,
1533534334, 1335684482, 1497577018, 525643282, 1533534334,
4089172695, 547142630, 3056527841, 3044937468, 1924239834,
372115891, 3056527841, 2830541169, 3928812480,
372115891}, {2514447281, 1397973230, 978015612, 2992100005,
2514447281, 2850360626, 2852461785, 2992100005, 851614119,
481162011, 1157890357, 2953685245, 3280219833, 3652440052,
1157890357, 3140353867, 1383186997, 3652440052}, {3274545167,
938934351, 4028573983, 2762754934, 3274545167, 3057872364,
3846844247, 2762754934, 3075256652, 4092197741, 1025573319,
4036331438, 3276803366, 1660852083, 1025573319, 1389092450,
3635660815, 1660852083}, {1260428167, 2360949430, 3595448064,
2626409409, 1260428167, 1188404397, 1479462144, 2626409409,
1356476668, 1426976484, 2903240813, 2695585089, 3854329533,
2683005143, 2903240813, 1291869629, 1323793242,
2683005143}, {2839247420, 1231881661, 1109942153, 887967109,
2839247420, 3381172536, 3655047107, 887967109, 1027674032,
718571338, 3730950473, 1091419714, 2191610434, 56767734,
3730950473, 111909274, 116994667, 56767734}, {2433847057,
4032596403, 2089286798, 3716427472, 2433847057, 2713114448,
3899800153, 3716427472, 3084422684, 4255875513, 3157633492,
3413552779, 2821500332, 336318787, 3157633492, 10025372, 391538001,
336318787}, {175667448, 1723133625, 4182510911, 2880151048,
175667448, 1121709186, 2523330453, 2880151048, 2597859300,
1136777988, 52572783, 2636566855, 112458461, 1360732901, 52572783,
4101068693, 2887812973, 1360732901}, {1051621590, 1009714413,
1291607291, 2637245980, 1051621590, 2616267354, 1563476086,
2637245980, 3475298403, 233353990, 2587358367, 3540845540,
387836582, 1331694098, 2587358367, 1604100349, 2366659802,
1331694098}, {2035640885, 4234718111, 3625348694, 279002466,
2035640885, 4167581230, 2095974483, 279002466, 1941802004,
1701138269, 1235170544, 2241385612, 2268885372, 1762344008,
1235170544, 1088819811, 2459681948, 1762344008}, {57358912,
3574101205, 4270777765, 3417005932, 57358912, 1810269500,
3584759597, 3417005932, 1290050021, 3162257498, 3896348330,
2477002427, 1553018822, 1296379974, 3896348330, 414111967,
3523590314, 1296379974}, {801344824, 3108506226, 1884501834,
413417024, 801344824, 2180315267, 3359465250, 413417024,
3721817818, 3907528846, 1698926010, 2283920639, 528804686,
4095500737, 1698926010, 2447600887, 3644956310,
4095500737}, {4221808861, 2192890492, 1838057295, 4244875377,
4221808861, 1303998102, 840007089, 4244875377, 2781849698,
695884695, 1150569670, 4149754370, 723778947, 2633360590,
1150569670, 3367546537, 1147146201, 2633360590}, {2171606265,
713066341, 4157990372, 3449094041, 2171606265, 2041045785,
3892718001, 3449094041, 340279647, 589816719, 1079502304,
3997377532, 1106842543, 777934113, 1079502304, 2833356628,
3692238559, 777934113}, {1483124813, 1641736987, 1646411632,
2128346339, 1483124813, 1515066999, 1953783071, 2128346339,
4175709263, 2380480324, 1873392190, 3167585478, 3155942303,
656161001, 1873392190, 1960196174, 2754060215,
656161001}}, {{3179865161, 3758716888, 391120388, 1404283933,
3179865161, 3736767353, 2982534313, 1404283933, 484148868,
2531965909, 3094157668, 2278849016, 3821833900, 3455696508,
3094157668, 3978804036, 536919193, 3455696508}, {3622350766,
3759983985, 1500151924, 1128973399, 3622350766, 1613561693,
2133702321, 1128973399, 2138867468, 2446306581, 1433592392,
808215503, 3671109604, 2125991744, 1433592392, 3790557569,
890897326, 2125991744}, {1052744045, 2541155134, 346250782,
709982328, 1052744045, 3808746634, 2430723917, 709982328,
1272813809, 4277058832, 284988983, 2044803401, 3227966904,
3177388361, 284988983, 2994552097, 1344488735,
3177388361}, {90498489, 1855245979, 4220122612, 3786576552,
90498489, 2810527099, 2698781808, 3786576552, 3027706760,
3048775967, 1789634890, 2252266098, 4130736474, 3524411799,
1789634890, 1838275365, 932865240, 3524411799}, {926043968,
3308482993, 2366952216, 2715877927, 926043968, 2161133226,
950345843, 2715877927, 2435935618, 143197755, 601331389,
4140205762, 8821334, 1621299559, 601331389, 1150597024, 134090481,
1621299559}, {3170751630, 2219637217, 3470130442, 1131439682,
3170751630, 191193907, 1152586716, 1131439682, 2105864857,
3700793188, 3866921981, 3877644423, 4148023628, 1433585261,
3866921981, 3337966045, 3765830124, 1433585261}, {4200333550,
3887313678, 1548670082, 3967050176, 4200333550, 4242277964,
1648310922, 3967050176, 3517450384, 1088836814, 1272105141,
4003580220, 1398895464, 372981067, 1272105141, 3637238, 2188645829,
372981067}, {3707591763, 2533383962, 2667061910, 1110440720,
3707591763, 2163873618, 917457922, 1110440720, 3739389517,
2648614071, 983864203, 314407045, 3734776305, 1773339925,
983864203, 1999624391, 948403862, 1773339925}, {3399685405,
1498259552, 335977594, 1489481084, 3399685405, 932745691,
3023594692, 1489481084, 3340552745, 1760608012, 1937217042,
898324201, 4125187770, 646948566, 1937217042, 4163160567,
4023984155, 646948566}, {2274644307, 1526117760, 2809236743,
1300134112, 2274644307, 610154273, 819649010, 1300134112,
611178153, 728332364, 4016270358, 1482551502, 2291620547,
1218215302, 4016270358, 4014370276, 2738902870,
1218215302}, {1523867240, 406601871, 2458752809, 4184767280,
1523867240, 2006800, 2291982710, 4184767280, 2319533484,
1160593741, 2058090716, 162997241, 1081017808, 3225020759,
2058090716, 2792768896, 3647877010, 3225020759}, {1120982854,
2157902557, 1107018173, 213023128, 1120982854, 1289665822,
821316937, 213023128, 3440567542, 2188623418, 2529043017,
2142588179, 1858653225, 1928167136, 2529043017, 1690025039,
2078532030, 1928167136}, {1482492050, 1864485675, 608924704,
82600475, 1482492050, 3831331653, 1226399920, 82600475, 876807571,
1520021961, 3063342812, 2041963354, 3012650770, 2515926148,
3063342812, 94132572, 4275611558, 2515926148}, {2722631375,
2979211453, 145303850, 2233528074, 2722631375, 2776611616,
2704349698, 2233528074, 3225924699, 4066039751, 3636044049,
668554182, 2691791214, 588894479, 3636044049, 2806555799,
2464126779, 588894479}, {432525877, 2453123524, 2553894927,
2974832964, 432525877, 3079645759, 3526001605, 2974832964,
1249623772, 3176081393, 45532245, 3846904746, 3249509424,
407344860, 45532245, 4255131403, 2513899191,
407344860}}, {{3291428549, 2072837757, 1193168720, 1812182123,
3291428549, 3422065122, 3693349190, 1812182123, 1545226000,
4014658840, 3760936985, 3758858458, 1003573324, 321802921,
3760936985, 2112167358, 1099164995, 321802921}, {4088518247,
1282224087, 3991553306, 2131723358, 4088518247, 2882890127,
3262178024, 2131723358, 3230096243, 2825656399, 4124675351,
94452540, 2527961345, 2196438580, 4124675351, 4266375092,
805386227, 2196438580}, {1799810299, 626882716, 4054810152,
2080028644, 1799810299, 720257098, 2684719080, 2080028644,
3232052958, 3634311070, 3298480980, 1603312853, 2043476219,
962978332, 3298480980, 204693794, 3814399969,
962978332}, {3410986694, 2846958957, 3684514720, 1722796810,
3410986694, 3350228505, 3697719854, 1722796810, 301207261,
1773862183, 2624855312, 4098470056, 2708679078, 66735368,
2624855312, 4186703168, 2228005807, 66735368}, {3301255996,
3260599264, 2165923343, 1216601290, 3301255996, 3331397611,
725463727, 1216601290, 3425332209, 1882092413, 3267253246,
1902786417, 2645471852, 3117234458, 3267253246, 98300176,
4042105808, 3117234458}, {1890913372, 928307593, 225727143,
3319692776, 1890913372, 1715640681, 3795749903, 3319692776,
3625524738, 2881230326, 562558814, 413766233, 1706424966,
320933009, 562558814, 3744070526, 1915174474,
320933009}, {2958499183, 1848775765, 1522753070, 3551649532,
2958499183, 4162618234, 1367036681, 3551649532, 4095128844,
3800820679, 2873503619, 2279579324, 3537737308, 3934588882,
2873503619, 3461269579, 2254841117, 3934588882}, {1285250577,
2764245175, 1105070646, 4236235786, 1285250577, 3540401964,
3871128158, 4236235786, 1532963114, 3597347398, 1105106652,
2862886282, 4047666135, 3072642883, 1105106652, 2690305546,
2746897053, 3072642883}, {3860245778, 1367204462, 595903220,
2246104180, 3860245778, 1908472715, 1596486723, 2246104180,
3024009600, 1682550309, 2320235487, 1630519363, 144911261,
3363285863, 2320235487, 720496218, 3226989775,
3363285863}, {2047742419, 2109897740, 654443081, 2563937648,
2047742419, 3298575982, 2746910512, 2563937648, 3740934706,
1622747589, 372840925, 1201903815, 1901161856, 1498353481,
372840925, 1424606567, 3428325510, 1498353481}, {1720720378,
838556730, 1562000691, 3081063009, 1720720378, 3175910157,
59642517, 3081063009, 1880202235, 2942138329, 2564328310,
3243866790, 1141460510, 379222555, 2564328310, 4276165749,
568929237, 379222555}, {2604901554, 2798059827, 3134968130,
1728254085, 2604901554, 1505600996, 119873755, 1728254085,
1240524792, 2313448358, 2617184648, 2102395010, 3119630359,
3754310983, 2617184648, 3947637114, 2829438112,
3754310983}, {738302419, 4067455371, 3502068253, 2861980889,
738302419, 3223237546, 3631218395, 2861980889, 3816578609,
1243021017, 3136524018, 2500342427, 2080374278, 3991087733,
3136524018, 96564230, 264838384, 3991087733}, {632183943,
2630662559, 152578308, 1067606885, 632183943, 2025997689,
482418964, 1067606885, 1468859634, 2296152269, 2517499860,
3966641526, 3237758154, 2033651727, 2517499860, 2324222273,
3918276995, 2033651727}, {1609491518, 2406429337, 1015003691,
1540163028, 1609491518, 3217817937, 3170708090, 1540163028,
964868630, 1672929833, 62402909, 3631636823, 4124674991, 526124226,
62402909, 4260427257, 946832060, 526124226}}, {{4004365908,
2299166464, 4238928187, 3068599594, 4004365908, 340633153,
3034582784, 3068599594, 210906218, 987156327, 3027413363,
3872967050, 3159432673, 232906611, 3027413363, 4051554873,
3873338256, 232906611}, {3948054919, 3438340286, 2399101442,
3955606166, 3948054919, 3199954992, 3081246407, 3955606166,
2274701639, 4288868534, 1447386846, 816212890, 2670438424,
1160686753, 1447386846, 1635979789, 3676603152,
1160686753}, {3333667032, 2404397407, 1309772249, 4037535932,
3333667032, 633837171, 1219209632, 4037535932, 1699759143,
3773572468, 792615675, 4000461186, 2249558877, 232406022,
792615675, 351811028, 1438391315, 232406022}, {3042453580,
2674782930, 2578519273, 1182303684, 3042453580, 4188299661,
201533985, 1182303684, 504137100, 405299994, 3440193648,
2652790808, 3520937545, 3825238244, 3440193648, 2362389441,
1445162354, 3825238244}, {3320840707, 3026546742, 128640966,
2717374630, 3320840707, 1375705778, 1743344011, 2717374630,
592752793, 1485833084, 2565782177, 3520546605, 595979811,
1153297111, 2565782177, 3755481813, 1584881761,
1153297111}, {3450980261, 1054833852, 1618563336, 3074764013,
3450980261, 1199760826, 228550476, 3074764013, 1370637124,
135106935, 4182411213, 2398102705, 3692855966, 2264796250,
4182411213, 4156333842, 1995295374, 2264796250}, {2907234375,
1092238667, 3445439485, 3710031515, 2907234375, 528870942,
2854732050, 3710031515, 1611431067, 3580357515, 3411234559,
3111723660, 3713963703, 1510709042, 3411234559, 4137143940,
3654924984, 1510709042}, {1076783073, 164438428, 1634588989,
2578452047, 1076783073, 261861891, 3140440866, 2578452047,
1382964588, 2069333087, 1455977136, 1039994763, 2433255524,
1984094858, 1455977136, 2027397575, 532165989,
1984094858}, {2532185665, 1135289565, 3062805500, 2611640170,
2532185665, 2260058290, 3719838959, 2611640170, 1806082255,
2620524685, 4033679701, 364359692, 3145117592, 4162460747,
4033679701, 411576745, 2706572732, 4162460747}, {3008710738,
1490567105, 2020031417, 3585495714, 3008710738, 316260234,
1122119678, 3585495714, 4176701833, 3084567190, 2736776780,
1110199666, 1496307045, 154841809, 2736776780, 1275201873,
1322574605, 154841809}, {3799059956, 126838375, 2467448911,
3275952471, 3799059956, 1671921785, 58436504, 3275952471,
182268988, 1090233980, 408914598, 902537192, 4175648912, 433123787,
408914598, 3772232739, 1875701401, 433123787}, {2667038252,
2896444472, 20879392, 253397989, 2667038252, 476551235, 3710003391,
253397989, 2923250664, 1792877257, 3514952071, 4201381296,
3331739241, 1114696906, 3514952071, 249072369, 2176108884,
1114696906}, {1727138303, 2412458112, 779768765, 807167497,
1727138303, 380260625, 1615189373, 807167497, 1064250869,
4288045420, 3452261767, 827316818, 4208599283, 259951996,
3452261767, 522912574, 3605790986, 259951996}, {2363989886,
17579290, 1962366311, 1271400610, 2363989886, 3424373120,
2529737790, 1271400610, 942228369, 1351149998, 214293969,
3059547798, 3309253011, 2041200841, 214293969, 1187066710,
2941656966, 2041200841}, {1948744398, 4036241737, 1564775693,
1805077493, 1948744398, 93501401, 2145592161, 1805077493,
2365733982, 3044908159, 1626832916, 1236581455, 4145982416,
4009535226, 1626832916, 810090874, 1876014673,
4009535226}}, {{2908898908, 86050422, 1213100579, 526867394,
2908898908, 2687252475, 3102803247, 526867394, 2529186343,
3912995069, 2033851810, 3686294589, 3843367307, 3680843319,
2033851810, 3516795313, 2332949611, 3680843319}, {1053552056,
4002399925, 1635227281, 538108523, 1053552056, 4193870709,
790337895, 538108523, 2690118316, 455633660, 641380480, 1723000412,
2145297779, 967423689, 641380480, 635932799, 1724183394,
967423689}, {3122368790, 2578750044, 3794884445, 2678342194,
3122368790, 3769235275, 4048456688, 2678342194, 1934635577,
4269455046, 752843557, 1546279541, 3382133383, 743327961,
752843557, 2078921540, 3817146458, 743327961}, {166243972,
2128782357, 428569070, 4205383007, 166243972, 1095349745,
1812304090, 4205383007, 2123712957, 1399334152, 273828453,
4084843716, 3112810093, 2130938335, 273828453, 2511584766,
1534972306, 2130938335}, {1961979279, 585285043, 2284701379,
290505355, 1961979279, 74485575, 4085693250, 290505355, 1735035809,
1059570184, 303326672, 1267207775, 589131299, 2035870992,
303326672, 201201961, 2161881798, 2035870992}, {1271449583,
2116594270, 2103513260, 1640006818, 1271449583, 367329306,
839902291, 1640006818, 894986302, 3658452654, 3431342036,
2713344567, 2641739085, 2966757766, 3431342036, 1627106982,
3557907827, 2966757766}, {3844312173, 3804137266, 183814247,
152874554, 3844312173, 103366292, 2043521996, 152874554, 686371398,
3673954433, 1991737811, 3036158822, 3078932982, 2817075782,
1991737811, 3023637379, 1288143068, 2817075782}, {3621125056,
1927692878, 4262164578, 3569679412, 3621125056, 3808641551,
4107175982, 3569679412, 1330151766, 2777887189, 403188859,
58789558, 2092073970, 168278549, 403188859, 190177712, 541167592,
168278549}, {2411074146, 1949247379, 1954645400, 203894202,
2411074146, 2372145203, 2891151238, 203894202, 2569647272,
1811990187, 100816607, 4147253715, 849410059, 3144790357,
100816607, 145858739, 842938465, 3144790357}, {4188490076,
2913697289, 2596622424, 2121437009, 4188490076, 2548746255,
3060019458, 2121437009, 2049531697, 2928868834, 97657552,
1751146903, 1354192475, 211564399, 97657552, 2827838989,
3139804205, 211564399}, {4115560303, 3882559608, 2364769240,
3413474739, 4115560303, 2038580445, 3031041848, 3413474739,
1032131834, 1654012724, 2129397153, 327748970, 2996610873,
156555722, 2129397153, 1103105463, 3533481890,
156555722}, {2456485740, 834096815, 1818617085, 3732834681,
2456485740, 297727134, 466628750, 3732834681, 4091558631,
4103288151, 3427777045, 1247469758, 589375738, 664028138,
3427777045, 3240810721, 360061317, 664028138}, {2153554308,
1201997039, 1275843034, 3356165919, 2153554308, 727988140,
674985468, 3356165919, 2775330589, 1446814948, 1073331092,
3575267521, 271051737, 62871475, 1073331092, 797503411, 3608447915,
62871475}, {3475172973, 4286690341, 1319538278, 3303040859,
3475172973, 4160798835, 3606461240, 3303040859, 327396059,
2499583263, 3885566636, 1518909811, 2526744450, 3972048177,
3885566636, 4109276056, 7945907, 3972048177}, {798125497,
1759339729, 2716140374, 4045305875, 798125497, 3468584533,
661033330, 4045305875, 3606806185, 2005379293, 2059615989,
46211558, 2598415204, 1818860931, 2059615989, 714940449,
3523832777, 1818860931}}, {{2729678535, 4279360935, 3379124758,
2442739556, 2729678535, 1629626641, 3894922338, 2442739556,
3606295184, 2417192332, 351187677, 3347241070, 1312056270,
634843389, 351187677, 2092828966, 4082275720,
634843389}, {3629539480, 4025926501, 852514024, 4249024666,
3629539480, 3253349463, 919210106, 4249024666, 1052092278,
3298601960, 3083745876, 4200577503, 3370743767, 443276110,
3083745876, 271102234, 1113643788, 443276110}, {3870151604,
2257727047, 4206383948, 3634978526, 3870151604, 1910917093,
4202589308, 3634978526, 2711810981, 2517854433, 4101230989,
4143478832, 3379348808, 3590857185, 4101230989, 2337971568,
3927998336, 3590857185}, {3197545170, 1976203831, 1884529704,
1252747620, 3197545170, 4227339509, 2133571953, 1252747620,
12394571, 3794766611, 144639933, 77963866, 2646475951, 3533393557,
144639933, 3400275098, 764977733, 3533393557}, {1965629728,
3936770970, 3402754508, 755337184, 1965629728, 1742774417,
3051109776, 755337184, 2628942652, 659198105, 3718843364,
2782618441, 2339730507, 878523653, 3718843364, 978544901,
1283972682, 878523653}, {2029416251, 3366782607, 1096100666,
452832640, 2029416251, 2939334015, 1101195955, 452832640,
2331594780, 2300259911, 4096393357, 410940557, 962932343,
914011908, 4096393357, 3635095314, 1379977154,
914011908}, {3265804634, 1220735951, 3404307618, 2513961362,
3265804634, 2722453189, 4113460355, 2513961362, 2216972702,
4057726734, 4229189541, 3321240812, 636990017, 4178486523,
4229189541, 7555275, 91679821, 4178486523}, {3542657885,
1546667401, 745203060, 2671019282, 3542657885, 3173738401,
2847338542, 2671019282, 2986331025, 1981721347, 3274748603,
4238693771, 4164637970, 4064854722, 3274748603, 2872196602,
1198665008, 4064854722}, {914697583, 3663538873, 4110092094,
4108960924, 914697583, 29484944, 716837900, 4108960924, 564816203,
2695814829, 3697835298, 2024864567, 1648946406, 4205748652,
3697835298, 1235798673, 3545268246, 4205748652}, {584665331,
3072929538, 935407479, 1308019352, 584665331, 2721990050,
1824121179, 1308019352, 3475245690, 1969086166, 3000164892,
2494369285, 3213078611, 658075764, 3000164892, 631337149,
868441731, 658075764}, {3716973286, 919140074, 553016820,
1204116678, 3716973286, 675934421, 446248158, 1204116678,
2367599030, 2092234532, 1102760986, 2818928245, 261952173,
4294509396, 1102760986, 1054152352, 3546688363,
4294509396}, {3293370693, 2120370930, 2487351160, 987948282,
3293370693, 2154496016, 836901607, 987948282, 1254243785,
3331497439, 1904530179, 2014861157, 1121637945, 1202027740,
1904530179, 251455117, 1218291611, 1202027740}, {3000951292,
210589449, 1003182103, 1151335465, 3000951292, 3030009760,
379309230, 1151335465, 3387111151, 1555644948, 1096779672,
3425935236, 4229417349, 133998326, 1096779672, 2168017189,
602854428, 133998326}, {99887253, 407199536, 4012293175, 976296831,
99887253, 3245218993, 2444588607, 976296831, 614238014,
1996334021, 184407828, 1130199284, 3959662009, 860183345,
184407828, 2577917907, 3722900937, 860183345}, {1445199842,
1256197293, 3160538267, 1715703880, 1445199842, 1520448575,
2536272162, 1715703880, 2559284397, 1426699760, 1130124341,
158486332, 1138404320, 742136141, 1130124341, 1781321781,
4198515709, 742136141}}, {{92565032, 257065974, 2786645250,
2311336951, 92565032, 1493974713, 2911336433, 2311336951,
2613012997, 3726919367, 885864931, 1895527787, 1344421653,
2279220396, 885864931, 1417574285, 2355957139,
2279220396}, {2509257810, 1146336783, 2979919489, 2776053372,
2509257810, 3770626858, 2204068573, 2776053372, 3424925004,
3410622769, 3175444953, 246118669, 4290541487, 2898100178,
3175444953, 348923199, 2427331008, 2898100178}, {3511408930,
2666607166, 634471839, 2215361770, 3511408930, 1105380130,
3750482090, 2215361770, 2499905758, 2277317349, 488341106,
468535899, 3877861726, 55373162, 488341106, 2739617092, 3987120186,
55373162}, {1949809417, 1194193824, 3088339524, 827894421,
1949809417, 1373055755, 515339473, 827894421, 1474384834,
1145356382, 4182706556, 1201342255, 3696899246, 284442065,
4182706556, 2295560707, 4064194676, 284442065}, {4147063048,
352467977, 104747063, 1464831324, 4147063048, 1737209131,
673124742, 1464831324, 811682426, 1983142708, 3161673906,
808492591, 1200638109, 854963956, 3161673906, 2185360428,
3894612396, 854963956}, {183217259, 533708602, 4048695575,
2367252271, 183217259, 2079352492, 658497461, 2367252271,
1759193844, 270201416, 1613992473, 2390399884, 901075807,
2146747531, 1613992473, 1430380976, 896368240,
2146747531}, {109366125, 3360277248, 552179285, 2202319015,
109366125, 3199388318, 2821035593, 2202319015, 2604083920,
3992043804, 238941078, 3045719234, 957806905, 1033390767,
238941078, 3176316290, 4214343810, 1033390767}, {4062509844,
2060641859, 3019008744, 1289872272, 4062509844, 3433422861,
3700877161, 1289872272, 1825805135, 496247378, 1624967553,
1157949454, 798014134, 656615546, 1624967553, 3724738272,
442908965, 656615546}, {1294152074, 120937588, 3153569646,
3643106446, 1294152074, 3315724193, 3880807273, 3643106446,
1110529092, 1229315533, 1702251945, 1996197369, 520651142,
4085556909, 1702251945, 2017159765, 1465806476,
4085556909}, {473948224, 1333739502, 3487031015, 1764505587,
473948224, 633183200, 3485779436, 1764505587, 3275619611,
2180513704, 2699750107, 2718176683, 1754455852, 2415550298,
2699750107, 3340104045, 2908194857, 2415550298}, {914160376,
176936203, 1218008505, 1466641753, 914160376, 3100257041,
379091939, 1466641753, 1342958838, 320803805, 3280134864,
264991624, 2731178339, 3254967153, 3280134864, 3489468987,
789779004, 3254967153}, {478140738, 20913244, 3630110705,
207065438, 478140738, 3241255407, 2635145699, 207065438,
3649677977, 3398645998, 3122382780, 2205556702, 2257745277,
1328712410, 3122382780, 3614915327, 2710816935,
1328712410}, {2567996318, 2094433326, 2414316512, 2041251744,
2567996318, 2846183244, 2056360158, 2041251744, 1704532635,
1622797842, 3643628505, 4010558652, 1877394215, 353233466,
3643628505, 671027474, 590107503, 353233466}, {1655058924,
2266474132, 3266898345, 3651863350, 1655058924, 2346780324,
363692550, 3651863350, 4083888222, 3110655118, 1845588340,
314920879, 1419069452, 1228972469, 1845588340, 1037408725,
4141615115, 1228972469}, {2202115886, 2204295311, 3164032751,
887636120, 2202115886, 771714323, 1186533451, 887636120,
2543780991, 2775289385, 1980940153, 918854465, 1908695354,
710558276, 1980940153, 1794212991, 2308769492,
710558276}}, {{60268595, 47309624, 4096010585, 4253338264,
60268595, 698444416, 3424495942, 4253338264, 3842597153,
1853486796, 3820679930, 562287964, 2961990151, 265689579,
3820679930, 3009083380, 675056541, 265689579}, {3618850300,
271787962, 2538793204, 2043518992, 3618850300, 1330201507,
1593435980, 2043518992, 2662288323, 3431158427, 4161492847,
1017447188, 226142150, 1675739167, 4161492847, 760605578,
2319843005, 1675739167}, {2352110692, 265547447, 3367780341,
170978053, 2352110692, 175903832, 1258089776, 170978053,
1177494705, 1973739759, 3522463659, 855175401, 590519806,
497640593, 3522463659, 2309875696, 3005114205,
497640593}, {1522130854, 2847712653, 3036916315, 997594656,
1522130854, 1160477043, 2398808739, 997594656, 741020448,
2495653141, 1812793060, 1168460586, 2111094408, 1759873736,
1812793060, 2154570180, 2334568602, 1759873736}, {1217712917,
3526690167, 690665294, 229123252, 1217712917, 668254219,
3329037315, 229123252, 3026480083, 1545370735, 3359371694,
3988997253, 133725548, 3688848963, 3359371694, 1224915758,
1645876077, 3688848963}, {1091790487, 273865704, 2226046119,
1352009588, 1091790487, 554324048, 3683234183, 1352009588,
1700890463, 3676167916, 3472308225, 3380069480, 3967068313,
3284594322, 3472308225, 546295846, 1149793848,
3284594322}, {1875526903, 49030556, 1631562069, 1668228073,
1875526903, 4287083180, 2616427751, 1668228073, 575879716,
2225145569, 2285616099, 1542838632, 4159278264, 1785093079,
2285616099, 3213946784, 53885944, 1785093079}, {1621136330,
784545882, 1553642730, 1889728930, 1621136330, 2042322941,
53329096, 1889728930, 2654964886, 3532023115, 2340275074,
725706104, 2168960688, 317621194, 2340275074, 664971082, 868104288,
317621194}, {2437539407, 1088147466, 48652703, 2041810917,
2437539407, 2974834965, 7471339, 2041810917, 4125172241,
2256636382, 553316582, 3977564013, 3348538895, 2644740135,
553316582, 279286037, 3932880138, 2644740135}, {2767516839,
1967973254, 396617122, 4267620040, 2767516839, 1615344995,
3476014624, 4267620040, 1729073482, 912232907, 3416933659,
1587194462, 2687676063, 3986137775, 3416933659, 322954476,
1361571890, 3986137775}, {731897082, 1445362099, 2595144917,
1447801550, 731897082, 3988455018, 188539054, 1447801550,
1188582180, 2050052185, 3881055380, 264403432, 940006676,
1957269898, 3881055380, 2469328015, 3769418331,
1957269898}, {2513781045, 3987302058, 1455564465, 798626824,
2513781045, 2219774094, 3879214027, 798626824, 900526416,
1912059035, 2919944362, 3588451359, 1464119531, 3585587043,
2919944362, 2463381051, 2378713321, 3585587043}, {2588128884,
397658028, 3082940156, 608952440, 2588128884, 2562104176,
4234655284, 608952440, 1227298854, 1114160754, 3155034092,
3835828968, 3069142880, 3109358041, 3155034092, 4202957017,
3866743692, 3109358041}, {2601868945, 667128524, 2480183392,
3926034488, 2601868945, 82621702, 2686605836, 3926034488,
1520850716, 2413787880, 4241673552, 2427278590, 3086419713,
2816680417, 4241673552, 47659702, 4155441854,
2816680417}, {2772467068, 2930191747, 2318381581, 1625506838,
2772467068, 3508796977, 4142299885, 1625506838, 3200912711,
3315632585, 2480313120, 88147339, 1413043611, 2481083808,
2480313120, 1945166752, 2247120009, 2481083808}}, {{404228189,
4107238699, 1386575385, 2831829177, 404228189, 997274536,
929124824, 2831829177, 1715219514, 1662727700, 1132340715,
1154819290, 676995757, 3926931954, 1132340715, 615601328,
2907684453, 3926931954}, {2970489088, 1425511081, 1158689953,
2912523524, 2970489088, 4216003022, 1840499723, 2912523524,
3928131551, 934679595, 475345024, 1795936544, 4206379953,
3921782078, 475345024, 2969567377, 3376494857,
3921782078}, {2086065141, 587061399, 3022076788, 1647549563,
2086065141, 3380605877, 2507630679, 1647549563, 2920924516,
1233970978, 3042622070, 1229440375, 4181781224, 3483571863,
3042622070, 742581647, 2615823606, 3483571863}, {2370490899,
2450376936, 10873814, 2510299562, 2370490899, 2505735035,
271603006, 2510299562, 2807004452, 661718928, 1047885963,
1065969969, 1581078542, 3119292228, 1047885963, 2083352304,
741613041, 3119292228}, {1354837648, 3678237069, 3682511980,
2675296174, 1354837648, 2414255248, 1246008334, 2675296174,
920524545, 2653425336, 1917491817, 2869973725, 1239349701,
3580539188, 1917491817, 1506189984, 2309910897,
3580539188}, {3793171443, 1882293939, 661495819, 1513759891,
3793171443, 337719276, 652184790, 1513759891, 895842640,
2098160992, 1401117517, 2267936793, 335339494, 3193382049,
1401117517, 3880461974, 573569291, 3193382049}, {4117350188,
3529373493, 2913153588, 3057958216, 4117350188, 3995611923,
2391642796, 3057958216, 2497597968, 3011116692, 2682960221,
1476706329, 3517522726, 4253871110, 2682960221, 153189593,
1114509184, 4253871110}, {2787925323, 3044470744, 1475797635,
1113679064, 2787925323, 1475266926, 2502160539, 1113679064,
2000734342, 993186530, 3804264051, 1246805564, 1366457944,
3643472111, 3804264051, 3995474529, 2870554228,
3643472111}, {1768207891, 2590074999, 3370546898, 175571127,
1768207891, 2499224051, 920885099, 175571127, 1846049425,
1563383369, 18544120, 2874273892, 935783747, 462450713, 18544120,
167437104, 1822716166, 462450713}, {76599155, 623201703,
2574759301, 722317846, 76599155, 1016766460, 1586650055, 722317846,
1766616799, 532216705, 1859333754, 2682882800, 2377603858,
2693567720, 1859333754, 3619720132, 1775121226,
2693567720}, {3363820471, 2298118057, 2029539379, 1292061429,
3363820471, 1915384204, 436716126, 1292061429, 2161444770,
2123202522, 2928620817, 2991388585, 2731138746, 177441128,
2928620817, 3917475518, 3291773936, 177441128}, {1724853627,
1256783759, 536042925, 4014926443, 1724853627, 2495955387,
1080154168, 4014926443, 3664739404, 3842743664, 1160802169,
92806587, 1363372142, 2520305729, 1160802169, 2663387463,
3279882298, 2520305729}, {1148079236, 2156546352, 1092717014,
2592814691, 1148079236, 3709610673, 2728288705, 2592814691,
867449101, 3879394235, 2162786564, 1706763897, 2618087039,
1182442141, 2162786564, 2087227814, 1406594286,
1182442141}, {2618021767, 2030169433, 527165723, 3373283605,
2618021767, 1560329332, 3767126799, 3373283605, 4046813655,
3607037865, 1124729601, 2731098528, 1908361865, 1402382861,
1124729601, 967911190, 2128689614, 1402382861}, {239039756,
1309462532, 1276337832, 2279012449, 239039756, 538581363,
3801080808, 2279012449, 1454676776, 3319475384, 3336285375,
4071940116, 4262329590, 1464913319, 3336285375, 2840173801,
598999078, 1464913319}}, {{3639907189, 2256217204, 2067740348,
1252556678, 3639907189, 1926798761, 3073232607, 1252556678,
1457157056, 388382377, 255632881, 3472564181, 3778927111,
796711791, 255632881, 3160293932, 3878204845,
796711791}, {686702830, 667792040, 1699805291, 1035400458,
686702830, 4126312242, 3162437311, 1035400458, 3740999688,
1213319489, 1349354417, 715722511, 1264780832, 1776984101,
1349354417, 3449763933, 1742284034, 1776984101}, {1935673443,
2304324596, 877889863, 1338322079, 1935673443, 2710885009,
817781640, 1338322079, 1345468819, 1095273395, 3070932389,
4073569309, 4183678140, 3231284907, 3070932389, 3476263944,
2981539575, 3231284907}, {4092709154, 1863075174, 659945473,
3203768688, 4092709154, 848719394, 1858240466, 3203768688,
2422495016, 3193186353, 614207188, 2046042882, 1853554849,
4261866865, 614207188, 201872335, 1914382786,
4261866865}, {3931174287, 340393141, 3818334033, 2944772441,
3931174287, 3854092115, 1446242483, 2944772441, 21345609,
3967303913, 1712333408, 2838663503, 2683472927, 4179922982,
1712333408, 3720886954, 2821238835, 4179922982}, {1579232146,
3349077947, 3154153453, 1572317229, 1579232146, 386210076,
2146084483, 1572317229, 2472609977, 2803635446, 2420177830,
3429706463, 1924402503, 2701381139, 2420177830, 2023182114,
3664845069, 2701381139}, {1649964845, 217906160, 3740686873,
1547798902, 1649964845, 3707114992, 578076866, 1547798902,
3934658083, 2228432272, 2221023672, 547107197, 722305627,
4122275824, 2221023672, 1051494202, 2032046756,
4122275824}, {3721214025, 745295675, 783929942, 871751352,
3721214025, 3976202597, 2834051003, 871751352, 246817944,
2184392547, 1251969297, 470400916, 3491451503, 2210205512,
1251969297, 3324925707, 2847073169, 2210205512}, {2154402149,
4001512734, 3816295128, 4056397833, 2154402149, 2221071186,
4004683974, 4056397833, 4263372608, 435589087, 3137962926,
2082593800, 1851577490, 1111106658, 3137962926, 2240713216,
1077637478, 1111106658}, {3915641819, 2309221783, 1234558483,
2655427644, 3915641819, 895281471, 4134896970, 2655427644,
1107172622, 2288454722, 3171155722, 1480377757, 1089650049,
2520052828, 3171155722, 2197644324, 3723852471,
2520052828}, {3250461503, 1910495453, 1512194304, 169608246,
3250461503, 309305922, 818711694, 169608246, 2925058007,
3125904656, 3492065914, 1221999270, 170682686, 992715205,
3492065914, 1638334801, 4178144540, 992715205}, {35143865,
2176984764, 1567869529, 2500915473, 35143865, 887925654, 496167814,
2500915473, 548058447, 2939006535, 503033547, 1828629701,
980661493, 1829465586, 503033547, 2957960322, 3876447782,
1829465586}, {1606635477, 1843255804, 631081701, 2295006902,
1606635477, 1370996644, 746905804, 2295006902, 894654639,
993091788, 770341956, 2384830027, 2326513067, 4197539627,
770341956, 844674446, 2567893567, 4197539627}, {1002527952,
4011213374, 3121831492, 801660332, 1002527952, 784909987,
417481691, 801660332, 3000287756, 3049405724, 1827317252,
859112106, 3308812772, 4087835913, 1827317252, 603843371,
2983990017, 4087835913}, {3051707672, 496483841, 1727559830,
3885232784, 3051707672, 3571506368, 382063349, 3885232784,
3281899811, 1611359555, 1261853865, 2360894365, 2828833838,
2485922743, 1261853865, 1951099638, 120616508,
2485922743}}, {{2906557036, 966815948, 2300510686, 3603608092,
2906557036, 3826150877, 2365398362, 3603608092, 3811740424,
4059123142, 295466806, 3527253079, 4143310876, 1523590942,
295466806, 68341529, 2391111113, 1523590942}, {2220769388,
705296543, 1346863388, 18201123, 2220769388, 437309679, 3367710570,
18201123, 2816329160, 964257199, 1814959027, 346472965,
1560544267, 1406902110, 1814959027, 1774518130, 3735012720,
1406902110}, {497836434, 523073130, 2146196184, 1183199523,
497836434, 3080242732, 686976485, 1183199523, 2743985808, 60540513,
3754191262, 2574962339, 1060465580, 992368492, 3754191262,
2674694909, 2710608111, 992368492}, {3105849797, 1735620028,
1937586849, 1662315499, 3105849797, 2572908401, 132645114,
1662315499, 3310028953, 1168504873, 2025248418, 1349947330,
4148125749, 855309653, 2025248418, 1518467541, 4208503105,
855309653}, {589436548, 2389421212, 3566940914, 3741960405,
589436548, 761213045, 2322978424, 3741960405, 142470895,
3823316426, 645207847, 2648411341, 3256971516, 2429349820,
645207847, 3556252097, 1469549233, 2429349820}, {2069885925,
700277438, 2476258020, 3883250533, 2069885925, 2884486169,
3759258130, 3883250533, 1149765069, 1586965364, 2937963376,
1930346507, 370051167, 2030076497, 2937963376, 940935419,
4179928109, 2030076497}, {1596781442, 2709977947, 2667972423,
1839394420, 1596781442, 270719656, 4089925844, 1839394420,
525032773, 3677430989, 863795123, 2000204624, 935621435,
2315368417, 863795123, 3597744580, 2843028970,
2315368417}, {2044154050, 1149892630, 1787730088, 1359675853,
2044154050, 106675209, 3599822966, 1359675853, 461386353,
1350184085, 252401654, 2793725401, 3992097193, 2375338156,
252401654, 409696181, 3629519168, 2375338156}, {1017349084,
629419078, 203312427, 2157999162, 1017349084, 2919482867,
850785849, 2157999162, 1894476622, 1029598590, 3416340823,
40448550, 3206465269, 1502299776, 3416340823, 2872782491,
354425135, 1502299776}, {3908209478, 3273124970, 4162279434,
1499732648, 3908209478, 1957153439, 1845710116, 1499732648,
1696427075, 4256446835, 3347645869, 3898835358, 742690700,
987095540, 3347645869, 3948109762, 184519571,
987095540}, {1761526113, 846440977, 968061131, 3519837725,
1761526113, 3098516054, 195554263, 3519837725, 4170693482,
1478537122, 3484995340, 2037109801, 697103737, 3043971826,
3484995340, 2423210396, 580497182, 3043971826}, {4143854702,
1557265403, 1276625905, 2034254929, 4143854702, 308885052,
404593054, 2034254929, 1678397435, 2282426993, 3529719882,
1918223997, 204704202, 2856909490, 3529719882, 3088217623,
1191427722, 2856909490}, {915021027, 57535839, 695857913,
1840996748, 915021027, 3480463983, 2917871304, 1840996748,
975961557, 2428781875, 1194218426, 3745468214, 3705289301,
2961889631, 1194218426, 978546743, 1677670172,
2961889631}, {884811167, 1724903241, 1395551713, 2234972795,
884811167, 965267288, 489460872, 2234972795, 559523405, 4059493912,
3214872273, 1442552816, 2159361799, 1594616762, 3214872273,
3944540633, 3530814584, 1594616762}, {2300205863, 355019952,
1711195105, 3708698642, 2300205863, 2274854722, 801163396,
3708698642, 778665262, 2636584781, 2618124359, 2831461146,
1676130220, 2459541613, 2618124359, 2311007603, 3880286112,
2459541613}}, {{1116882637, 79626976, 1846832385, 345146034,
1116882637, 2231869247, 1417149696, 345146034, 3303902397,
876210854, 3554143374, 1919765327, 894746180, 873141039,
3554143374, 361604799, 3885583138, 873141039}, {1101141726,
1004001443, 224270120, 3117782790, 1101141726, 3796182890,
1805260159, 3117782790, 2765049417, 1504276775, 2300930144,
2016163623, 2560214589, 246368794, 2300930144, 2317362874,
1703793169, 246368794}, {4282068381, 3102721096, 233700888,
1643675110, 4282068381, 2277708085, 977708150, 1643675110,
1905825838, 4275847685, 3631325896, 2980091964, 2962442766,
104889453, 3631325896, 468894607, 1635915525,
104889453}, {997151755, 3444341166, 1175528637, 2494198515,
997151755, 2490435731, 1356989069, 2494198515, 89118668, 550063800,
107416526, 1402542742, 1773803959, 1574610921, 107416526,
4103450226, 2147546631, 1574610921}, {1592979988, 2320747498,
1962743300, 3815695315, 1592979988, 3669456970, 1523382042,
3815695315, 2261193360, 4088546528, 2426827615, 2021488568,
544936901, 1011239422, 2426827615, 4071244026, 2496057465,
1011239422}, {1733540130, 1810350755, 1119988193, 3160454394,
1733540130, 613651010, 1595264559, 3160454394, 2610383359,
1752846470, 3825719596, 2445832362, 3679744745, 2364572364,
3825719596, 468574833, 3566983915, 2364572364}, {2703184802,
2604386602, 1446393552, 2873961450, 2703184802, 880506002,
2959349947, 2873961450, 2449247677, 1445510944, 912545172,
331250696, 814500863, 272315039, 912545172, 1200594162, 1398564460,
272315039}, {1194574818, 1728500627, 2662281592, 510225634,
1194574818, 2045217287, 286119182, 510225634, 2340639019,
296510335, 2637234667, 2895130475, 4031408742, 363388665,
2637234667, 1746615522, 592194244, 363388665}, {2294364571,
2423473437, 2052320134, 4118117831, 2294364571, 1174383839,
3465907219, 4118117831, 1531142753, 2867355922, 4123315,
2837472816, 3501850356, 977593562, 4123315, 2038498018, 3178048957,
977593562}, {2854828033, 3783089951, 4138436503, 184782823,
2854828033, 2190899193, 1797257364, 184782823, 1447842232,
4292230862, 2144494056, 2439698058, 1022614336, 208003776,
2144494056, 1566440482, 91247399, 208003776}, {1344049392,
2047337866, 1934971877, 2313144506, 1344049392, 266781035,
2279972572, 2313144506, 1583393201, 1663896395, 3064180636,
1105919809, 1581808653, 1156200190, 3064180636, 1451707531,
2185205086, 1156200190}, {2559113701, 2236162592, 4146978688,
2262141136, 2559113701, 3549231332, 1078367555, 2262141136,
3892495210, 3471595726, 218486499, 2351513088, 3142931989,
2823846657, 218486499, 3340983277, 4257316854,
2823846657}, {3755683476, 1940882830, 1866420738, 2087946058,
3755683476, 390169161, 4179048351, 2087946058, 3031968622,
2881596813, 3034998254, 2517333902, 535206560, 1190356014,
3034998254, 1803506624, 2329540208, 1190356014}, {781759955,
3182735706, 359509185, 825286037, 781759955, 830287146, 2959985729,
825286037, 1510077366, 1212961148, 2047897620, 965798968,
1674831117, 3562083579, 2047897620, 1588504573, 3058668461,
3562083579}, {590551532, 736016229, 1655805093, 4221110267,
590551532, 1365819997, 1908233913, 4221110267, 290264540,
2222615454, 2404181240, 2563913658, 2273306715, 1685030826,
2404181240, 1157041624, 3746279518, 1685030826}}, {{2343328484,
2893041005, 3465141330, 1189120688, 2343328484, 1087786165,
2848040407, 1189120688, 210787847, 4226264344, 1160174754,
1234984211, 4027094919, 3997368560, 1160174754, 3178383826,
3047771871, 3997368560}, {215280489, 1474823842, 730413847,
3236285682, 215280489, 2873263091, 962036916, 3236285682,
3438170226, 1708226553, 1776841674, 1500495759, 2867287469,
3303179301, 1776841674, 3235964171, 4243968063,
3303179301}, {4177426677, 1193948814, 3908192573, 489218847,
4177426677, 1267710679, 1841260926, 489218847, 1877599976,
2513927224, 1694166096, 1377070160, 593465055, 1859001036,
1694166096, 2391336228, 2962890971, 1859001036}, {653809404,
3710942807, 3082882924, 133010024, 653809404, 2835827516,
2886695328, 133010024, 1566461658, 3398059015, 3932773012,
3350181058, 389193591, 1482944153, 3932773012, 354466071,
3192311574, 1482944153}, {1484000297, 3188783681, 3415991698,
4003411060, 1484000297, 933110981, 3748771156, 4003411060,
1018639212, 2819889512, 2637114657, 909882870, 1117546206,
3478906695, 2637114657, 3563812138, 565159378,
3478906695}, {440283001, 1089550911, 4029103059, 3185784250,
440283001, 1151112872, 1624263326, 3185784250, 2630823869,
1154112679, 3969663510, 1537382635, 915271858, 3406907174,
3969663510, 536198867, 3949116664, 3406907174}, {1972186265,
3731240792, 1890899803, 4161490031, 1972186265, 2993166332,
868072046, 4161490031, 2558003006, 1918378675, 2326140461,
3071215524, 2413540258, 1488624292, 2326140461, 4148140705,
2799268852, 1488624292}, {621546357, 1156650856, 2697142404,
1263786956, 621546357, 762846463, 326001602, 1263786956,
4201558916, 2925300409, 1254989667, 2027371896, 2383815228,
640968550, 1254989667, 922372912, 3226860971,
640968550}, {1223308810, 203294792, 5747672, 1452450798,
1223308810, 1209885024, 4220840636, 1452450798, 2344687902,
3881403814, 4062543339, 2728743727, 2635127075, 4270641330,
4062543339, 1822455048, 1349296096, 4270641330}, {3724096918,
1596355079, 208236284, 3428374439, 3724096918, 3321163606,
651704320, 3428374439, 895018056, 292412816, 452352464, 3648643686,
218390717, 3793896259, 452352464, 2814326437, 1780812891,
3793896259}, {407784204, 38915014, 325099352, 1466266720,
407784204, 1070102291, 336999180, 1466266720, 4118978201,
1467025452, 1802921798, 3679350217, 2749639306, 812520277,
1802921798, 3868105704, 2578531869, 812520277}, {3674853211,
3045714102, 2926968519, 3102951286, 3674853211, 1039842093,
3967585868, 3102951286, 3762480362, 3172107248, 1787821043,
274678961, 114309037, 1962346210, 1787821043, 46968956, 558932075,
1962346210}, {3704702649, 235968343, 2794114303, 1938778500,
3704702649, 525722325, 1883441867, 1938778500, 3201001463,
3749243715, 2025994867, 3161869996, 978727059, 1073516940,
2025994867, 2469877757, 1518940291, 1073516940}, {4067177669,
1043444957, 999473793, 2767155734, 4067177669, 2450190076,
1698789305, 2767155734, 2712550515, 535913450, 201585531,
3030738646, 1607735058, 343873340, 201585531, 1083665713,
2462653191, 343873340}, {1372371305, 2609726990, 2019515078,
1139515116, 1372371305, 481772553, 3800286228, 1139515116,
1153162105, 4026972309, 3689846594, 3253844353, 3753500845,
2231918583, 3689846594, 1165407528, 2183075361,
2231918583}}, {{403962847, 639660658, 1483118807, 2339029465,
403962847, 2669906627, 2430211448, 2339029465, 2655768102,
1897624297, 1689291784, 1650609719, 4255405993, 2313146046,
1689291784, 1377591475, 3910187183, 2313146046}, {3500531602,
3773159052, 24637, 4228486662, 3500531602, 1913148390, 754946994,
4228486662, 3508595200, 4235849984, 3185020283, 2449669145,
1923190496, 3656310954, 3185020283, 2702189958, 882924050,
3656310954}, {3184854268, 2557204628, 2501974390, 676540794,
3184854268, 1997794025, 3402535509, 676540794, 917982480,
103612315, 4247470915, 101546088, 1398719693, 1932812117,
4247470915, 4129096120, 4060977130, 1932812117}, {2873927273,
4095476435, 2167114735, 1143874914, 2873927273, 2970344133,
3205058469, 1143874914, 4024866227, 216160452, 3926377906,
511317726, 600268838, 377232416, 3926377906, 4229103619,
1498446142, 377232416}, {1792666093, 4016848937, 1543204173,
3257189274, 1792666093, 176833036, 2280560724, 3257189274,
1147306446, 328172414, 1507371296, 3523466281, 29926908,
3043105629, 1507371296, 3253631879, 4036239959,
3043105629}, {4205223861, 878825824, 1490976015, 345877551,
4205223861, 2785571028, 1338844195, 345877551, 1924613784,
1236254347, 4161316409, 642959848, 3917474052, 2570368729,
4161316409, 152032198, 2287840344, 2570368729}, {3990133269,
3128995264, 1738689207, 3444164897, 3990133269, 3712933071,
2064904226, 3444164897, 2391813050, 761038241, 1935180159,
170979531, 3728725334, 1306954198, 1935180159, 299888769,
273871383, 1306954198}, {3574053987, 1276667706, 3630964515,
2958366486, 3574053987, 2704486034, 3027708794, 2958366486,
1479401095, 792177412, 3952111894, 3815277103, 575096961,
1969399344, 3952111894, 4220943579, 3273966859,
1969399344}, {1021903948, 2991536458, 2725921657, 1020618701,
1021903948, 547582336, 1519243117, 1020618701, 4206967196,
2892587185, 1298122989, 2445591450, 475488669, 1364818830,
1298122989, 176868453, 3110121937, 1364818830}, {1990940644,
3319755588, 1302742290, 2960959917, 1990940644, 2496813625,
1344370018, 2960959917, 486243349, 3205282294, 350307083,
3501465828, 1203224538, 3238458868, 350307083, 4294608887,
2153874647, 3238458868}, {188494288, 213770946, 3029216841,
3018898373, 188494288, 3334548848, 4213633188, 3018898373,
2892247335, 3996577694, 1751933553, 3067620234, 1428358712,
1521457388, 1751933553, 872030483, 3604618491,
1521457388}, {3855580426, 3723431054, 2974755971, 4212261536,
3855580426, 3210276198, 2870367456, 4212261536, 3471121,
2830179495, 1573179329, 3416534728, 2922475892, 1779275464,
1573179329, 2466688033, 2781126556, 1779275464}, {3921473593,
700976293, 583685154, 1551145624, 3921473593, 1197146456,
3641233485, 1551145624, 3152036247, 3807351913, 689693264,
1832287238, 141617649, 3784802596, 689693264, 1601350390,
1340587043, 3784802596}, {1543625147, 333607140, 4026328395,
504613722, 1543625147, 448131202, 60339876, 504613722, 2156457620,
4274611742, 2596241228, 274244631, 1730347342, 2823386290,
2596241228, 3458355488, 366721249, 2823386290}, {923427765,
129677983, 1847794530, 629349982, 923427765, 2962725713, 869401798,
629349982, 3406615191, 3679893364, 377634006, 2637846534,
1807536786, 814948710, 377634006, 2686254269, 3421740057,
814948710}}, {{3968427114, 2871663372, 1284825950, 1363628533,
3968427114, 3023327955, 818363998, 1363628533, 2035927380,
2557663578, 3949237584, 2564746147, 74149658, 2957238169,
3949237584, 1523740068, 1410010554, 2957238169}, {3221052941,
4281160673, 4178727866, 3897287251, 3221052941, 1527779946,
4106993377, 3897287251, 3827747418, 1477293711, 3022040116,
2387074241, 2549406364, 3377318569, 3022040116, 2556102508,
1927835240, 3377318569}, {1544125908, 2580689227, 2321269870,
1052675384, 1544125908, 92813377, 2488796230, 1052675384,
2512492218, 1791527725, 377790127, 815879636, 1719266365,
1377352274, 377790127, 3998501000, 4140278193,
1377352274}, {3774782533, 3237990203, 3880910680, 2835476193,
3774782533, 850076464, 959978619, 2835476193, 1174358892,
1607696753, 3708493374, 4435502, 4183546362, 257306870, 3708493374,
547809226, 1748489735, 257306870}, {701018289, 2603935570,
3581566907, 639047872, 701018289, 128268716, 1338762710, 639047872,
1121980250, 3604187234, 4290097866, 3379688383, 2022645560,
675681167, 4290097866, 3364059629, 3944294906,
675681167}, {1124911735, 2683742666, 1381972838, 823435890,
1124911735, 3000499818, 1896847210, 823435890, 1400690756,
1375837008, 1844725476, 2839043606, 570318859, 2404623323,
1844725476, 1615062394, 4132820260, 2404623323}, {1785616771,
843456078, 2394423301, 1188894209, 1785616771, 3386998707,
115723361, 1188894209, 1855880557, 3076793694, 2816648919,
2844159661, 2973871282, 3480594085, 2816648919, 620328775,
2014570354, 3480594085}, {2850730926, 2374744218, 1073801764,
1998893251, 2850730926, 1430713735, 1400155768, 1998893251,
3128011728, 2412946221, 675898567, 5625977, 892406741, 4076910933,
675898567, 3433720143, 930542270, 4076910933}, {2135087543,
1525187887, 4227112097, 2761515035, 2135087543, 184926998,
3735543804, 2761515035, 173729517, 3963796804, 142213288,
3009309079, 2551966101, 3928418720, 142213288, 1709878445,
3246094911, 3928418720}, {3684505492, 1299858048, 3524599640,
3000721466, 3684505492, 764299143, 850809698, 3000721466,
1152423219, 1217794308, 973255696, 2327635494, 3968325674,
3143857447, 973255696, 4202002846, 1394551864,
3143857447}, {2895494095, 294169106, 2158601842, 4275076982,
2895494095, 361679506, 2740874794, 4275076982, 2231694171,
3225185820, 142973615, 1757153604, 2433563, 3436532721, 142973615,
1974305123, 2983415849, 3436532721}, {34180751, 194682771,
3382776937, 3090564778, 34180751, 1911908313, 4205068615,
3090564778, 2188428625, 3521935017, 1469753239, 3388871077,
4063581553, 2448094751, 1469753239, 1627957632, 2840824567,
2448094751}, {16962125, 312589334, 2192048768, 2893286420,
16962125, 1459437626, 313567118, 2893286420, 1643785836,
3172602356, 3476246571, 2478813080, 1583665905, 2003796931,
3476246571, 1092519015, 4229763198, 2003796931}, {3026731980,
4200640347, 3527778260, 627769205, 3026731980, 987718671,
3010308075, 627769205, 3050396426, 207847804, 3577868319,
3566899985, 39982755, 1593620760, 3577868319, 2173731154,
1002861683, 1593620760}, {728117846, 2300694121, 631011479,
1340017637, 728117846, 2267723092, 2600113427, 1340017637,
1417809782, 1157833108, 469667457, 2542231883, 1967581696,
3875471800, 469667457, 3326806467, 1143824575, 3875471800}}};
extern "C" __host__ void mrg32k3a_init_device_consts_(){
mrg32k3a_CUDA_CALL(hipMemcpyToSymbol(matrix_array,matrix__array,sizeof(matrix__array),0,hipMemcpyHostToDevice));
}
extern "C" __host__ void mrg32k3a_free_device_consts_(){
}
extern "C" __device__ __host__ unsigned mrg32k3a_ModM1(unsigned a1,unsigned b1,unsigned a2,unsigned b2,unsigned a3,unsigned b3){
unsigned long long a,b,x;
x=(unsigned long long)a1*b1; a=(x>>32); b=(a*209)+(x-(a<<32));
x=(unsigned long long)a2*b2; a=(x>>32); b+=(a*209)+(x-(a<<32));
x=(unsigned long long)a3*b3; a=(x>>32); b+=(a*209)+(x-(a<<32));
x=(b>>32); b= (x*209)+(b-(x<<32));
return (b<4294967087) ? b : (b - 4294967087);
}
extern "C" __device__ __host__ unsigned mrg32k3a_ModM2(unsigned a1,unsigned b1,unsigned a2,unsigned b2,unsigned a3,unsigned b3){
unsigned long long a,b,x;
x=(unsigned long long)a1*b1; a=(x>>32); b=(a*22853)+(x-(a<<32));
x=(unsigned long long)a2*b2; a=(x>>32); b+=(a*22853)+(x-(a<<32));
x=(unsigned long long)a3*b3; a=(x>>32); b+=(a*22853)+(x-(a<<32));
x=(b>>32); b= (x*22853)+(b-(x<<32));
return (b<4294944443) ? b : (b - 4294944443);
}
extern "C" __device__ void mrg32k3a_dev_SkipAheadRound(mrg32k3a_state* state, unsigned g, unsigned k){
unsigned s0,s1,s2,t0,t1,t2,gg=g-1;
s2=mrg32k3a_ModM1(matrix_array[k][gg][0],state->x2,matrix_array[k][gg][1],state->x1,matrix_array[k][gg][2],state->x0);
s1=mrg32k3a_ModM1(matrix_array[k][gg][3],state->x2,matrix_array[k][gg][4],state->x1,matrix_array[k][gg][5],state->x0);
s0=mrg32k3a_ModM1(matrix_array[k][gg][6],state->x2,matrix_array[k][gg][7],state->x1,matrix_array[k][gg][8],state->x0);
state->x2=s2; state->x1=s1; state->x0=s0;
t2=mrg32k3a_ModM2(matrix_array[k][gg][9],state->y2,matrix_array[k][gg][10],state->y1,matrix_array[k][gg][11],state->y0);
t1=mrg32k3a_ModM2(matrix_array[k][gg][12],state->y2,matrix_array[k][gg][13],state->y1,matrix_array[k][gg][14],state->y0);
t0=mrg32k3a_ModM2(matrix_array[k][gg][15],state->y2,matrix_array[k][gg][16],state->y1,matrix_array[k][gg][17],state->y0);
state->y2=t2; state->y1=t1; state->y0=t0;
}
extern "C" __device__ void mrg32k3a_dev_skipahead_(mrg32k3a_state* state,unsigned long long offset128,
unsigned long long offset64,unsigned long long offset0){
unsigned long long i=offset0; unsigned k=0,g;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_dev_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
i=offset64; k=16;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_dev_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
i=offset128; k=32;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_dev_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
}
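/* The 192-bit skip-ahead offset (offset128:offset64:offset0) is consumed as
   base-16 digits. For digit position k = 0..47 and digit value g = 1..15,
   matrix_array[k][g-1] holds the 9 entries of the x-transition matrix raised
   to the power g*16^k (mod m1) followed by the 9 entries of the y-transition
   matrix to the same power (mod m2); one SkipAheadRound applies that
   precomputed power, so a full 192-bit jump costs at most 48 rounds. */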
extern "C" __host__ void mrg32k3a_host_SkipAheadRound(mrg32k3a_state* state, unsigned g, unsigned k){
unsigned s0,s1,s2,t0,t1,t2,gg=g-1;
s2=mrg32k3a_ModM1(matrix__array[k][gg][0],state->x2,matrix__array[k][gg][1],state->x1,matrix__array[k][gg][2],state->x0);
s1=mrg32k3a_ModM1(matrix__array[k][gg][3],state->x2,matrix__array[k][gg][4],state->x1,matrix__array[k][gg][5],state->x0);
s0=mrg32k3a_ModM1(matrix__array[k][gg][6],state->x2,matrix__array[k][gg][7],state->x1,matrix__array[k][gg][8],state->x0);
state->x2=s2; state->x1=s1; state->x0=s0;
t2=mrg32k3a_ModM2(matrix__array[k][gg][9],state->y2,matrix__array[k][gg][10],state->y1,matrix__array[k][gg][11],state->y0);
t1=mrg32k3a_ModM2(matrix__array[k][gg][12],state->y2,matrix__array[k][gg][13],state->y1,matrix__array[k][gg][14],state->y0);
t0=mrg32k3a_ModM2(matrix__array[k][gg][15],state->y2,matrix__array[k][gg][16],state->y1,matrix__array[k][gg][17],state->y0);
state->y2=t2; state->y1=t1; state->y0=t0;
}
extern "C" __host__ void mrg32k3a_skipahead_(mrg32k3a_state* state,unsigned long long offset128,
unsigned long long offset64,unsigned long long offset0){
unsigned long long i=offset0; unsigned k=0,g;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_host_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
i=offset64; k=16;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_host_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
i=offset128; k=32;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_host_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
}
extern "C" __device__ __host__ void mrg32k3a_init_(mrg32k3a_state* state){
state->x0=state->x1=state->x2=state->y0=state->y1=state->y2=123;
}
extern "C" __device__ void mrg32k3a_dev_init_sequence_(mrg32k3a_state* state,unsigned long long SequenceNumber){
// 0 < SequenceNumber < 10^19 ; length of each sequence <= 10^38
unsigned long long a0, a32, b0=1029448176, b32=1712288060, b64=2074794764, b96=1914833239,t32,t96;
a32=SequenceNumber>>32; a0=SequenceNumber-(a32<<32);
mrg32k3a_init_(state);
t32=a32*b0+a0*b32; t96=a32*b64+a0*b96;
mrg32k3a_dev_skipahead_(state,a32*b96,a32*b32+a0*b64,a0*b0);
mrg32k3a_dev_skipahead_(state,t96>>32,t32>>32,0);
mrg32k3a_dev_skipahead_(state,0,(t96^4294967295ULL)<<32,(t32^4294967295ULL)<<32);
}
extern "C" __host__ void mrg32k3a_init_sequence_(mrg32k3a_state* state,unsigned long long SequenceNumber){
// 0 < SequenceNumber < 10^19 ; length of each sequence <= 10^38
unsigned long long a0, a32, b0=1029448176, b32=1712288060, b64=2074794764, b96=1914833239,t32,t96;
a32=SequenceNumber>>32; a0=SequenceNumber-(a32<<32);
mrg32k3a_init_(state);
t32=a32*b0+a0*b32; t96=a32*b64+a0*b96;
mrg32k3a_skipahead_(state,a32*b96,a32*b32+a0*b64,a0*b0);
mrg32k3a_skipahead_(state,t96>>32,t32>>32,0);
mrg32k3a_skipahead_(state,0,(t96^4294967295ULL)<<32,(t32^4294967295ULL)<<32);
}
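/* Usage sketch (illustrative only; task_id is a hypothetical caller-supplied
   value, not part of this library): give each worker a disjoint stream by
   sequence number.

     mrg32k3a_state st;
     mrg32k3a_init_sequence_(&st, (unsigned long long)task_id);
     float u = mrg32k3a_generate_uniform_float_(&st); // first draw of stream
*/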
extern "C" __device__ __host__ unsigned int mrg32k3a_generate_(mrg32k3a_state* state){
unsigned p1, p2;
p1 = mrg32k3a_ModM1(mrg32k3a_a12,state->x1 , mrg32k3a_a13,state->x0,0,0);
state->x0 = state->x1; state->x1 = state->x2; state->x2 = p1;
p2 = mrg32k3a_ModM2(mrg32k3a_a21,state->y2 , mrg32k3a_a23,state->y0,0,0);
state->y0 = state->y1; state->y1 = state->y2; state->y2 = p2;
if (p1 <= p2) return ( 4294967087 - (p2 - p1) ); else return (p1 - p2);
}
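/* This is the standard MRG32k3a output combination z = (x_n - y_n) mod m1,
   with component recurrences x_n = (1403580*x_{n-2} - 810728*x_{n-3}) mod m1
   and y_n = (527612*y_{n-1} - 1370589*y_{n-3}) mod m2; the negative
   coefficients are stored above as their positive residues m - c. */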
extern "C" __device__ __host__ float mrg32k3a_generate_uniform_float_(mrg32k3a_state* state){
unsigned p1, p2;
p1 = mrg32k3a_ModM1(mrg32k3a_a12,state->x1 , mrg32k3a_a13,state->x0,0,0);
state->x0 = state->x1; state->x1 = state->x2; state->x2 = p1;
p2 = mrg32k3a_ModM2(mrg32k3a_a21,state->y2 , mrg32k3a_a23,state->y0,0,0);
state->y0 = state->y1; state->y1 = state->y2; state->y2 = p2;
if (p1 <= p2) return ((float)( 4294967087 - (p2 - p1) )) * 2.3283064365386963e-10;
else return ((float)(p1 - p2)) * 2.3283064365386963e-10;
}
extern "C" __host__ void mrg32k3a_print_state_(mrg32k3a_state* state){
printf("Generator State: {{%u,%u,%u},{%u,%u,%u}}\n",
state->x0,state->x1,state->x2,state->y0,state->y1,state->y2);
}
__global__ void mrg32k3a_kernel_generate_array(mrg32k3a_state* state, unsigned int* out, long* length) {
unsigned i,seqNum,s0,s1,s2,t0,t1,t2; mrg32k3a_state mystate=*state;
seqNum = threadIdx.x + blockIdx.x * blockDim.x;
mrg32k3a_dev_skipahead_(&mystate,0,0,seqNum+1); s2=mystate.x2; t2=mystate.y2;
for(i=0;i<(*length);i++){
out[i*65536+seqNum]= ((s2 <= t2) ? ( 4294967087 - (t2 - s2) ) : (s2 - t2));
s2=mrg32k3a_ModM1(3975008370,mystate.x2,754337639,mystate.x1,2405650329,mystate.x0);
s1=mrg32k3a_ModM1(3715182130,mystate.x2,3975008370,mystate.x1,3615342722,mystate.x0);
s0=mrg32k3a_ModM1(642697923,mystate.x2,3715182130,mystate.x1,2256493727,mystate.x0);
mystate.x2=s2; mystate.x1=s1; mystate.x0=s0;
t2=mrg32k3a_ModM2(3174552073,mystate.y2,1030618503,mystate.y1,1998739584,mystate.y0);
t1=mrg32k3a_ModM2(3467650326,mystate.y2,2569413030,mystate.y1,1030618503,mystate.y0);
t0=mrg32k3a_ModM2(2594942403,mystate.y2,1631336015,mystate.y1,2569413030,mystate.y0);
mystate.y2=t2; mystate.y1=t1; mystate.y0=t0;
}
}
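/* The hard-coded coefficients are matrix_array[4][0], i.e. the jump matrix
   for 16^4 = 65536 = mrg32k3a_ARRAY_SECTIONS states: thread seqNum starts
   seqNum+1 states ahead and advances 65536 states per loop iteration, so the
   65536 threads interleave and out[] holds the stream in sequential order. */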
extern "C" __host__ void mrg32k3a_generate_gpu_array_(mrg32k3a_state* state, unsigned int* dev_out, unsigned int* length){
long mylength = (*length)/mrg32k3a_ARRAY_SECTIONS;
mrg32k3a_state* dev_state;
long* dev_length;
if((mylength*mrg32k3a_ARRAY_SECTIONS)<(*length)) mylength++;
mrg32k3a_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(mrg32k3a_state)));
mrg32k3a_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
mrg32k3a_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(mrg32k3a_state),hipMemcpyHostToDevice));
mrg32k3a_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
hipLaunchKernelGGL(mrg32k3a_kernel_generate_array, dim3(mrg32k3a_BLOCKS), dim3(mrg32k3a_THREADS), 0, 0, dev_state, dev_out, dev_length);
mrg32k3a_CUDA_CALL(hipGetLastError());
mrg32k3a_CUDA_CALL(hipFree(dev_state)); mrg32k3a_CUDA_CALL(hipFree(dev_length));
}
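/* Usage sketch (illustrative, not part of the library): dev_out must hold
   *length rounded up to a whole number of mrg32k3a_ARRAY_SECTIONS elements,
   since the kernel always writes complete 65536-wide sections.

     unsigned int n = 1<<20; unsigned int* dev_buf;            // 16 sections
     mrg32k3a_state st; mrg32k3a_init_sequence_(&st, 7ULL);
     mrg32k3a_init_device_consts_();
     hipMalloc((void**)&dev_buf, n*sizeof(unsigned int));
     mrg32k3a_generate_gpu_array_(&st, dev_buf, &n);
*/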
__global__ void mrg32k3a_kernel_generate_array_float(mrg32k3a_state* state, float* out, long* length) {
unsigned i,seqNum,s0,s1,s2,t0,t1,t2; mrg32k3a_state mystate=*state;
seqNum = threadIdx.x + blockIdx.x * blockDim.x;
mrg32k3a_dev_skipahead_(&mystate,0,0,seqNum+1); s2=mystate.x2; t2=mystate.y2;
for(i=0;i<(*length);i++){
out[i*65536+seqNum]= (float)((s2 <= t2) ? ( 4294967087 - (t2 - s2) ) : (s2 - t2)) * 2.3283064365386963e-10;
s2=mrg32k3a_ModM1(3975008370,mystate.x2,754337639,mystate.x1,2405650329,mystate.x0);
s1=mrg32k3a_ModM1(3715182130,mystate.x2,3975008370,mystate.x1,3615342722,mystate.x0);
s0=mrg32k3a_ModM1(642697923,mystate.x2,3715182130,mystate.x1,2256493727,mystate.x0);
mystate.x2=s2; mystate.x1=s1; mystate.x0=s0;
t2=mrg32k3a_ModM2(3174552073,mystate.y2,1030618503,mystate.y1,1998739584,mystate.y0);
t1=mrg32k3a_ModM2(3467650326,mystate.y2,2569413030,mystate.y1,1030618503,mystate.y0);
t0=mrg32k3a_ModM2(2594942403,mystate.y2,1631336015,mystate.y1,2569413030,mystate.y0);
mystate.y2=t2; mystate.y1=t1; mystate.y0=t0;
}
}
extern "C" __host__ void mrg32k3a_generate_gpu_array_float_(mrg32k3a_state* state, float* dev_out, unsigned int* length){
long mylength = (*length)/mrg32k3a_ARRAY_SECTIONS;
mrg32k3a_state* dev_state;
long* dev_length;
if((mylength*mrg32k3a_ARRAY_SECTIONS)<(*length)) mylength++;
mrg32k3a_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(mrg32k3a_state)));
mrg32k3a_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
mrg32k3a_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(mrg32k3a_state),hipMemcpyHostToDevice));
mrg32k3a_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
hipLaunchKernelGGL(mrg32k3a_kernel_generate_array_float, dim3(mrg32k3a_BLOCKS), dim3(mrg32k3a_THREADS), 0, 0, dev_state, dev_out, dev_length);
mrg32k3a_CUDA_CALL(hipGetLastError());
mrg32k3a_CUDA_CALL(hipFree(dev_state)); mrg32k3a_CUDA_CALL(hipFree(dev_length));
}
__global__ void mrg32k3a_kernel_generate_array_double(mrg32k3a_state* state, double* out, long* length) {
unsigned i,seqNum,s0,s1,s2,t0,t1,t2; mrg32k3a_state mystate=*state;
seqNum = threadIdx.x + blockIdx.x * blockDim.x;
mrg32k3a_dev_skipahead_(&mystate,0,0,seqNum+1); s2=mystate.x2; t2=mystate.y2;
for(i=0;i<(*length);i++){
out[i*65536+seqNum]= (double)((s2 <= t2) ? ( 4294967087 - (t2 - s2) ) : (s2 - t2)) * 2.3283064365386963e-10;
s2=mrg32k3a_ModM1(3975008370,mystate.x2,754337639,mystate.x1,2405650329,mystate.x0);
s1=mrg32k3a_ModM1(3715182130,mystate.x2,3975008370,mystate.x1,3615342722,mystate.x0);
s0=mrg32k3a_ModM1(642697923,mystate.x2,3715182130,mystate.x1,2256493727,mystate.x0);
mystate.x2=s2; mystate.x1=s1; mystate.x0=s0;
t2=mrg32k3a_ModM2(3174552073,mystate.y2,1030618503,mystate.y1,1998739584,mystate.y0);
t1=mrg32k3a_ModM2(3467650326,mystate.y2,2569413030,mystate.y1,1030618503,mystate.y0);
t0=mrg32k3a_ModM2(2594942403,mystate.y2,1631336015,mystate.y1,2569413030,mystate.y0);
mystate.y2=t2; mystate.y1=t1; mystate.y0=t0;
}
}
extern "C" __host__ void mrg32k3a_generate_gpu_array_double_(mrg32k3a_state* state, double* dev_out, unsigned int* length){
long mylength = (*length)/mrg32k3a_ARRAY_SECTIONS;
mrg32k3a_state* dev_state;
long* dev_length;
if((mylength*mrg32k3a_ARRAY_SECTIONS)<(*length)) mylength++;
mrg32k3a_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(mrg32k3a_state)));
mrg32k3a_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
mrg32k3a_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(mrg32k3a_state),hipMemcpyHostToDevice));
mrg32k3a_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
hipLaunchKernelGGL(mrg32k3a_kernel_generate_array_double, dim3(mrg32k3a_BLOCKS), dim3(mrg32k3a_THREADS), 0, 0, dev_state, dev_out, dev_length);
mrg32k3a_CUDA_CALL(hipGetLastError());
mrg32k3a_CUDA_CALL(hipFree(dev_state)); mrg32k3a_CUDA_CALL(hipFree(dev_length));
}
extern "C" __host__ void mrg32k3a_generate_array_(mrg32k3a_state* state, unsigned int* out, unsigned int* length){
long mylength = (*length)/mrg32k3a_ARRAY_SECTIONS;
mrg32k3a_state* dev_state;
unsigned int* dev_out;
long* dev_length;
if((mylength*mrg32k3a_ARRAY_SECTIONS)<(*length)) mylength++;
mrg32k3a_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(mrg32k3a_state)));
mrg32k3a_CUDA_CALL(hipMalloc((void**)&dev_out,mylength*mrg32k3a_ARRAY_SECTIONS*sizeof(unsigned int)));
mrg32k3a_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
mrg32k3a_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(mrg32k3a_state),hipMemcpyHostToDevice));
mrg32k3a_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
hipLaunchKernelGGL(mrg32k3a_kernel_generate_array, dim3(mrg32k3a_BLOCKS), dim3(mrg32k3a_THREADS), 0, 0, dev_state, dev_out, dev_length);
mrg32k3a_CUDA_CALL(hipGetLastError());
mrg32k3a_CUDA_CALL(hipMemcpy(out,dev_out,(*length)*sizeof(unsigned int),hipMemcpyDeviceToHost));
mrg32k3a_CUDA_CALL(hipFree(dev_state)); mrg32k3a_CUDA_CALL(hipFree(dev_out));
mrg32k3a_CUDA_CALL(hipFree(dev_length));
}
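/* Host-array sketch (illustrative): this variant manages the device buffer
   itself and copies the results back, so only host memory is needed.

     unsigned int n = 1<<20;
     unsigned int* out = (unsigned int*)malloc(n*sizeof(unsigned int));
     mrg32k3a_state st; mrg32k3a_init_(&st);
     mrg32k3a_init_device_consts_();
     mrg32k3a_generate_array_(&st, out, &n);
*/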
| a05996f524aaec3f72ec51bc22337a3710987e9a.cu | // (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is a supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define mrg32k3a_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define mrg32k3a_BLOCKS 64
#define mrg32k3a_THREADS 1024
#define mrg32k3a_ARRAY_SECTIONS (mrg32k3a_BLOCKS*mrg32k3a_THREADS)
#define mrg32k3a_a12 1403580U
#define mrg32k3a_a13 4294156359U
#define mrg32k3a_a21 527612U
#define mrg32k3a_a23 4293573854U
#define mrg32k3a_sse_mask 4294967295UL
#define mrg32k3a_sse_m1 4294967087UL
#define mrg32k3a_sse_m2 4294944443UL
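// Written out: mrg32k3a_sse_m1 = 2^32 - 209 and mrg32k3a_sse_m2 = 2^32 - 22853,
// the two component moduli of MRG32k3a.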
#define mrg32k3a_sse_x4ap 0
#define mrg32k3a_sse_x4bm 810728UL
#define mrg32k3a_sse_x4cp 1403580UL
#define mrg32k3a_sse_x4addl 4125525144UL
#define mrg32k3a_sse_x4addh 810727UL
#define mrg32k3a_sse_x6ap 149925673UL
#define mrg32k3a_sse_x6bp 489343630UL
#define mrg32k3a_sse_x6cm 1353076533UL
#define mrg32k3a_sse_x6addl 674846139UL
#define mrg32k3a_sse_x6addh 1353076467UL
#define mrg32k3a_sse_y4ap 2706407399UL
#define mrg32k3a_sse_y4bm 1370589UL
#define mrg32k3a_sse_y4cm 796966251UL
#define mrg32k3a_sse_y4addl 629268888UL
#define mrg32k3a_sse_y4addh 798332592UL
#define mrg32k3a_sse_y6ap 97673890UL
#define mrg32k3a_sse_y6bp 1431525864UL
#define mrg32k3a_sse_y6cp 1673476130UL
#define mrg32k3a_sse_y6addl 0
#define mrg32k3a_sse_y6addh 0
typedef struct{
unsigned x0,x1,x2,y0,y1,y2;
} mrg32k3a_state;
typedef struct{
unsigned s[20] __attribute__ ((aligned(16)));
// layout: { x0,0, x1,0, x2,0, x3,0, y0,0, y1,0, y2,0, y3,0, counter } --
// s[16] is the remaining-output counter used by mrg32k3a_sse_generate_,
// s[17..19] are padding
} mrg32k3a_sse_state;
unsigned mrg32k3a_sse_consts[72] __attribute__ ((aligned(16))) =
{ mrg32k3a_sse_x4ap,0,mrg32k3a_sse_x4ap,0,
mrg32k3a_sse_x4bm,0,mrg32k3a_sse_x4bm,0,
mrg32k3a_sse_x4cp,0,mrg32k3a_sse_x4cp,0,
mrg32k3a_sse_x4addl,mrg32k3a_sse_x4addh,mrg32k3a_sse_x4addl,mrg32k3a_sse_x4addh,
mrg32k3a_sse_x6ap,0,mrg32k3a_sse_x6ap,0,
mrg32k3a_sse_x6bp,0,mrg32k3a_sse_x6bp,0,
mrg32k3a_sse_x6cm,0,mrg32k3a_sse_x6cm,0,
mrg32k3a_sse_x6addl,mrg32k3a_sse_x6addh,mrg32k3a_sse_x6addl,mrg32k3a_sse_x6addh,
mrg32k3a_sse_m1,0,mrg32k3a_sse_m1,0,
mrg32k3a_sse_y4ap,0,mrg32k3a_sse_y4ap,0,
mrg32k3a_sse_y4bm,0,mrg32k3a_sse_y4bm,0,
mrg32k3a_sse_y4cm,0,mrg32k3a_sse_y4cm,0,
mrg32k3a_sse_y4addl,mrg32k3a_sse_y4addh,mrg32k3a_sse_y4addl,mrg32k3a_sse_y4addh,
mrg32k3a_sse_y6ap,0,mrg32k3a_sse_y6ap,0,
mrg32k3a_sse_y6bp,0,mrg32k3a_sse_y6bp,0,
mrg32k3a_sse_y6cp,0,mrg32k3a_sse_y6cp,0,
mrg32k3a_sse_y6addl,mrg32k3a_sse_y6addh,mrg32k3a_sse_y6addl,mrg32k3a_sse_y6addh,
mrg32k3a_sse_m2,0,mrg32k3a_sse_m2,0
};
unsigned mrg32k3a_sse_arrr[4] __attribute__ ((aligned(16)));
extern "C" __host__ void mrg32k3a_sse_genRand4(unsigned* arr,mrg32k3a_sse_state* state){ // use unsigned arr[4] __attribute__ ((aligned(16)));
asm volatile("movaps (%1),%%xmm1\n"\
"movaps 16(%1),%%xmm3\n"\
"movaps %%xmm1,%%xmm0\n"\
"movaps %%xmm3,%%xmm6\n"\
"movaps 32(%1),%%xmm2\n"\
"psubq %%xmm2,%%xmm1\n"\
"psubq 48(%1),%%xmm3\n"\
"movaps %%xmm1,%%xmm4\n"\
"movaps %%xmm3,%%xmm7\n"\
"psrad $31,%%xmm4\n"\
"psrad $31,%%xmm7\n"\
"psrlq $32,%%xmm4\n"\
"psrlq $32,%%xmm7\n"\
"pand 128(%2),%%xmm4\n"\
"pand 128(%2),%%xmm7\n"\
"paddq %%xmm4,%%xmm1\n"\
"paddq %%xmm7,%%xmm3\n"\
"shufps $136,%%xmm3,%%xmm1\n"\
"movaps %%xmm1,(%0)\n"\
"movups 8(%1),%%xmm1\n"\
"movaps %%xmm1,%%xmm4\n"\
"movaps %%xmm6,%%xmm7\n"\
"pmuludq 16(%2),%%xmm1\n"\
"pmuludq 32(%2),%%xmm6\n"\
"pmuludq 64(%2),%%xmm0\n"\
"pmuludq 80(%2),%%xmm4\n"\
"pmuludq 96(%2),%%xmm7\n"\
"paddq 48(%2),%%xmm6\n"\
"paddq 112(%2),%%xmm0\n"\
"psubq %%xmm1,%%xmm6\n"\
"paddq %%xmm4,%%xmm0\n"\
"psubq %%xmm7,%%xmm0\n"\
"movaps %%xmm6,%%xmm3\n"\
"movaps %%xmm0,%%xmm5\n"\
"psrlq $32,%%xmm3\n"\
"psrlq $32,%%xmm5\n"\
"pmuludq 128(%2),%%xmm3\n"\
"pmuludq 128(%2),%%xmm5\n"\
"psubq %%xmm3,%%xmm6\n"\
"psubq %%xmm5,%%xmm0\n"\
"movaps %%xmm6,%%xmm3\n"\
"movaps %%xmm0,%%xmm5\n"\
"psrlq $32,%%xmm3\n"\
"psrlq $32,%%xmm5\n"\
"pmuludq 128(%2),%%xmm3\n"\
"pmuludq 128(%2),%%xmm5\n"\
"psubq %%xmm3,%%xmm6\n"\
"psubq %%xmm5,%%xmm0\n"\
"psubq 128(%2),%%xmm6\n"\
"psubq 128(%2),%%xmm0\n"\
"movaps %%xmm6,%%xmm3\n"\
"movaps %%xmm0,%%xmm5\n"\
"psrad $31,%%xmm3\n"\
"psrad $31,%%xmm5\n"\
"psrlq $32,%%xmm3\n"\
"psrlq $32,%%xmm5\n"\
"pand 128(%2),%%xmm3\n"\
"pand 128(%2),%%xmm5\n"\
"paddq %%xmm3,%%xmm6\n"\
"paddq %%xmm5,%%xmm0\n"\
"movaps %%xmm6,(%1)\n"\
"movaps %%xmm0,16(%1)\n"\
"movups 40(%1),%%xmm1\n"\
"movaps 48(%1),%%xmm5\n"\
"movaps %%xmm2,%%xmm3\n"\
"movaps %%xmm1,%%xmm4\n"\
"movaps %%xmm5,%%xmm6\n"\
"pmuludq 144(%2),%%xmm2\n"\
"pmuludq 160(%2),%%xmm1\n"\
"pmuludq 176(%2),%%xmm5\n"\
"pmuludq 208(%2),%%xmm3\n"\
"pmuludq 224(%2),%%xmm4\n"\
"pmuludq 240(%2),%%xmm6\n"\
"paddq 192(%2),%%xmm2\n"\
"psubq %%xmm1,%%xmm2\n"\
"paddq %%xmm3,%%xmm6\n"\
"psubq %%xmm5,%%xmm2\n"\
"paddq %%xmm4,%%xmm6\n"\
"movaps %%xmm2,%%xmm0\n"\
"movaps %%xmm6,%%xmm7\n"\
"psrlq $32,%%xmm0\n"\
"psrlq $32,%%xmm7\n"\
"pmuludq 272(%2),%%xmm0\n"\
"pmuludq 272(%2),%%xmm7\n"\
"psubq %%xmm0,%%xmm2\n"\
"psubq %%xmm7,%%xmm6\n"\
"movaps %%xmm2,%%xmm0\n"\
"movaps %%xmm6,%%xmm7\n"\
"psrlq $32,%%xmm0\n"\
"psrlq $32,%%xmm7\n"\
"pmuludq 272(%2),%%xmm0\n"\
"pmuludq 272(%2),%%xmm7\n"\
"psubq %%xmm0,%%xmm2\n"\
"psubq %%xmm7,%%xmm6\n"\
"psubq 272(%2),%%xmm2\n"\
"psubq 272(%2),%%xmm6\n"\
"movaps %%xmm2,%%xmm0\n"\
"movaps %%xmm6,%%xmm7\n"\
"psrad $31,%%xmm0\n"\
"psrad $31,%%xmm7\n"\
"psrlq $32,%%xmm0\n"\
"psrlq $32,%%xmm7\n"\
"pand 272(%2),%%xmm0\n"\
"pand 272(%2),%%xmm7\n"\
"paddq %%xmm0,%%xmm2\n"\
"paddq %%xmm7,%%xmm6\n"\
"movaps %%xmm2,32(%1)\n"\
"movaps %%xmm6,48(%1)\n"\
""::"r"(arr),"r"(state->s),"r"(mrg32k3a_sse_consts));
}
extern "C" __host__ unsigned int mrg32k3a_sse_generate_(mrg32k3a_sse_state* state){
state->s[16]--; if(state->s[16]==0) {mrg32k3a_sse_genRand4(mrg32k3a_sse_arrr,state); state->s[16]=4;}
return mrg32k3a_sse_arrr[4-state->s[16]];
}
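/* The SSE path generates four outputs per mrg32k3a_sse_genRand4 call and
   hands them out one at a time; s[16] counts the buffered values left.
   Sketch (illustrative): seed the scalar state, then mirror it.

     mrg32k3a_state st; mrg32k3a_sse_state sst;
     mrg32k3a_init_(&st);
     mrg32k3a_get_sse_state_(&st, &sst);
     unsigned r = mrg32k3a_sse_generate_(&sst);
*/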
extern "C" __device__ __host__ void mrg32k3a_get_sse_state_(mrg32k3a_state* state,mrg32k3a_sse_state* sse_state){
long long x0,x1,x2,x,y0,y1,y2,y; int i;
x0=state->x0; x1=state->x1; x2=state->x2; y0=state->y0; y1=state->y1; y2=state->y2;
for(i=0;i<8;i+=2){
sse_state->s[i] = x = (1403580*x1-810728*x0+9007203111403311U)%mrg32k3a_sse_m1; x0=x1; x1=x2; x2=x;
}
for(i=8;i<16;i+=2){
sse_state->s[i] = y = (527612*y2-1370589*y0+9007202867859652U)%mrg32k3a_sse_m2; y0=y1; y1=y2; y2=y;
}
for(i=1;i<16;i+=2) sse_state->s[i]=0;
sse_state->s[16]=1;
}
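/* get_sse_state_ unrolls the scalar state into four consecutive states per
   component so the asm code can step four lanes at once. The large additive
   constants are exact multiples of the moduli (9007203111403311 = 2097153*m1,
   9007202867859652 = 2097164*m2), added only to keep the signed 64-bit
   intermediates nonnegative before the % reduction. */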
__constant__ unsigned matrix_array[48][15][18];
unsigned matrix__array[48][15][18]=
{{{0, 1403580, 4294156359, 1, 0, 0, 0, 1, 0, 527612, 0, 4293573854, 1,
0, 0, 0, 1, 0}, {1403580, 4294156359, 0, 0, 1403580, 4294156359,
1, 0, 0, 3497978192, 4293573854, 2706407399, 527612, 0, 4293573854,
1, 0, 0}, {4294156359, 2941890554, 244671815, 1403580, 4294156359,
0, 0, 1403580, 4294156359, 3281754271, 2706407399, 1431525864,
3497978192, 4293573854, 2706407399, 527612, 0,
4293573854}, {2941890554, 489343630, 149925673, 4294156359,
2941890554, 244671815, 1403580, 4294156359, 0, 1673476130,
1431525864, 97673890, 3281754271, 2706407399, 1431525864,
3497978192, 4293573854, 2706407399}, {489343630, 1831234280,
3782722441, 2941890554, 489343630, 149925673, 4294156359,
2941890554, 244671815, 1430724370, 97673890, 2680076935,
1673476130, 1431525864, 97673890, 3281754271, 2706407399,
1431525864}, {1831234280, 2758233149, 1527363550, 489343630,
1831234280, 3782722441, 2941890554, 489343630, 149925673,
893509979, 2680076935, 3405842137, 1430724370, 97673890,
2680076935, 1673476130, 1431525864, 97673890}, {2758233149,
939574583, 4072640363, 1831234280, 2758233149, 1527363550,
489343630, 1831234280, 3782722441, 3280220074, 3405842137,
4035147174, 893509979, 2680076935, 3405842137, 1430724370,
97673890, 2680076935}, {939574583, 3228066636, 2064391165,
2758233149, 939574583, 4072640363, 1831234280, 2758233149,
1527363550, 361718588, 4035147174, 2623373296, 3280220074,
3405842137, 4035147174, 893509979, 2680076935,
3405842137}, {3228066636, 513534955, 3055122635, 939574583,
3228066636, 2064391165, 2758233149, 939574583, 4072640363,
951529882, 2623373296, 2214191601, 361718588, 4035147174,
2623373296, 3280220074, 3405842137, 4035147174}, {513534955,
1849694388, 2647187398, 3228066636, 513534955, 3055122635,
939574583, 3228066636, 2064391165, 856588367, 2214191601,
3490676452, 951529882, 2623373296, 2214191601, 361718588,
4035147174, 2623373296}, {1849694388, 72851784, 4057515279,
513534955, 1849694388, 2647187398, 3228066636, 513534955,
3055122635, 101833201, 3490676452, 1060044773, 856588367,
2214191601, 3490676452, 951529882, 2623373296,
2214191601}, {72851784, 2171677081, 1611532847, 1849694388,
72851784, 4057515279, 513534955, 1849694388, 2647187398,
2154540534, 1060044773, 1344438782, 101833201, 3490676452,
1060044773, 856588367, 2214191601, 3490676452}, {2171677081,
342112271, 1406241672, 72851784, 2171677081, 1611532847,
1849694388, 72851784, 4057515279, 2374762999, 1344438782,
3790774567, 2154540534, 1060044773, 1344438782, 101833201,
3490676452, 1060044773}, {342112271, 2961816100, 736416029,
2171677081, 342112271, 1406241672, 72851784, 2171677081,
1611532847, 3542344109, 3790774567, 818368950, 2374762999,
1344438782, 3790774567, 2154540534, 1060044773,
1344438782}, {2961816100, 1062452522, 387300998, 342112271,
2961816100, 736416029, 2171677081, 342112271, 1406241672,
3321940838, 818368950, 1817134745, 3542344109, 3790774567,
818368950, 2374762999, 1344438782, 3790774567}}, {{1062452522,
340793741, 2955879160, 2961816100, 1062452522, 387300998,
342112271, 2961816100, 736416029, 2854655037, 1817134745,
3493477402, 3321940838, 818368950, 1817134745, 3542344109,
3790774567, 818368950}, {3847560959, 2305448679, 3866010231,
2218748291, 3847560959, 2019641772, 1709215645, 2218748291,
1243502014, 1528225099, 1777037472, 3058260025, 479207863,
498682467, 1777037472, 811441367, 2928649385,
498682467}, {1957999095, 2154014226, 3803905489, 671479916,
1957999095, 1066184965, 279477115, 671479916, 1402917279,
254897698, 3369502947, 3762770058, 1576432507, 1603012465,
3369502947, 1996495269, 493710616, 1603012465}, {3015754,
613405362, 919711945, 3453352062, 3015754, 4062454730, 3721871040,
3453352062, 3241775219, 1089381262, 82107183, 1674879036,
2655465224, 3893311647, 82107183, 64039185, 3140922085,
3893311647}, {1714457176, 543518825, 796397074, 927927799,
1714457176, 2642728633, 2119990689, 927927799, 4150866678,
1115863221, 38719673, 3034346413, 313384592, 2415675531, 38719673,
3774285549, 1547191440, 2415675531}, {2677461178, 1363680284,
2720671525, 2095557752, 2677461178, 2069614551, 1077844911,
2095557752, 3144136330, 3508261072, 1631290368, 1481396816,
985606644, 308987612, 1631290368, 2672875808, 347710755,
308987612}, {2274623300, 3565840334, 3986386399, 4158118130,
2274623300, 2506841580, 980976528, 4158118130, 3788203520,
2795879681, 2688797252, 4273420463, 840096763, 224021768,
2688797252, 1122210911, 1347945325, 224021768}, {1644962013,
2336229602, 3501544776, 1414472808, 1644962013, 3653507277,
1746037714, 1414472808, 1955221006, 3910194649, 3174683233,
3681140872, 2828785870, 28639152, 3174683233, 2231910770,
3496041927, 28639152}, {3959658012, 2382864318, 1292801423,
2278721057, 3959658012, 2582093244, 1636022382, 2278721057,
2637926654, 2455684739, 1628178936, 3809483955, 438833385,
560791363, 1628178936, 135381837, 578081674,
560791363}, {2095854275, 241074294, 3467574677, 4270228002,
2095854275, 3012067286, 3210223643, 4270228002, 3416313165,
2131206770, 2155841413, 2592491860, 1165637085, 3997142249,
2155841413, 3495511482, 2275086959, 3997142249}, {3095786772,
2103247720, 3864750503, 2026972742, 3095786772, 4260196856,
2339910750, 2026972742, 827586410, 4043247124, 3811543518,
977381276, 3729301337, 1329179255, 3811543518, 2878595121,
654253945, 1329179255}, {3093961248, 3967481377, 2551531030,
2415235089, 3093961248, 2873360987, 3754924652, 2415235089,
2883496440, 1465244270, 681409874, 3783909260, 751154769,
3488684910, 681409874, 763303055, 1250231333,
3488684910}, {3713503784, 3857718737, 3282798680, 3376559462,
3713503784, 1075293666, 222702005, 3376559462, 2747983570,
3728928570, 456229084, 1241038999, 3921422013, 824554232,
456229084, 3401146025, 2087442872, 824554232}, {2734108498,
3897674739, 1215897460, 3111826989, 2734108498, 3832105377,
1474606220, 3111826989, 2395197850, 3270171837, 2934415941,
1000116359, 3089143895, 873394952, 2934415941, 116531987,
1544120396, 873394952}, {2440817142, 2467391843, 197714708,
822685082, 2440817142, 41338233, 2126944437, 822685082, 2900830468,
3161757001, 3365547145, 639745865, 3193278438, 3354738099,
3365547145, 1246052214, 2533379923, 3354738099}}, {{1649398389,
3109147376, 333002869, 49135452, 1649398389, 1857945175,
3441537107, 49135452, 1170096663, 3509975160, 1799677538,
1882279394, 3174861078, 1463826069, 1799677538, 3313769518,
300842059, 1463826069}, {996706937, 1481993076, 3439056503,
2297111910, 996706937, 1399961132, 862649200, 2297111910,
2299034194, 2390202783, 3103629604, 4257445059, 3409560232,
2092194020, 3103629604, 2202401252, 184076987,
2092194020}, {3886299789, 2026523226, 1990826586, 2419622249,
3886299789, 2185172794, 2253148117, 2419622249, 417740769,
2486156587, 3228001680, 4017452330, 2192037609, 4073752362,
3228001680, 1612748752, 2400655659, 4073752362}, {3979619964,
2186897562, 553886495, 458782589, 3979619964, 4241015765,
1007330283, 458782589, 4146310528, 3692836406, 209817750,
3238802184, 2974870628, 812917091, 209817750, 4168802395,
2574011276, 812917091}, {3698271908, 3645403733, 2380380979,
3965730031, 3698271908, 1503705535, 1794005444, 3965730031,
1071146226, 3905721877, 3408344210, 735511225, 1835502855,
3236286143, 3408344210, 1718305577, 1541161386,
3236286143}, {1706820424, 4154877869, 2210423860, 3119708691,
1706820424, 1030264372, 3977084597, 3119708691, 1146235803,
1095692911, 3618177584, 414159965, 3295260066, 1621943577,
3618177584, 38864005, 2244624888, 1621943577}, {3124356342,
2772994830, 3960067960, 1806881043, 3124356342, 1434126824,
3050691641, 1806881043, 2263101647, 2075595331, 4070066790,
2585466858, 1576254748, 2722713860, 4070066790, 1249128943,
1086214539, 2722713860}, {1299285967, 1217405758, 2589171163,
2130448350, 1299285967, 1392525159, 292773857, 2130448350,
3630027893, 1033463096, 2755731404, 1606221490, 2782713347,
477309738, 2755731404, 3442242150, 3314523413,
477309738}, {2416226018, 4140620736, 2165948280, 585876933,
2416226018, 1983411790, 1705263630, 585876933, 1978871456,
1277188371, 3516521880, 581321315, 2497683676, 2611083463,
3516521880, 948007642, 2929615666, 2611083463}, {3741945962,
730788749, 1745368878, 948545149, 3741945962, 4218117763,
4067146304, 948545149, 3841954865, 3790308130, 3026123612,
1979145017, 3338202446, 3233499061, 3026123612, 1002517819,
2494617440, 3233499061}, {2259606462, 4074137306, 3423319201,
2140332768, 2259606462, 3421531277, 3879048317, 2140332768,
1305370935, 2499898328, 1883032937, 2574692244, 3736263139,
3127996843, 1883032937, 1619897586, 3095497735,
3127996843}, {359678770, 3314680006, 2175543957, 1393299668,
359678770, 1655556841, 3386176735, 1393299668, 2341737887,
2884691291, 402756912, 3190930010, 2817097718, 2567113113,
402756912, 3993869449, 781663248, 2567113113}, {2448853746,
626004497, 1950071360, 1158194776, 2448853746, 874933395,
2095824912, 1158194776, 1836491782, 3345651389, 1884683967,
565971536, 4175921785, 450638539, 1884683967, 2764657060,
4146690497, 450638539}, {2721675172, 241385543, 3182328265,
3210334684, 2721675172, 325732785, 1062918236, 3210334684,
3121396438, 3445807882, 1171605168, 2631005941, 3904649711,
2223683788, 1171605168, 2738363134, 4195752245,
2223683788}, {1956350629, 1808733384, 3228333573, 627552370,
1956350629, 2293384918, 561984297, 627552370, 4217130354,
1410838732, 1579802384, 1541285529, 4154765811, 2395917056,
1579802384, 3431422519, 977617859, 2395917056}}, {{2148702503,
103819730, 3922720782, 1999175811, 2148702503, 1996163538,
2979225418, 1999175811, 892409263, 2928213494, 288604458,
1501677614, 571673571, 2155469603, 288604458, 3843369786,
3326516116, 2155469603}, {1400478398, 535326008, 3018215420,
2114210471, 1400478398, 2777288607, 3240775579, 2114210471,
1586003016, 1782279859, 2764859700, 2840894706, 3576428059,
2082469029, 2764859700, 3963963316, 749754403,
2082469029}, {3677219860, 3962510930, 3805011027, 1098715579,
3677219860, 2452527982, 1378248654, 1098715579, 377225862,
1180559812, 2405645826, 1118910623, 1737043333, 1583407457,
2405645826, 55614242, 2056027805, 1583407457}, {379210133,
250053591, 554369329, 1783231160, 379210133, 1908318389,
3576659343, 1783231160, 2188531273, 3979423632, 1022129134,
276873446, 1332558840, 3760163766, 1022129134, 1799196192,
1041986082, 3760163766}, {3510732546, 3926709784, 2244264588,
2266741858, 3510732546, 3093925525, 2040546316, 2266741858,
2249717607, 1221779212, 3671597039, 3732297029, 907924984,
1438626566, 3671597039, 1569836243, 3619082489,
1438626566}, {443349145, 590074072, 2864061919, 2339070143,
443349145, 1360064932, 3651849809, 2339070143, 2349663769,
530787287, 117552025, 774331833, 4234241969, 483787924, 117552025,
2374703971, 3115606677, 483787924}, {352887003, 499779786,
2268496651, 4017647307, 352887003, 1014398637, 737449908,
4017647307, 299115015, 4186423122, 213414981, 1671634731,
927956770, 955925224, 213414981, 3644821859, 1961750426,
955925224}, {1591992468, 381333958, 1510277444, 3951951872,
1591992468, 1046219306, 2143424240, 3951951872, 4022841636,
3539882179, 3037868518, 2582787611, 199085085, 1021313167,
3037868518, 1716381787, 1312544548, 1021313167}, {2463901027,
2986987143, 4009648910, 3063328698, 2463901027, 3219337698,
2311802612, 3063328698, 2943340610, 464995138, 1830883303,
2996449399, 1967452885, 2425752074, 1830883303, 2764061706,
2523191906, 2425752074}, {156984152, 730296663, 1925208079,
512266145, 156984152, 1769630972, 2280389234, 512266145,
3988368920, 491634866, 2992461494, 1322776230, 1972129510,
307032584, 2992461494, 3508058001, 1896983419,
307032584}, {3130108188, 1790199485, 1307664725, 4157072496,
3130108188, 3778393784, 3197963896, 4157072496, 2053143729,
648323704, 175756085, 3996184920, 2324418614, 815641485, 175756085,
468123117, 25551168, 815641485}, {3475863656, 335238560, 56818880,
2822173170, 3475863656, 3877387059, 3603749184, 2822173170,
1574808140, 2654852128, 271326184, 4253341892, 3801488812,
530045083, 271326184, 2936222370, 4104998472,
530045083}, {1045458737, 2241703288, 2260695349, 4170336679,
1045458737, 1275277505, 1228755237, 4170336679, 1688596388,
3044418239, 1113299026, 1472599397, 3106112258, 2894865853,
1113299026, 3987050111, 4166739853, 2894865853}, {2598051681,
3420560058, 1682340310, 1263226655, 2598051681, 3475056324,
2888135505, 1263226655, 741601165, 4123606263, 2907473956,
3433095739, 1084985755, 4289524458, 2907473956, 3495266509,
2596796815, 4289524458}, {1724599202, 3394637846, 3481548547,
3222317638, 1724599202, 3535522460, 3763267661, 3222317638,
3808808423, 2710127434, 3608233946, 1063456347, 3619316472,
336282572, 3608233946, 1881966502, 3715040418,
336282572}}, {{3975008370, 754337639, 2405650329, 3715182130,
3975008370, 3615342722, 642697923, 3715182130, 2256493727,
3174552073, 1030618503, 1998739584, 3467650326, 2569413030,
1030618503, 2594942403, 1631336015, 2569413030}, {460768826,
2717717970, 1089892553, 627406673, 460768826, 1541344588,
963516608, 627406673, 1286664224, 2154948056, 2110199318,
1649523168, 678342865, 2334639309, 2110199318, 601680947,
3114094203, 2334639309}, {4195833089, 2105299796, 166509779,
2970279915, 4195833089, 2389311995, 803149880, 2970279915,
1918719231, 2718719935, 2454987531, 3631058630, 161781366,
2654502125, 2454987531, 3954383978, 654123598,
2654502125}, {420480221, 372736411, 2221681883, 3471097641,
420480221, 2996150472, 2353092905, 3471097641, 2956342842,
2084602709, 780563537, 2037330914, 3029522338, 563657176,
780563537, 1641595774, 191330473, 563657176}, {4070064465,
472484932, 681790190, 337365198, 4070064465, 1549217842,
3952301470, 337365198, 3672436891, 1811448870, 509499702,
4248280928, 2086248594, 1854845597, 509499702, 817515303,
3882723762, 1854845597}, {2477940169, 4030956062, 3992821540,
3734235881, 2477940169, 3893216227, 2432837582, 3734235881,
3822346350, 588292146, 1524625318, 575533487, 2663798398,
437787232, 1524625318, 2969641819, 1211920785,
437787232}, {3386547689, 2950929880, 3067610933, 929222926,
3386547689, 4006862829, 1640422986, 929222926, 1627725117,
3707178523, 156931133, 3141547991, 3896072542, 1030901935,
156931133, 510991445, 930369198, 1030901935}, {3278195133,
2510762118, 4086436419, 153526651, 3278195133, 2662640502,
3499730988, 153526651, 420492906, 1594768331, 832866376,
2165145850, 3754780168, 3414769923, 832866376, 2238126504,
1968799026, 3414769923}, {2398009046, 3478793062, 1368903286,
3944016141, 2398009046, 1432371712, 2232180879, 3944016141,
3305849029, 3658330788, 1977221419, 154984454, 980333681,
3708557063, 1977221419, 335242563, 1678180194,
3708557063}, {3518250549, 4292756547, 1669675151, 3260865699,
3518250549, 2846577720, 3665431194, 3260865699, 3928138979,
3635025025, 2376040259, 1109694695, 3138696202, 173271797,
2376040259, 1954542705, 588512257, 173271797}, {3657009175,
918958527, 3708402605, 721977397, 3657009175, 2418584047,
2326313910, 721977397, 3102728472, 2115466097, 3821276545,
1557445315, 849916466, 2548411649, 3821276545, 291376403,
404569572, 2548411649}, {1953220754, 163100603, 2074032202,
715341436, 1953220754, 4276221887, 3127996992, 715341436,
600928360, 873301213, 1778576621, 444536774, 367796024, 1457310151,
1778576621, 2480319255, 2262086849, 1457310151}, {807416367,
1224342039, 3433328646, 1372059769, 807416367, 96106427,
3651390471, 1372059769, 3186297981, 811649392, 2200531447,
12875283, 488040707, 3777283330, 2200531447, 1495168735,
1147053469, 3777283330}, {1498262489, 2714297147, 755846283,
2568504112, 1498262489, 2596328073, 1174505496, 2568504112,
521387497, 4244052116, 441543104, 3883522997, 779122511,
3285362357, 441543104, 1639913252, 901172852,
3285362357}, {4053337039, 3268044266, 736225600, 357528052,
4053337039, 3704296099, 1712397465, 357528052, 2276132974,
2247191270, 2259772727, 3858208837, 197357514, 687590794,
2259772727, 2320333603, 3898333084, 687590794}}, {{1693168425,
1401872772, 2295790366, 2228376089, 1693168425, 3992771814,
823220763, 2228376089, 3310184147, 1885870777, 2672536210,
2391283983, 359763062, 1646861218, 2672536210, 2301581548,
2317984620, 1646861218}, {630867741, 721725795, 627043435,
1497104068, 630867741, 3746310018, 4253248635, 1497104068,
2529428830, 400681955, 2013240130, 3743932305, 605925849,
841254072, 2013240130, 1635365181, 3765813448,
841254072}, {913070769, 1398710380, 2668104628, 2008257560,
913070769, 3792106984, 1077371294, 2008257560, 419196296,
835127542, 3388634549, 1420949605, 1940153687, 936677832,
3388634549, 1670398370, 2315064847, 936677832}, {661344445,
735391270, 4246010312, 3039669025, 661344445, 526054481,
1591031831, 3039669025, 2571072593, 1455652656, 3579956569,
1960229502, 2478539210, 1930213004, 3579956569, 3077694794,
2072952279, 1930213004}, {425950143, 1130486674, 2696137290,
3284243038, 425950143, 1267378907, 1755036096, 3284243038,
2285404443, 92087486, 840820555, 3494209711, 1661263707,
1097148356, 840820555, 822503830, 4251974910,
1097148356}, {3297773364, 3438979384, 2501878263, 2917363935,
3297773364, 2668364492, 2936154555, 2917363935, 3781620139,
1349151461, 1821354750, 3717764728, 2364275695, 490241598,
1821354750, 2341304300, 1155806426, 490241598}, {1767498496,
2298523806, 1601051497, 4274532010, 1767498496, 2773692480,
1279616291, 4274532010, 1500415854, 2534783090, 722180805,
3532438427, 1251210078, 3518720669, 722180805, 3300248610,
2562727375, 3518720669}, {2644799309, 1342278931, 2658402822,
4042890210, 2644799309, 606605705, 4241772463, 4042890210,
1847312821, 4288531000, 321747515, 74521379, 1225209584,
1097613522, 321747515, 1194440107, 1784540933,
1097613522}, {2159318679, 2541146449, 2497051754, 3638042686,
2159318679, 662697782, 3835520789, 3638042686, 3960987904,
4236150810, 1028604204, 43575698, 2103314379, 2370144645,
1028604204, 3967941183, 734109746, 2370144645}, {1175764916,
2471593729, 2865336247, 3704088248, 1175764916, 2932838910,
4011400538, 3704088248, 3502592220, 3269831184, 1615892324,
245018475, 2277651644, 3795899570, 1615892324, 2568537852,
3294470896, 3795899570}, {3908925760, 3899855852, 2805314855,
774817443, 3908925760, 1364407921, 2954226506, 774817443,
744436690, 2639467910, 621455156, 283035496, 3309449323,
2831618441, 621455156, 2424365668, 3539583210,
2831618441}, {287728234, 246229618, 162441370, 3775615386,
287728234, 1502779384, 3733878711, 3775615386, 3250474907,
2595646099, 1861018675, 4077631310, 3450880655, 2284610128,
1861018675, 2988405862, 1711688841, 2284610128}, {781051313,
1972894256, 225229222, 2711483505, 781051313, 1175638317,
647050317, 2711483505, 1867530951, 2580411292, 573353505,
179605964, 734715144, 2559502572, 573353505, 350062612, 3290699372,
2559502572}, {1910085226, 4230641571, 2453891386, 3227070913,
1910085226, 2853687796, 3120894575, 3227070913, 749636765,
1383222658, 2199059659, 954928333, 3613475430, 1338063869,
2199059659, 4005334159, 4236188627, 1338063869}, {3556232066,
3732499602, 1045575721, 3430126934, 3556232066, 2411549793,
451564875, 3430126934, 127041069, 2461101821, 1604355193,
495661134, 984654734, 3287411893, 1604355193, 1772365467,
769676548, 3287411893}}, {{138013862, 1975136163, 1617749306,
1096138313, 138013862, 1501878241, 1416249993, 1096138313,
2409846784, 522705580, 769295000, 1927161272, 2468210728,
143812745, 769295000, 3514348856, 3254530816,
143812745}, {3855242202, 3354399019, 3981734504, 73950522,
3855242202, 55354701, 2965395603, 73950522, 599453422, 3076435551,
1103432342, 4161111774, 1446182108, 2692035063, 1103432342,
1643240704, 2596905012, 2692035063}, {2872969148, 3657910297,
3200219335, 1941532786, 2872969148, 3557298699, 3590950415,
1941532786, 3515748818, 2986424418, 1332423877, 1366553339,
3101816103, 1809137988, 1332423877, 3993875038, 2412502608,
1809137988}, {3029005516, 3321539504, 3501837362, 813410089,
3029005516, 1044920137, 3461955319, 813410089, 4271076381,
1471526950, 1202100604, 2327872488, 4098434768, 2375319030,
1202100604, 3742334018, 1391532370, 2375319030}, {4207289909,
559813450, 2940543965, 312277958, 4207289909, 3444686334,
960113158, 312277958, 1749168476, 2689974385, 3674658855,
828283166, 1517070807, 953526753, 3674658855, 1558514368,
3517620599, 953526753}, {2903706877, 3713704391, 201947523,
3130396563, 2903706877, 1625744025, 3837877063, 3130396563,
316005085, 2055175984, 612058994, 1918225488, 4143597212,
3063334100, 612058994, 269715831, 3228801559,
3063334100}, {1295130289, 1951986222, 2596032611, 3806079268,
1295130289, 1110389513, 2159958180, 3806079268, 2725645318,
3510580843, 2499397071, 759749547, 2970642011, 2623568215,
2499397071, 191091208, 482061697, 2623568215}, {4116985007,
1460625377, 2247500745, 941408572, 4116985007, 1546486080,
1783998098, 941408572, 3058183515, 1473695507, 4245372460,
3873219634, 4094914553, 4269164791, 4245372460, 2507855960,
2795313144, 4269164791}, {1962495392, 136296227, 2260032938,
263471390, 1962495392, 2218844901, 2778787907, 263471390,
1853758858, 867552837, 2147092923, 3605684490, 3049241245,
2065032609, 2147092923, 4284554831, 89154078,
2065032609}, {4071005559, 2713466673, 2459865767, 2395946396,
4071005559, 3665517862, 1602910626, 2395946396, 1908912254,
1988463235, 1255176101, 166921281, 1941135434, 3796792964,
1255176101, 3862286918, 946070284, 3796792964}, {3012525297,
2493852519, 3916982040, 2076256875, 3012525297, 872341650,
178159470, 2076256875, 3517362011, 3250644175, 2798600046,
317587104, 4229400815, 1962645475, 2798600046, 3379450066,
3416800987, 1962645475}, {2920178076, 1509751053, 3032162980,
2446861569, 2920178076, 2900643495, 3687940915, 2446861569,
2913757298, 1047469708, 605362535, 584784141, 1689076962,
3270595249, 605362535, 2030588250, 4262735426,
3270595249}, {984385025, 152817586, 1971054150, 3476973063,
984385025, 2488227927, 4255554672, 3476973063, 285750165,
1175894603, 3365978223, 4159606843, 1086019329, 1473711771,
3365978223, 1892972053, 1285855999, 1473711771}, {2365859770,
2749587167, 2741363210, 3890635154, 2365859770, 3783033649,
1561530403, 3890635154, 4112214391, 1187553376, 2310209542,
4132098467, 2099466409, 2342917755, 2310209542, 147543030,
2494351424, 2342917755}, {4105268381, 2073524411, 644212621,
403994004, 4105268381, 2904084179, 3831719597, 403994004,
2539915825, 1465076633, 3862834394, 2259697480, 4051520963,
2890527364, 3862834394, 934050118, 2810885896,
2890527364}}, {{94451952, 3964593332, 1242898773, 3352801941,
94451952, 639029973, 2315095646, 3352801941, 4216782514,
2354363842, 1984873167, 1212627640, 1257532340, 513890845,
1984873167, 2870530442, 1208902926, 513890845}, {385258225,
3728744670, 3569882325, 1926285644, 385258225, 2390706911,
1108147171, 1926285644, 2264905138, 596424080, 4160778291,
3611019106, 141769900, 1848364568, 4160778291, 3496528455,
1552116673, 1848364568}, {377826974, 116744102, 3055477316,
2663175885, 377826974, 2578332381, 3195438389, 2663175885,
704130800, 3038511100, 1505642955, 1757204931, 4112978448,
1292986218, 1505642955, 997232463, 172755364,
1292986218}, {2886425156, 1849148167, 749134119, 1065683096,
2886425156, 4196917281, 2992662885, 1065683096, 270679073,
672947816, 2544671570, 163978331, 2188646679, 364070020,
2544671570, 837362349, 3520039729, 364070020}, {1530094734,
468421033, 934518204, 3125897940, 1530094734, 3251080204,
862053578, 3125897940, 1316966786, 2290377268, 186437207,
924446594, 1594149094, 1953699759, 186437207, 3030821019,
2064634669, 1953699759}, {1224972102, 2873015547, 1753647773,
3731309033, 1224972102, 517089293, 3283127918, 3731309033,
3153974267, 570598119, 458236864, 3364479146, 3444657749,
2201888168, 458236864, 3299709815, 1269604905,
2201888168}, {1389036848, 3153422993, 1263031874, 266142644,
1389036848, 1128582211, 635448484, 266142644, 61084722, 247090079,
217195116, 3379569640, 1843501131, 1521644902, 217195116,
1839661585, 1694823010, 1521644902}, {948843427, 927013635,
3808868984, 1378151623, 948843427, 673810920, 951629713,
1378151623, 35689930, 3531570992, 635565666, 2548654227,
2589432341, 1192700714, 635565666, 298357363, 3968150021,
1192700714}, {4136918261, 2157852912, 2069320717, 761987401,
4136918261, 3110726637, 819879023, 761987401, 1754337092,
4185051185, 2980729810, 704428564, 676055991, 4067518843,
2980729810, 371322662, 274480292, 4067518843}, {2368072759,
1396659393, 3856394323, 3441598214, 2368072759, 1048136347,
2544685440, 3441598214, 1637944237, 4169673563, 2473676687,
474110298, 892497275, 1016257897, 2473676687, 1340613848,
2749355083, 1016257897}, {388872857, 1490932548, 1362179029,
3023837855, 388872857, 3730443308, 2847406725, 3023837855,
2916493858, 2929086880, 3014452945, 1184181069, 1817239623,
304683038, 3014452945, 4060996566, 3116171970,
304683038}, {1820387182, 1448556483, 658508974, 3971013929,
1820387182, 341462694, 120796985, 3971013929, 1708907294,
3420061274, 2187600669, 2672427080, 958916489, 3438963520,
2187600669, 2575726025, 1845346034, 3438963520}, {3266061682,
2308387653, 3566378766, 2642863999, 3266061682, 4135404306,
3873850425, 2642863999, 819998702, 793345179, 3096228739,
1869564994, 61975940, 3250678901, 3096228739, 2642083507,
2331711837, 3250678901}, {2759230943, 625353568, 2334745039,
2581576807, 2759230943, 33532058, 2454038309, 2581576807,
2719213200, 2938208331, 3103878887, 3739081453, 4187175536,
2337287538, 3103878887, 210215425, 356712025,
2337287538}, {3848666830, 419617556, 3124883107, 2796173733,
3848666830, 1916212276, 514909692, 2796173733, 1014898960,
1750023152, 1403819179, 3120433504, 3857531812, 1358390162,
1403819179, 743168835, 1720139034, 1358390162}}, {{3362920032,
1401175590, 638998846, 1130489594, 3362920032, 1457450350,
3734864133, 1130489594, 1891490872, 2446760804, 1315499519,
3300994644, 3842690720, 2709640529, 1315499519, 875361870,
676525399, 2709640529}, {1807491073, 1570264155, 351423668,
2384691454, 1807491073, 2844861718, 1730098031, 2384691454,
2254459023, 3952463556, 3774935330, 4085935803, 597633965,
2742149264, 3774935330, 3032350755, 1410604392,
2742149264}, {562572981, 2453239846, 384935380, 2217666563,
562572981, 40870381, 2001728777, 2217666563, 1165449067,
3460887490, 3969822112, 3965770634, 2396790253, 1539107473,
3969822112, 1532517403, 2655486683, 1539107473}, {1237196477,
3663731096, 143400628, 4245359555, 1237196477, 1797081212,
2449575498, 4245359555, 3047429268, 3791239282, 2077922420,
2177255734, 3651360351, 3878579563, 2077922420, 1721916511,
845297523, 3878579563}, {1045832041, 1995794548, 4043681725,
3597635471, 1045832041, 4130758833, 1913125547, 3597635471,
2598267442, 384476417, 1730887034, 819810619, 1958401362,
2059165813, 1730887034, 1557795047, 1231287179,
2059165813}, {210670816, 839244126, 2509073771, 1349445168,
210670816, 1305907164, 2733446300, 1349445168, 2147359263,
1602633162, 2032494710, 2787706468, 3511906271, 2642777370,
2032494710, 4046131253, 1064863813, 2642777370}, {1530071777,
1336807579, 2754648167, 505738396, 1530071777, 3429286737,
1012141566, 505738396, 364429965, 2338325254, 4167233761,
2789929771, 2696421199, 4143866672, 4167233761, 3873099015,
795453989, 4143866672}, {3155049403, 4281524426, 1981313839,
4263819658, 3155049403, 3719941673, 1047529624, 4263819658,
3313321106, 1338343327, 2324624266, 1725087354, 3594939336,
1570315355, 2324624266, 3522351060, 4252790045,
1570315355}, {1387563109, 2624425852, 1644616294, 574275685,
1387563109, 1526657929, 2547323088, 574275685, 769083741,
2684392708, 382658421, 664207993, 227781306, 3669375462, 382658421,
1592689209, 1348893120, 3669375462}, {108747348, 1467875854,
256526168, 899246895, 108747348, 2783815531, 3248764453, 899246895,
1429567203, 1219288409, 26016991, 2629261386, 1182007239,
3128806513, 26016991, 3791370211, 3431512800,
3128806513}, {2444402736, 3471250149, 1262085311, 52683743,
2444402736, 4071587088, 1520706, 52683743, 2610517495, 2265707740,
2606867962, 2853611619, 4153458122, 1717139409, 2606867962,
3808111413, 3949668867, 1717139409}, {582760735, 785351190,
571933335, 1423127512, 582760735, 700110581, 1283194774,
1423127512, 2740000743, 2009721680, 1824515104, 1910382756,
783304238, 2323129699, 1824515104, 4032945011, 2040722667,
2323129699}, {1334296644, 3063286903, 1849376977, 639950612,
1334296644, 535101514, 868933300, 639950612, 4261942599,
2014255865, 3811884196, 1129689402, 2695899091, 118854584,
3811884196, 4200064180, 788793439, 118854584}, {2374838356,
2052902650, 1830234951, 852164586, 2374838356, 497540878,
412380392, 852164586, 448747464, 3574765936, 3001848199, 528121192,
2298546607, 495056704, 3001848199, 299029371, 1303223717,
495056704}, {2886574442, 1125389940, 1561785374, 2721738140,
2886574442, 3857216325, 3196750633, 2721738140, 3914325071,
68595631, 1596158191, 3113306279, 506975084, 3070486263,
1596158191, 3706657140, 902478796, 3070486263}}, {{1729281525,
2268963059, 1141249417, 3263186729, 1729281525, 2951515865,
1535805957, 3263186729, 2005252417, 3881593435, 1355307047,
992174375, 313617972, 2305761589, 1355307047, 3663579047,
381933244, 2305761589}, {1416676049, 1304732377, 1040867745,
83908466, 1416676049, 1352516724, 4294308508, 83908466, 2367065164,
615040705, 3791771273, 3347975047, 4196134923, 1667857811,
3791771273, 2263851601, 1564715297, 1667857811}, {2070059496,
3110356679, 1675108536, 4096493219, 2070059496, 1201774212,
1529477403, 4096493219, 2985917792, 763211059, 1723493827,
3116429712, 3721738282, 2699274746, 1723493827, 3314336764,
2208033721, 2699274746}, {4041536918, 338830578, 1317260239,
1434230503, 4041536918, 2753040912, 2944821434, 1434230503,
3214147257, 3757047667, 4261953004, 2979573134, 3973733876,
4093947334, 4261953004, 2815567716, 3454015638,
4093947334}, {3710830263, 3564440066, 2242696089, 2193226133,
3710830263, 1348686132, 1795377731, 2193226133, 3409339184,
3223762258, 3928412309, 868538065, 4232950837, 1497333242,
3928412309, 4043986454, 3837209858, 1497333242}, {2397146654,
2221537290, 403072156, 1475654090, 2397146654, 4286962883,
2785534584, 1475654090, 3189933295, 4119399398, 668310420,
3532851694, 551557198, 4178728130, 668310420, 3927272953,
2981026540, 4178728130}, {1027835471, 3867031443, 3142787072,
1898764790, 1027835471, 1315270526, 1822660758, 1898764790,
741855424, 752516370, 243696529, 3444486351, 168490644, 4121879899,
243696529, 3607008098, 2179415297, 4121879899}, {1422043015,
1207561803, 3581125669, 2054743463, 1422043015, 1762244284,
1499597869, 2054743463, 300628476, 4213855850, 3178644752,
4172515680, 1701869032, 250120061, 3178644752, 1513430926,
570149551, 250120061}, {3067618819, 2336627700, 329186307,
2372490954, 3067618819, 3955239394, 4220275697, 2372490954,
2557168436, 2136907462, 2281145558, 944820027, 75648724,
1881049173, 2281145558, 3137351012, 1981393124,
1881049173}, {1290262277, 3455952707, 1105918720, 3516590085,
1290262277, 2775532590, 2979076122, 3516590085, 3424428541,
2633790862, 278498267, 1105517535, 4167316392, 291113277,
278498267, 3905306360, 84855550, 291113277}, {838829814,
3831552648, 740094909, 1791601245, 838829814, 4140896091,
812513995, 1791601245, 2171437563, 4238387083, 982981782,
678813567, 838704912, 1588319229, 982981782, 2097021327,
3565340575, 1588319229}, {3207500987, 1677128173, 1544523985,
2454931751, 3207500987, 1750177474, 1448920400, 2454931751,
3018097661, 3564715153, 1293184102, 320925393, 1462382740,
2471905451, 1293184102, 3574085219, 929070406,
2471905451}, {1666308811, 1118308928, 3014019217, 1555428439,
1666308811, 3294923191, 1360357393, 1555428439, 1804838591,
2975870653, 2217292755, 2781461838, 877933526, 175583848,
2217292755, 2951199791, 1612169654, 175583848}, {2981538864,
771670630, 552010452, 3521528194, 2981538864, 1137098711, 54548676,
3521528194, 1634171646, 2762980999, 2257924506, 1956954494,
409055673, 1039498873, 2257924506, 1452566739, 2948111768,
1039498873}, {2088681401, 2557642094, 1386447212, 2982267031,
2088681401, 1351458792, 1889146436, 2982267031, 584673363,
3505836007, 644987823, 2972013230, 3668362949, 662437296,
644987823, 2985946835, 2268102873, 662437296}}, {{1744636487,
3952135907, 947753516, 4064983592, 1744636487, 3049723261,
1934508265, 4064983592, 4171745404, 2852219546, 1379176112,
2842564878, 3926509890, 4158106802, 1379176112, 1815738463,
3062358456, 4158106802}, {1049600974, 1569794605, 2089137344,
3577024659, 1049600974, 1729967818, 2778677259, 3577024659,
1625369148, 2017565947, 3284646837, 415258465, 2567084715,
931848746, 3284646837, 2633569246, 256263523,
931848746}, {2016775973, 1306292301, 3670445841, 2164320300,
2016775973, 480516705, 319591773, 2164320300, 53562000, 985910059,
3909587424, 1707582600, 3313512626, 507915211, 3909587424,
3733827320, 625612469, 507915211}, {3551255470, 1680625376,
869476379, 3958611830, 3551255470, 410042396, 569117280,
3958611830, 1373068765, 2917140265, 1831496020, 1821082272,
2829448427, 1648005210, 1831496020, 3987397422, 1032291296,
1648005210}, {3817533434, 3364632636, 2770290295, 1225458244,
3817533434, 1626685441, 3824823610, 1225458244, 465800240,
57148302, 2038610457, 4187472153, 3241574777, 427497251,
2038610457, 1274456574, 1161649569, 427497251}, {2770597834,
1202045798, 2685318428, 2961159523, 2770597834, 3683942097,
282037957, 2961159523, 2756353477, 688754137, 1310546639,
3814409618, 3898177863, 3905409277, 1310546639, 52591070,
1559654803, 3905409277}, {4285714048, 2970654039, 696328069,
3989370299, 4285714048, 2737310563, 2710182478, 3989370299,
4222922794, 2620741358, 2005632892, 1332202609, 3824338376,
700824646, 2005632892, 3680531612, 1590259194,
700824646}, {1012482542, 3141042890, 2545745615, 2543645250,
1012482542, 2111984988, 913717833, 2543645250, 2108618602,
2652286672, 1669447863, 1522417114, 4292947198, 4161327077,
1669447863, 3870847744, 489964129, 4161327077}, {2851133217,
2544498605, 3040329785, 728949951, 2851133217, 1441804991,
1704708043, 728949951, 540570281, 299000690, 3113608192,
3442591292, 1601950042, 2940267842, 3113608192, 2426519800,
3446468540, 2940267842}, {4203176538, 564446240, 3663951648,
954041761, 4203176538, 2877992546, 3194902955, 954041761,
139726772, 3238228424, 980251399, 4210976074, 842055339,
2379780762, 980251399, 538895986, 2470160950,
2379780762}, {3359984447, 2945106536, 94047956, 3251898692,
3359984447, 3157047859, 2551251946, 3251898692, 3012733147,
3350090744, 3074052371, 4119905453, 1046297227, 2176706496,
3074052371, 1284433356, 698354553, 2176706496}, {1939611745,
3454434256, 1721948148, 3500039413, 1939611745, 1489246316,
1380082835, 3500039413, 1200101967, 2005464907, 3658400031,
3338913609, 4093432727, 655280634, 3658400031, 3487203083,
3675619486, 655280634}, {4070536231, 1855978637, 1957763018,
4159333798, 4070536231, 3906589069, 2115390755, 4159333798,
3251810605, 822023968, 319975458, 3368913060, 1828575570, 73560661,
319975458, 3699817316, 3243967450, 73560661}, {597890444,
2963526988, 2113178661, 1074888759, 597890444, 3687820258,
1846158283, 1074888759, 708031970, 2091575838, 1768564041,
1548255156, 1282964788, 3487733040, 1768564041, 2449029780,
2113300421, 3487733040}, {3140731760, 3240303708, 2305798886,
1768199518, 3140731760, 1837348435, 2315952390, 1768199518,
3050136075, 1992669883, 2052021448, 255854126, 630458596,
3624083038, 2052021448, 2398355340, 1874273834,
3624083038}}, {{3013855247, 82049263, 3977310441, 584852249,
3013855247, 1631801979, 2272893205, 584852249, 1157293598,
1008985039, 2981474723, 625182639, 2528619024, 1270934555,
2981474723, 505612043, 3136631324, 1270934555}, {3670086228,
1764904195, 656744553, 3137526662, 3670086228, 3580869206,
2403875621, 3137526662, 3580234334, 2460119169, 1797675893,
1123794455, 3743985508, 280996820, 1797675893, 3013099060,
143706137, 280996820}, {3470835037, 642403019, 313815441,
3018859341, 3470835037, 2713143350, 2110936889, 3018859341,
537271019, 1317375832, 1403098721, 2544509098, 2026895397,
2379814210, 1403098721, 602189114, 2634194997,
2379814210}, {3464838365, 2842987937, 3136165138, 3634185196,
3464838365, 3601823850, 3887031679, 3634185196, 2792496861,
2263904321, 3933041881, 564891116, 474242849, 919218027,
3933041881, 1125672685, 4154920441, 919218027}, {176543705,
1822300329, 358859200, 4127602744, 176543705, 2136991891,
470244788, 4127602744, 3064097390, 2567458918, 2773253774,
3791882856, 938080042, 1088061648, 2773253774, 1418143232,
2412461974, 1088061648}, {3232580086, 2220460662, 1932329582,
981305692, 3232580086, 1287512071, 955067142, 981305692, 860869470,
3541844079, 552285455, 1558638678, 3367709189, 4046953169,
552285455, 59087677, 707039159, 4046953169}, {3619349168,
705697871, 1992221970, 2531593740, 3619349168, 1354657163,
106698643, 2531593740, 450396744, 2225758663, 609496683, 113761526,
1608016081, 4085666982, 609496683, 4169742198, 3569371681,
4085666982}, {3848976950, 2441486578, 2109817045, 3230022138,
3848976950, 3427386258, 4278720212, 3230022138, 1362557480,
4267827987, 2135250851, 296035385, 969184056, 2920112852,
2135250851, 1177141043, 1965329198, 2920112852}, {3731859298,
2881137426, 833389250, 1677356938, 3731859298, 1618706784,
2828019868, 1677356938, 57798040, 978031664, 2982715660,
3530149035, 4134336104, 611127742, 2982715660, 1203193580,
3321547202, 611127742}, {1401050440, 574376174, 2610290298,
267497185, 1401050440, 13298153, 2662390285, 267497185, 2708545360,
2773111558, 3166912454, 3316963881, 3895260799, 3182682829,
3166912454, 2317042610, 216191227, 3182682829}, {3947337904,
912449363, 3158149160, 3331084441, 3947337904, 2749086652,
1858524196, 3331084441, 3471309457, 1338118568, 1601092380,
546044938, 4151360261, 3845466318, 1601092380, 4101622219,
2641032542, 3845466318}, {2583418592, 2950188629, 2277374582,
1054794505, 2583418592, 2518650890, 2873059524, 1054794505,
4064509494, 842864023, 155090437, 995757623, 3465811606,
4005961945, 155090437, 1596766252, 962333604,
4005961945}, {1111373825, 3159958443, 3781563883, 301480275,
1111373825, 222919357, 1488151509, 301480275, 2695022706,
3325437674, 4035263598, 1048085455, 1092686870, 3308111567,
4035263598, 3035036102, 2416022123, 3308111567}, {3646000753,
3244390048, 1152314996, 3585947086, 3646000753, 4188500293,
1551803386, 3585947086, 43539574, 746020360, 4257279454, 159699513,
3209952933, 3616509225, 4257279454, 2901642782, 3195052585,
3616509225}, {1255181092, 2606963172, 3665019301, 1625759465,
1255181092, 790664876, 4216115634, 1625759465, 2865685016,
561498600, 3464317143, 3268511563, 1977483496, 1623078380,
3464317143, 2236332137, 2033286498, 1623078380}}, {{2082683147,
888193479, 2341088247, 2007945401, 2082683147, 3335076429, 3868481,
2007945401, 1198519135, 1935975979, 265491023, 301796252,
2860005744, 1481142942, 265491023, 1088557292, 4120754772,
1481142942}, {782210925, 2180014252, 167617770, 3193380570,
782210925, 307060547, 565138859, 3193380570, 3473925387,
1933943506, 73849832, 3330206241, 3980799998, 2111859033, 73849832,
1001476468, 2813610100, 2111859033}, {1909350615, 2538479259,
1517357015, 938410993, 1909350615, 898527522, 2583257939,
938410993, 3946174395, 1800377045, 756158319, 3666294602,
1208877520, 4238802520, 756158319, 3659825373, 1791251057,
4238802520}, {2665400165, 3316180851, 2499994113, 3303532086,
2665400165, 908630605, 2766583698, 3303532086, 3811588895,
551079002, 2753158871, 3576525143, 3119883109, 1781286360,
2753158871, 3509383709, 3661231931, 1781286360}, {3884416364,
1705355356, 2902099262, 296464469, 3884416364, 3697213244,
3400652741, 296464469, 2828295511, 808249292, 3422735839,
3792794843, 2750435170, 1150902763, 3422735839, 946744850,
3730191199, 1150902763}, {3813545212, 3287843695, 2942299995,
91397022, 3813545212, 2332659451, 1472690314, 91397022, 3952581582,
186846111, 3244671951, 3397069741, 2795337511, 429107478,
3244671951, 689359610, 1467997203, 429107478}, {3356827989,
2041861019, 1590379239, 861234488, 3356827989, 435895955,
2817452481, 861234488, 1334460780, 209181640, 2340848640,
3239904192, 3699044308, 1453148331, 2340848640, 3494583787,
352897577, 1453148331}, {645804125, 2205365640, 2942690261,
3033075037, 645804125, 1531633406, 1505732852, 3033075037,
4288926968, 2352253462, 3763632860, 3811666068, 947424568,
1185024844, 3763632860, 1004942725, 587779104,
1185024844}, {4217730591, 2878362723, 3636389463, 3456071712,
4217730591, 3011838347, 1384409151, 3456071712, 4231070551,
804316663, 565106734, 68522955, 3901561803, 1015564368, 565106734,
1759674948, 3538307608, 1015564368}, {2958437310, 3337768466,
3831862262, 2889311147, 2958437310, 641080998, 1600837671,
2889311147, 2661805006, 3027478693, 1044930964, 2598967548,
3411488636, 3182211673, 1044930964, 688540199, 625836117,
3182211673}, {2699171944, 3184138742, 2121845320, 1884398776,
2699171944, 3906801567, 3710825596, 1884398776, 4271036859,
2542859208, 1092960720, 2604606733, 2852242016, 947085085,
1092960720, 3883689857, 818665845, 947085085}, {2493045579,
1911470121, 1463144542, 1548665231, 2493045579, 619339667,
370438611, 1548665231, 2393036245, 1048288005, 85981002,
1314775482, 4207495883, 3664720019, 85981002, 1691333553,
1230797000, 3664720019}, {1145118921, 870569133, 3115744119,
3524433451, 1145118921, 3694109804, 95035592, 3524433451,
4176687607, 3069357108, 1648927158, 1131329693, 1513911868,
1588589260, 1648927158, 80057254, 3124611325,
1588589260}, {1798887843, 897818198, 3719834407, 4138283993,
1798887843, 2955139057, 1841714444, 4138283993, 2900362665,
2598475067, 1498626910, 2640480870, 653044196, 1972185904,
1498626910, 2803386106, 2060054550, 1972185904}, {2122851837,
2135705707, 1497214115, 64062842, 2122851837, 4242864779,
906515675, 64062842, 1671390739, 3962150617, 2032722200,
3929697499, 1381143398, 2175511079, 2032722200, 2073079485,
384369059, 2175511079}}, {{1294990760, 3088484327, 209339647,
3651411522, 1294990760, 1690405883, 1652430357, 3651411522,
3976196483, 3853798112, 766129426, 2226659371, 1808643264,
1310227170, 766129426, 3172947233, 218138208,
1310227170}, {4093879780, 858124816, 3255666800, 2291131070,
4093879780, 2997812074, 2093793287, 2291131070, 3171589548,
3083355464, 3836629116, 2846140932, 3637515403, 2230902378,
3836629116, 2491962392, 4243560874, 2230902378}, {2875872016,
216187195, 1190772134, 2547678446, 2875872016, 3335475366,
3186955890, 2547678446, 3419357098, 3167919795, 2733679040,
2290142084, 965008581, 1544421101, 2733679040, 2364160468,
772024440, 1544421101}, {4007774239, 4278103786, 3322921863,
2999667479, 4007774239, 1333973326, 3995043314, 2999667479,
4113016361, 3231079441, 946166795, 3686999436, 340856814,
999448569, 946166795, 3344426626, 1464488480,
999448569}, {2050412833, 3073464912, 3722410867, 3880414744,
2050412833, 4079908614, 141813517, 3880414744, 1388901901,
1314467558, 2053642476, 529578422, 404422048, 523750865,
2053642476, 1815255389, 1014187765, 523750865}, {2121124144,
1335782505, 2508968191, 2643538107, 2121124144, 1891051658,
1732112321, 2643538107, 1439277140, 2616567252, 2111503197,
1343417573, 3329102794, 1397461933, 2111503197, 2800333485,
436473632, 1397461933}, {2189016727, 2865776175, 451498522,
3555189695, 2189016727, 1259587676, 2379307904, 3555189695,
564650170, 16002662, 257735782, 852853501, 3796032477, 3401185270,
257735782, 1291498655, 3723330339, 3401185270}, {1489702270,
158549605, 2719807628, 2109676036, 1489702270, 1701566570,
1879981040, 2109676036, 925786347, 4133888397, 2378667355,
260364836, 1493409040, 1226155368, 2378667355, 550006884,
3477563770, 1226155368}, {102714765, 1767614268, 275011730,
2660307979, 102714765, 885578421, 148391862, 2660307979, 386965783,
778237064, 3287566882, 2900226244, 693135908, 4179922375,
3287566882, 2238651166, 1260576417, 4179922375}, {3149192479,
3866462055, 3096741689, 1114069092, 3149192479, 322578046,
1698908478, 1114069092, 2029442378, 1217635756, 2211390378,
280409826, 277271300, 3950117622, 2211390378, 4045225498,
3273547172, 3950117622}, {2170437287, 1041115911, 2070764732,
4126477527, 2170437287, 139996317, 516135644, 4126477527,
896763144, 652265194, 2298338425, 4003371332, 1384035702,
1851549316, 2298338425, 1936637093, 1466780144,
1851549316}, {3210618179, 3763764745, 2000646801, 4224987734,
3210618179, 3781735882, 2705609303, 4224987734, 988998360,
1830518881, 2342461604, 1965981888, 17023679, 662024646,
2342461604, 3990280006, 2039234405, 662024646}, {1565503980,
219951997, 1499882985, 89586999, 1565503980, 1451301676,
2410542713, 89586999, 2411945299, 3819302515, 3512625613,
2398345471, 2833487279, 1895829764, 3512625613, 1829531228,
376895107, 1895829764}, {3907865583, 2413307390, 3140346149,
2529711652, 3907865583, 3318583217, 3374392672, 2529711652,
1551851116, 1625179677, 836796915, 1416778516, 2711576526,
189940394, 836796915, 3214135860, 1926084326,
189940394}, {2162683039, 1335298112, 30742342, 4145583431,
2162683039, 1543933426, 445966311, 4145583431, 1271149039,
688691120, 4030496350, 1676731953, 4164740116, 417702459,
4030496350, 659874536, 3227490550, 417702459}}, {{359516994,
3114762683, 3568862459, 3460246357, 359516994, 2135115875,
218033453, 3460246357, 2255405265, 3712928074, 3088910653,
2507911914, 3303406025, 1277901832, 3088910653, 2818511068,
310796286, 1277901832}, {186466108, 651334129, 2842197411,
4117539411, 186466108, 3807175775, 3073622315, 4117539411,
773148471, 508445538, 1623163429, 3146982514, 2209094694,
481918378, 1623163429, 1728801469, 339570348,
481918378}, {573692436, 3872444854, 35567330, 4052238530,
573692436, 3146236813, 1500692779, 4052238530, 256717243,
2023294295, 3097497909, 1346726126, 1855805319, 1121663435,
3097497909, 3234468457, 2119363369, 1121663435}, {3058812486,
1164226398, 568701600, 1475251263, 3058812486, 1693917167,
3586439101, 1475251263, 615242951, 941946604, 1024510915,
1453455184, 2122851650, 3138921230, 1024510915, 1992357430,
2381863183, 3138921230}, {2607406342, 4088734276, 3921498498,
2735954168, 2607406342, 3610099536, 908481890, 2735954168,
4079732485, 3909842681, 2441622355, 2428264750, 364883417,
3630145509, 2441622355, 2778435307, 2108137964,
3630145509}, {135488725, 2178143073, 508494460, 1041711449,
135488725, 3878048889, 2647679194, 1041711449, 3729302216,
2705683748, 801993191, 1201194185, 2207701640, 3235663883,
801993191, 3251827412, 499846706, 3235663883}, {2481747916,
391743554, 624918119, 1980187742, 2481747916, 3287199547,
1309944802, 1980187742, 1141995151, 1670622591, 1800949016,
2005458811, 2768357811, 3280476256, 1800949016, 2706872639,
682458318, 3280476256}, {1741164221, 3978412194, 2882176274,
15370275, 1741164221, 4187505695, 2061555515, 15370275, 1632636204,
1816599716, 1821933605, 3648970313, 1343489680, 2465372719,
1821933605, 3328905025, 1391015357, 2465372719}, {380080460,
4115908541, 140788931, 1599837029, 380080460, 3846137148,
846965088, 1599837029, 3882007802, 2313275318, 2166836870,
2745553930, 823423733, 585902501, 2166836870, 923832527, 351135393,
585902501}, {2185585470, 371506848, 2826850420, 4240821442,
2185585470, 2968278570, 3087593298, 4240821442, 4199667935,
2539713186, 4167642903, 3124784671, 284777447, 582796091,
4167642903, 1574617829, 1306170361, 582796091}, {1738986063,
2474731168, 2241459023, 4217920349, 1738986063, 758969635,
1809581504, 4217920349, 2543005498, 309254037, 567065147,
1873997200, 970991020, 3358259723, 567065147, 3310339072,
4164780893, 3358259723}, {2192287989, 2190852671, 3389390262,
1455254388, 2192287989, 2324442395, 1267013695, 1455254388,
4002434761, 2742038256, 381403852, 3221291545, 2932149608,
116486317, 381403852, 1696181092, 2122591885,
116486317}, {1584570018, 2824259519, 2717594912, 2335420231,
1584570018, 2642000548, 558307286, 2335420231, 2274010549,
83254296, 2981683361, 639282387, 3966380910, 1598419126,
2981683361, 683872783, 2762253144, 1598419126}, {2278658572,
4289100465, 2051186788, 3193070982, 2278658572, 3155996013,
1527096340, 3193070982, 3722528722, 479301469, 638141264,
478641834, 100617977, 3035480468, 638141264, 2351066479,
2182693760, 3035480468}, {1438975761, 2005515830, 407642856,
1935639927, 1438975761, 897329890, 2366350257, 1935639927,
2018139106, 2168302377, 36177786, 842625014, 1053303323,
2441760800, 36177786, 3842702503, 3689219738,
2441760800}}, {{2579620192, 1538453821, 3753911358, 344820524,
2579620192, 1008543583, 74213775, 344820524, 3446066703,
4043157504, 348833376, 3235582254, 2495544591, 118634664,
348833376, 2492792220, 3358712512, 118634664}, {2222725697,
690898125, 3208347646, 1269921024, 2222725697, 2050939727,
4069458760, 1269921024, 3600859892, 4093044926, 598630070,
2049250561, 1819012637, 2303067090, 598630070, 1967771133,
3371139074, 2303067090}, {250626667, 1038659713, 843125518,
2572090525, 250626667, 804558063, 3334144098, 2572090525,
2580978896, 3496861697, 294683328, 2268904495, 2496877097,
897071837, 294683328, 3837362577, 763331173,
897071837}, {764869161, 1845257036, 2729710367, 2806239788,
764869161, 975123999, 1742216102, 2806239788, 599407451,
3147308542, 3361614254, 326640887, 2807125404, 3035321857,
3361614254, 226779704, 3971176093, 3035321857}, {2650131939,
2064718263, 31199658, 1237821080, 2650131939, 4059416755,
3847187360, 1237821080, 1900612628, 3633977146, 1545732164,
2010134838, 2643845410, 4010547095, 1545732164, 511986932,
2725421511, 4010547095}, {68307108, 1313975073, 4060225449,
3034196764, 68307108, 2459581108, 3435152676, 3034196764,
1347324880, 3989342054, 2957620899, 2926759199, 433027378,
3118026103, 2957620899, 1600236290, 1037137281,
3118026103}, {2619838177, 3089879239, 3452165941, 4273872816,
2619838177, 1083671641, 2483412578, 4273872816, 832405527,
1868423350, 3613148280, 2857733472, 241254395, 2423025801,
3613148280, 995021703, 3089536821, 2423025801}, {1343714307,
4027221761, 1775329096, 3464884028, 1343714307, 580449578,
3444447102, 3464884028, 967330218, 2786835219, 1688753503,
2327946901, 94869516, 1774298149, 1688753503, 3145006948,
4179629947, 1774298149}, {499812782, 2468574300, 1328798463,
712942512, 499812782, 2114280209, 1275627961, 712942512,
1830837179, 478910498, 3423830432, 3437940333, 2143742238,
1378410006, 3423830432, 676267095, 2313761766,
1378410006}, {185092496, 3261096288, 1909413742, 826147645,
185092496, 2753609622, 3613397622, 826147645, 2255621338,
3296195515, 744399687, 1710847521, 774683716, 3554291861,
744399687, 1678181149, 828888636, 3554291861}, {3391553763,
176640707, 3923461410, 2131850586, 3391553763, 3591248161,
3300013089, 2131850586, 965662727, 2049874329, 1978721696,
3331360031, 3020599896, 857287772, 1978721696, 1435187727,
933605087, 857287772}, {3882290892, 2615791131, 3516358355,
1922354841, 3882290892, 3736488404, 4162881041, 1922354841,
665458130, 814480956, 1473201441, 673835828, 2279534654,
4268987998, 1473201441, 1719046530, 1889443262,
4268987998}, {3918254552, 977515238, 2676662991, 3837123498,
3918254552, 3811639831, 1044997226, 3837123498, 2266963059,
1536459522, 2664332345, 3749933100, 949699642, 2597331656,
2664332345, 3601983093, 4232841024, 2597331656}, {2268461137,
2202932955, 3150401156, 993647754, 2268461137, 4095831362,
3066697908, 993647754, 2303802679, 1362659482, 67690266,
2334627516, 2264460592, 3836058032, 67690266, 2795484088,
2479937809, 3836058032}, {3446131817, 3216882533, 2994862180,
4287991663, 3446131817, 1257542093, 1619913810, 4287991663,
3087142251, 1214446621, 1159228195, 1838633564, 146741160,
3781010662, 1159228195, 3140532484, 2792957909,
3781010662}}, {{1604068527, 320435440, 1818147893, 4123590610,
1604068527, 1284315665, 2477690850, 4123590610, 3426136514,
2782214191, 1198432931, 2880072915, 1527068783, 185429251,
1198432931, 3372328450, 88142322, 185429251}, {3383659282,
2265789281, 480221151, 89090276, 3383659282, 607972119, 2719996384,
89090276, 2678132557, 4036691926, 2180042365, 288658537,
1722814040, 127447080, 2180042365, 2942566616, 487724245,
127447080}, {1159516887, 4214998734, 359182081, 1236554533,
1159516887, 1498394381, 358687301, 1236554533, 2289583273,
948838482, 1989171734, 457585003, 3197042083, 3956830949,
1989171734, 1894752970, 61587123, 3956830949}, {2748163602,
1071954219, 2067064347, 2290099491, 2748163602, 3711385978,
614471834, 2290099491, 1827237091, 440737492, 3976346810,
3049605934, 2343603502, 1614979968, 3976346810, 1122661217,
1486547157, 1614979968}, {449599208, 1464260734, 760787290,
216048523, 449599208, 1934558942, 718668543, 216048523, 3331101001,
779962582, 1727662238, 2790878420, 3773409253, 504054738,
1727662238, 3640433646, 811041045, 504054738}, {3122969826,
3214748645, 4075360046, 1749803835, 3122969826, 282411468,
371323737, 1749803835, 1923297555, 425602637, 3693570622,
2527541886, 4000100726, 621090981, 3693570622, 435502401,
2940060371, 621090981}, {1238950442, 2789594859, 1922800567,
4102262073, 1238950442, 4140841154, 2033059421, 4102262073,
2944524801, 3223306859, 3184404995, 830110695, 3333612489,
2034224622, 3184404995, 3743382344, 1565165726,
2034224622}, {4279784147, 859801225, 1999065039, 921712152,
4279784147, 4038673596, 596236860, 921712152, 3894793123,
2070907998, 1308958254, 4058246217, 1338381534, 613698149,
1308958254, 3832821180, 3416334823, 613698149}, {1869986036,
2694004913, 1138713128, 351424643, 1869986036, 1293722401,
1077496222, 351424643, 2123319890, 1958561034, 1827226828,
931143480, 275738471, 1685517521, 1827226828, 2943337751,
200291141, 1685517521}, {2289719415, 2972341462, 2412008737,
3186305416, 2289719415, 1680133933, 1077008360, 3186305416,
101665309, 1002218026, 2968908216, 3857868158, 3240205613,
2209256919, 2968908216, 2571006180, 3305718358,
2209256919}, {3173538830, 2745831157, 451704758, 721933735,
3173538830, 1104060332, 1720363066, 721933735, 837410646,
134304003, 1696051246, 1593368519, 630655040, 1198172062,
1696051246, 1180998885, 1986732860, 1198172062}, {1951351916,
1461089983, 2852188423, 2398700699, 1951351916, 1998914732,
3703766159, 2398700699, 3518731582, 2660857604, 2924102885,
566720713, 561176530, 4069522778, 2924102885, 1555772973,
1558347771, 4069522778}, {212294868, 3758784734, 960139870,
4179444973, 212294868, 395117343, 4079971374, 4179444973,
3802581875, 3556775789, 1666157956, 15461087, 3188653628, 31622540,
1666157956, 303834229, 1109813012, 31622540}, {3998252514,
2487593373, 3704184254, 1332220978, 3998252514, 2113123888,
3475902759, 1332220978, 1057503638, 125794475, 2015618363,
456099605, 687136754, 3379065543, 2015618363, 3151085988,
1425999183, 3379065543}, {2881076070, 1216252469, 149475077,
575808555, 2881076070, 391320533, 349068103, 575808555, 1948549768,
2837457130, 3479195151, 3904215753, 280587190, 4108972617,
3479195151, 1116566633, 2380740889, 4108972617}}, {{2737282292,
2305990443, 2282864144, 3205297712, 2737282292, 2667672243,
1204204130, 3205297712, 7276915, 1522580506, 2091411644,
1198488263, 226649669, 2575546527, 2091411644, 125034191,
1033712257, 2575546527}, {92472822, 1307489118, 4146892220,
2428000499, 92472822, 1541887892, 510672020, 2428000499, 935922304,
1473179318, 3910426444, 3357426062, 2075080920, 1051614737,
3910426444, 3725579556, 227719572, 1051614737}, {1966900739,
651793589, 3658895407, 3516507471, 1966900739, 4257587330,
707837246, 3516507471, 3256757812, 1634112225, 2727381441,
292881953, 113387731, 1187730400, 2727381441, 2156029793,
3317527509, 1187730400}, {4173917861, 3090114656, 817556203,
3787391292, 4173917861, 2953871718, 1705516721, 3787391292,
690398653, 2034180250, 93938118, 3853931650, 4035265564,
2999155498, 93938118, 2685380188, 2971093563,
2999155498}, {3866930563, 1272807136, 3668513119, 2393910601,
3866930563, 3472891570, 2127104432, 2393910601, 3099450313,
1528950630, 2936071180, 98926204, 700790322, 3617732993,
2936071180, 1327756150, 421185366, 3617732993}, {3808361324,
2767640965, 3645860436, 523638114, 3808361324, 2436421546,
796925063, 523638114, 2596837368, 1226136476, 3600765500, 60922281,
1553393489, 3543842569, 3600765500, 519769416, 1469908890,
3543842569}, {2883601860, 778485352, 847589223, 688234786,
2883601860, 2129989903, 3421744687, 688234786, 2802045018,
3624578371, 3481369947, 3618947558, 1076042186, 2380880937,
3481369947, 2421869197, 4221233767, 2380880937}, {1286715218,
4173763431, 2579365599, 1917800003, 1286715218, 3167988201,
1740083735, 1917800003, 476867729, 3114448889, 1656084047,
3103367928, 646811031, 1253368368, 1656084047, 2836784419,
2860152458, 1253368368}, {1547913780, 1695960203, 1075571003,
3047751878, 1547913780, 3858853528, 2098168098, 3047751878,
2031334578, 1092231778, 2679074942, 576357308, 495112941,
674117032, 2679074942, 3267431613, 3527001669,
674117032}, {20494290, 672893019, 3417450723, 2902381003, 20494290,
1487116735, 3585549348, 2902381003, 875985265, 3814972479,
2362004551, 3847535541, 181736283, 2205916258, 2362004551,
3155610724, 1604698588, 2205916258}, {3598503294, 2476778816,
4051073147, 113222043, 3598503294, 63883876, 20025103, 113222043,
3029051882, 3576784848, 507031637, 2765152134, 2046029295,
2484472700, 507031637, 3522086026, 805832193,
2484472700}, {2096301120, 2089751145, 3927430411, 3782598365,
2096301120, 3291528625, 3927087723, 3782598365, 680890926,
2141296207, 1274240457, 1813716775, 2108223515, 7725939,
1274240457, 4264117811, 1654580658, 7725939}, {504139499,
3542846730, 2681551825, 1858879223, 504139499, 2974203715,
3291484606, 1858879223, 2708113221, 945751738, 1072981101,
3288357695, 719154310, 3368760853, 1072981101, 1302462697,
1176489189, 3368760853}, {3316228403, 4103225131, 3936394935,
3282095953, 3316228403, 2778786590, 1709670308, 3282095953,
2506371881, 868624934, 2069873554, 2528019064, 2003524657,
1828440339, 2069873554, 566806600, 726307104,
1828440339}, {3452335153, 106530348, 500725743, 3759949767,
3452335153, 3376077781, 2720130709, 3759949767, 3189476756,
931301677, 2397378290, 2635143024, 2329512, 206927031, 2397378290,
3183265148, 545609200, 206927031}}, {{217808460, 348848516,
2708923752, 3749431174, 217808460, 1208313783, 542781592,
3749431174, 4092801160, 1684552227, 299199825, 3634541206,
3624275162, 2962469315, 299199825, 2670244515, 4021086500,
2962469315}, {1114357536, 350550480, 2465372475, 1732869179,
1114357536, 2509789412, 3638540651, 1732869179, 381829350,
881729466, 1625976775, 3498104358, 1494982903, 804213223,
1625976775, 3143925885, 438999528, 804213223}, {2457049990,
3557973226, 1543332620, 3507668274, 2457049990, 1229010797,
103212933, 3507668274, 4088394360, 742292537, 1140571071,
2802821649, 2989367205, 2885386524, 1140571071, 4093772765,
2618720282, 2885386524}, {232741719, 170237153, 4114351745,
1698887908, 232741719, 2152325130, 3706277064, 1698887908,
3712059912, 3643511238, 1312079237, 3784344293, 1905431135,
1547173514, 1312079237, 918013965, 490999994,
1547173514}, {4247643355, 3352831267, 1732506223, 2798697140,
4247643355, 2098708615, 2813869207, 2798697140, 2230538189,
2655518143, 485137636, 4117358359, 1563107974, 3363084915,
485137636, 2840623993, 889964766, 3363084915}, {366288723,
3346018419, 901263071, 950363290, 366288723, 1842485244,
3526146168, 950363290, 141104167, 1568942409, 588313388,
4248816946, 2691287218, 2014523666, 588313388, 1550754572,
1476325540, 2014523666}, {1756558336, 3396822999, 983823623,
1252923554, 1756558336, 3523638916, 845609283, 1252923554,
1273880950, 2140373745, 1127328556, 1804600645, 3702106930,
2407332340, 1127328556, 1876171062, 3541076740,
2407332340}, {2713148271, 111258429, 2200585411, 905755330,
2713148271, 1712994855, 1717718779, 905755330, 993804379,
2193427908, 4115190113, 3088495692, 777098754, 3846994569,
4115190113, 1130633118, 2894966137, 3846994569}, {4164605063,
2001397872, 3307457992, 1332083728, 4164605063, 994764472,
3095955451, 1332083728, 3078061472, 307891156, 4277126258,
93837191, 3505944903, 2737996304, 4277126258, 2269237989,
2204801087, 2737996304}, {59975199, 3196013539, 4274647327,
3058110169, 59975199, 837447492, 2642581291, 3058110169,
3553314488, 2631834329, 730404862, 1954442101, 3087115728,
879138341, 730404862, 1377874203, 3894266887,
879138341}, {753862080, 1009449717, 881783401, 3645085278,
753862080, 1303955964, 379120643, 3645085278, 3843971092,
3999383335, 1524177551, 994115785, 298833416, 3313603273,
1524177551, 448963005, 1100704135, 3313603273}, {182182402,
3064534402, 3940978003, 1210559760, 182182402, 346013324,
2250978342, 1210559760, 1329720286, 2692497, 3750995369,
2634685097, 2232490889, 3828202622, 3750995369, 1121862301,
1988547722, 3828202622}, {1663970006, 2302549379, 1579968517,
3706516165, 1663970006, 896951691, 4102157385, 3706516165,
3924318383, 187157064, 1401539421, 2227875755, 721552325,
102342241, 1401539421, 1005947216, 2953512301,
102342241}, {2945796039, 3961175050, 584883917, 2733229985,
2945796039, 275640933, 3340851597, 2733229985, 1037589465,
1386834166, 2470436879, 4079095244, 336074580, 1186860461,
2470436879, 1174950378, 1813306535, 1186860461}, {1667519782,
2373698040, 424806096, 1202527536, 1667519782, 3530502594,
2335662921, 1202527536, 152830484, 2468190893, 1896248015,
1425047557, 2551600620, 1992474246, 1896248015, 261348052,
1374531311, 1992474246}}, {{69195019, 3528743235, 3672091415,
1871391091, 69195019, 3672831523, 4127413238, 1871391091, 82758667,
3708466080, 4292754251, 3859662829, 3889917532, 1511326704,
4292754251, 1610795712, 3759209742, 1511326704}, {2931758910,
2374097189, 1113529483, 2329303404, 2931758910, 2008671965,
99651939, 2329303404, 3361372532, 3196114006, 4248550197,
1448629089, 1926628839, 972103006, 4248550197, 878035866,
964807713, 972103006}, {3536018417, 620673032, 2847216174,
3210181465, 3536018417, 1011789944, 230171794, 3210181465,
2138712950, 3331097822, 840689212, 4278768404, 1249197731,
907238901, 840689212, 2865846472, 2926293232,
907238901}, {2508077280, 661437137, 1787615788, 1588259595,
2508077280, 2385989343, 1314332382, 1588259595, 1831590873,
2980208068, 3864816447, 2546884738, 3038399593, 3497384788,
3864816447, 3182508868, 3174249442, 3497384788}, {2106045662,
531179829, 2296845920, 401645099, 2106045662, 3998063768,
4010413858, 401645099, 2042818265, 564222425, 3462340161, 36909913,
1923026173, 784581211, 3462340161, 384279948, 2653422698,
784581211}, {2823695054, 61755986, 1786076983, 2703042396,
2823695054, 1517044321, 2307614257, 2703042396, 2184503434,
1365035178, 112779604, 3925268470, 3412123799, 1405813956,
112779604, 2384208331, 4174209611, 1405813956}, {3707259965,
2921683523, 1460744709, 4210727080, 3707259965, 66803473,
520437440, 4210727080, 3242480464, 4072194992, 3630467555,
3216768073, 2645232736, 2619474739, 3630467555, 804702670, 3132575,
2619474739}, {1453913233, 1805360390, 2311925423, 4183591379,
1453913233, 2604529491, 4049009082, 4183591379, 2326052247,
2145357457, 3813600746, 4119698302, 789475914, 1776335558,
3813600746, 4095757548, 1189944887, 1776335558}, {2097509046,
1602570228, 1673266895, 752526256, 2097509046, 1991110049,
2828141255, 752526256, 4140732895, 1412103825, 282674872,
3836717436, 84549310, 3769485546, 282674872, 2724043008,
1038695719, 3769485546}, {1825015381, 151012833, 2880492218,
3221815101, 1825015381, 1268177639, 2040707498, 3221815101,
1965424493, 4228411920, 480943335, 853742067, 2602801723,
341941264, 480943335, 3287341287, 2190935098,
341941264}, {3086679378, 3869978336, 1844125870, 3548682442,
3086679378, 318961528, 4073269919, 3548682442, 2432287668,
2652937834, 1875189386, 446965326, 2907702905, 683086802,
1875189386, 3536524881, 2785783425, 683086802}, {1982184590,
334885555, 2950459869, 2590401961, 1982184590, 3913458720,
4225456867, 2590401961, 664423898, 4282213129, 2539405616,
1676082014, 3870991887, 1736653518, 2539405616, 3568863651,
945282763, 1736653518}, {289016697, 420196906, 161587717,
1473487642, 289016697, 2432008743, 3895355757, 1473487642,
2712719680, 3422696266, 2665588127, 866331678, 4059421908,
2310863367, 2665588127, 953332332, 4014679340,
2310863367}, {1902693761, 1769522507, 931489105, 2820725828,
1902693761, 957556128, 209065955, 2820725828, 1850892875,
3030761701, 2736033223, 3655548782, 3000096196, 871287627,
2736033223, 1560288698, 845790159, 871287627}, {2290484832,
401518999, 2360845867, 527205029, 2290484832, 1551367322,
2459842680, 527205029, 2903982948, 707962386, 1432097408,
733539302, 1212329807, 2734783049, 1432097408, 571109565,
1664753021, 2734783049}}, {{1155867175, 429445904, 1777070788,
604461629, 1155867175, 794711716, 1257432102, 604461629,
3956367490, 116540512, 1675130777, 4089786548, 916677004,
4022832294, 1675130777, 1942923647, 4130146837,
4022832294}, {387798431, 1312672615, 235551485, 1257046062,
387798431, 490376081, 1427609439, 1257046062, 1686241617,
2006913311, 1444587280, 2617085459, 161923120, 165639584,
1444587280, 2037453462, 1205513289, 165639584}, {737112304,
1061526345, 1183825110, 963675979, 737112304, 1535762087,
326297318, 963675979, 1572901935, 1216784166, 1146366772,
3575639796, 44510871, 1503328438, 1146366772, 3214217109,
3635460156, 1503328438}, {4194339866, 4177893681, 1046144413,
3445363024, 4194339866, 2426867845, 3160262066, 3445363024,
2362447880, 2219267779, 1064270720, 4056228301, 230768332,
3458099202, 1064270720, 4052486999, 3062421748,
3458099202}, {3737365561, 4240734206, 622929198, 1050812700,
3737365561, 543911093, 878580341, 1050812700, 4007462960,
2522422296, 548555470, 1154892003, 2938490487, 134740506,
548555470, 1727589531, 555276190, 134740506}, {3265509251,
1726850927, 3616767886, 1723951785, 3265509251, 332901774,
2356709199, 1723951785, 2399569099, 2033981581, 3549040929,
3007893075, 624596665, 4130534548, 3549040929, 2978123129,
3958841381, 4130534548}, {3101765951, 1336753577, 1280437671,
3634584810, 3101765951, 3846059767, 2991529617, 3634584810,
1980497118, 1383863499, 847308857, 2011625723, 2460774518,
3154324725, 847308857, 4169594743, 617213161,
3154324725}, {1909472435, 375835695, 280754246, 3559576374,
1909472435, 697539134, 3107663662, 3559576374, 4251175413,
3447191459, 1789143993, 2097389984, 3463234943, 296275263,
1789143993, 2081462173, 3452455838, 296275263}, {3927878074,
1304967920, 4137456834, 1878918502, 3927878074, 1744558848,
1468256892, 1878918502, 3322056141, 2488124556, 53798688,
2539506574, 597744771, 2944457194, 53798688, 3937802461, 895604016,
2944457194}, {1473406035, 1954838782, 3884376669, 911930333,
1473406035, 3689446034, 4028966669, 911930333, 1441163739,
2818867049, 895650639, 3063493626, 202155710, 3690699991,
895650639, 3499022088, 194807645, 3690699991}, {3706828782,
3622823846, 786099080, 3752485002, 3706828782, 272570612,
1940026562, 3752485002, 293325213, 3875261308, 652020716,
1458595307, 1302343337, 2884197862, 652020716, 613179368,
1494747539, 2884197862}, {1862670973, 496211406, 402523958,
4203984455, 1862670973, 3937403282, 2167450892, 4203984455,
751906018, 1742271124, 2684216609, 4036938266, 721391189,
775854673, 2684216609, 2709062415, 2918396394,
775854673}, {1620921222, 3725091997, 2691314998, 3403610080,
1620921222, 745303566, 1413978227, 3403610080, 1337530783,
1276782621, 3630584202, 201529621, 1377329334, 1802078927,
3630584202, 360946677, 3710731073, 1802078927}, {3613797222,
4164228265, 3848675801, 2233062609, 3613797222, 149028817,
98234458, 2233062609, 726664456, 1393814954, 3083923483,
1576871217, 2299677089, 3150458758, 3083923483, 1386916196,
4126093705, 3150458758}, {3629944862, 512011875, 93012811,
1396449051, 3629944862, 2257997180, 2499923958, 1396449051,
2442173264, 431205976, 930303450, 1099910359, 366661165,
2783164045, 930303450, 1631317297, 356389158,
2783164045}}, {{1135109300, 2352874613, 3325312332, 712404985,
1135109300, 546519870, 1571467521, 712404985, 1099512970,
2627363449, 1587005542, 2766486018, 1469478670, 2828288883,
1587005542, 410553827, 3866690251, 2828288883}, {414410025,
1185848176, 1181583679, 1653045514, 414410025, 3733376326,
381988982, 1653045514, 1945425936, 4094554844, 4267121909,
420803572, 138566505, 3288027530, 4267121909, 2458742268,
412403981, 3288027530}, {65355284, 2858851708, 3052316027,
1301935019, 65355284, 3961455404, 1963289366, 1301935019, 80175856,
1765993677, 3790597750, 4056706273, 3281478755, 2136361676,
3790597750, 2068559481, 3398888999, 2136361676}, {4188191530,
293801056, 2964651598, 3019211015, 4188191530, 2683163472,
4215964965, 3019211015, 2526336124, 3960267499, 2486244684,
3560842909, 4252427633, 3844599430, 2486244684, 3283485436,
2430152838, 3844599430}, {1196758134, 1741700121, 237185264,
1387140872, 1196758134, 2971687592, 762351827, 1387140872,
1749670132, 2270459619, 1381214928, 767007913, 1111366755,
3419145577, 1381214928, 429885456, 107246070,
3419145577}, {759707097, 825174423, 749051338, 481737140,
759707097, 878719633, 1487069976, 481737140, 4238062407, 555083053,
3674972444, 62603161, 2085831739, 1494013447, 3674972444,
931794028, 1485743041, 1494013447}, {101202533, 1603656961,
2744191919, 1130524081, 101202533, 499306218, 2151646894,
1130524081, 1955913150, 349068991, 3432517708, 3096606227,
1550912558, 1677686741, 3432517708, 3063490072, 1049056456,
1677686741}, {2391992038, 3691889508, 987740265, 2253324417,
2391992038, 1880267534, 39719589, 2253324417, 1444052678, 86971645,
513233413, 44564058, 1379805031, 67933059, 513233413, 2657888382,
1294996291, 67933059}, {2107525880, 1592007351, 385198528,
175882074, 2107525880, 3148809017, 3291876418, 175882074,
4127799778, 2936899148, 1934222992, 3196093040, 2005250810,
3685899833, 1934222992, 1732298394, 1942243854,
3685899833}, {133176397, 1183299853, 1822497038, 3529992299,
133176397, 1379194676, 1457174623, 3529992299, 2302655457,
1694599040, 2337874210, 845389321, 2840999768, 325500910,
2337874210, 1468441745, 383109598, 325500910}, {961584130,
2694151944, 1915277623, 234795543, 961584130, 1190493514,
3739166168, 234795543, 4258561079, 1092670963, 1470575478,
246450419, 2204665831, 1337631967, 1470575478, 1833296230,
1847115544, 1337631967}, {1560567705, 140227043, 2367587269,
983599045, 1560567705, 1528189962, 1399037840, 983599045,
3276243992, 1139661541, 98262291, 1859723823, 1624143697,
2160778451, 98262291, 3458861828, 3690957433,
2160778451}, {384037582, 1340101783, 29577830, 3483671709,
384037582, 3272912887, 1674233603, 3483671709, 2105717500,
1386333424, 237541885, 755629466, 4212839162, 2308200298,
237541885, 3249123485, 3207761476, 2308200298}, {964726501,
595108512, 481622686, 3464133355, 964726501, 2805976441,
1938386458, 3464133355, 1761003707, 2359525461, 1489856126,
3847821866, 1157162783, 36778158, 1489856126, 4007738185,
395520753, 36778158}, {974765406, 3799832442, 3357976289,
3299292894, 974765406, 2907974061, 2764080733, 3299292894,
1949491670, 1912626269, 2519328192, 1524883318, 1842665800,
1538563035, 2519328192, 4268751569, 386085114,
1538563035}}, {{2570600780, 3460843234, 2700034538, 2335494420,
2570600780, 2227597731, 1232261118, 2335494420, 166599066,
1124501551, 2540507736, 1164400003, 3257104212, 2732588524,
2540507736, 818237694, 1866530072, 2732588524}, {292276982,
3093661552, 697844082, 1188954576, 292276982, 2511664974,
1251401239, 1188954576, 2511338360, 2343537248, 1135554501,
3251740389, 2056492193, 4199239222, 1135554501, 2121388468,
3155848463, 4199239222}, {3140636559, 3213333408, 1429177194,
4040666706, 3140636559, 902332253, 3671213347, 4040666706,
1029044592, 4000127015, 2226758301, 1886560387, 1590955204,
884974087, 2226758301, 2087168228, 1753139982,
884974087}, {3710319575, 3910069381, 2858628849, 51993077,
3710319575, 3252828938, 3540268009, 51993077, 3624650744,
1261552815, 356444753, 58733535, 1634965500, 550710036, 356444753,
1075236085, 500329021, 550710036}, {2982343032, 1169418972,
1500370974, 22047040, 2982343032, 1622877607, 1300220338, 22047040,
1300368901, 3771708494, 817854337, 1456938507, 1008316214,
1625784164, 817854337, 2897537078, 2560115442,
1625784164}, {1699174125, 2141085139, 1634036662, 989485658,
1699174125, 418481819, 759287627, 989485658, 3839868036,
1550455173, 877107862, 2913047937, 3738030270, 1821976647,
877107862, 357862042, 1482985289, 1821976647}, {814718474,
575686326, 2680605866, 4002649103, 814718474, 2635782110,
109096906, 4002649103, 3040947805, 3927428565, 789790646,
442232022, 3669602586, 4163172789, 789790646, 1968560673,
965617071, 4163172789}, {475572423, 3228514800, 3248619000,
754002362, 475572423, 1958331075, 1646581402, 754002362, 655966702,
76092226, 706488081, 585555005, 1113760995, 708689546, 706488081,
2012018174, 419139045, 708689546}, {1102669243, 2625661593,
3628247553, 155487353, 1102669243, 3849329584, 3641526883,
155487353, 842507409, 2338156799, 3880807303, 3855119703,
1262008396, 1096269180, 3880807303, 2401963969, 1210247935,
1096269180}, {459957859, 3188513729, 885773771, 667065261,
459957859, 784642827, 2388680063, 667065261, 4039816650,
3919385156, 3703890126, 1414124697, 2547175916, 1520818365,
3703890126, 2650161610, 3297719233, 1520818365}, {3970199468,
1659440263, 2074061500, 415059067, 3970199468, 2389861083,
2700180911, 415059067, 1659410671, 2798460732, 1938524173,
2690686596, 1018928982, 1836340058, 1938524173, 4098854961,
3560962982, 1836340058}, {4149545048, 1314604300, 2180788629,
1138672588, 4149545048, 626151178, 1020827900, 1138672588,
135820422, 2546254144, 831116151, 1036497162, 1919249397,
2584730290, 831116151, 2018833769, 103417098,
2584730290}, {678239872, 827239274, 2649878256, 94846164,
678239872, 3448200806, 2166582678, 94846164, 2699595416,
1973852572, 2244344958, 2435450197, 1945003835, 2866004314,
2244344958, 2746616382, 2797087795, 2866004314}, {628989796,
314784777, 1418331921, 1909399335, 628989796, 633947172,
1001703312, 1909399335, 255163547, 2060649849, 2281461798,
3416315430, 3526912856, 1029077486, 2281461798, 3286174256,
382606411, 1029077486}, {2270079132, 3076945394, 150707272,
2615860705, 2270079132, 3102811792, 521837577, 2615860705,
3695935017, 998617442, 1436679917, 3902334231, 3208436650,
723601662, 1436679917, 2678049604, 103406914,
723601662}}, {{3090782630, 1628442374, 3242468721, 4166372813,
3090782630, 2282679206, 741596417, 4166372813, 2760311307,
3912300371, 3319068849, 1675229290, 1085259665, 1293182265,
3319068849, 366230236, 3168473803, 1293182265}, {2603727025,
3149485478, 1428367254, 3532111852, 2603727025, 500404765,
1754687396, 3532111852, 4265279407, 1658665581, 756122322,
2518961174, 578262892, 3186089068, 756122322, 1211781402,
4188864734, 3186089068}, {3773430454, 2468235585, 2575088157,
2341623975, 3773430454, 733258443, 1118596215, 2341623975,
2601534865, 45514776, 2130477825, 1580291641, 995988350,
3977628955, 2130477825, 207693548, 600259676,
3977628955}, {1261269623, 1830023157, 3967600061, 2081104178,
1261269623, 4153800443, 596284397, 2081104178, 2873769531,
745646810, 4102191254, 1293500383, 878627148, 1347291439,
4102191254, 736113023, 2050427676, 1347291439}, {2843172339,
2231035373, 1806730448, 1650369421, 2843172339, 2792975338,
1789495539, 1650369421, 3741987406, 3181657906, 1520984655,
713062199, 568252202, 3687588783, 1520984655, 1438381249,
1453894028, 3687588783}, {926561185, 2950279312, 790563916,
2833805942, 926561185, 4136201738, 877956083, 2833805942,
1219416476, 2401469211, 2615508955, 1098162878, 3061138405,
1428398286, 2615508955, 59314928, 758558167,
1428398286}, {3992518614, 3914211973, 1730656997, 2840114304,
3992518614, 2736564746, 2258190816, 2840114304, 798256550,
3652744742, 854214045, 1115314385, 2093028962, 3981341272,
854214045, 569010442, 1972270158, 3981341272}, {77498444,
1070507239, 3904070810, 2229285304, 77498444, 3110641420,
3443204327, 2229285304, 278611533, 314631094, 3108887846,
1405263476, 2697923227, 4196897331, 3108887846, 1900167098,
3436564969, 4196897331}, {676854127, 568650466, 4272969986,
2797491436, 676854127, 3694482943, 4153837975, 2797491436,
2793822647, 2166847050, 2304632601, 1808184757, 2750552563,
700767207, 2304632601, 3385626848, 2856480388,
700767207}, {835628171, 165198836, 1001911068, 1438273012,
835628171, 2246148877, 1676406913, 1438273012, 1569490059,
3626831178, 956778663, 2211295748, 1815192601, 3004743607,
956778663, 4202297421, 2733058282, 3004743607}, {518172526,
1449851373, 739475642, 977556415, 518172526, 1592432867,
3955686626, 977556415, 3311851781, 2092785306, 3406476392,
3275119453, 2718052422, 2672882299, 3406476392, 3145190580,
110199372, 2672882299}, {2122146632, 2977102789, 2065383788,
236464123, 2122146632, 244990374, 3922106376, 236464123, 219341062,
301939378, 540077867, 2129238146, 2433069844, 3694919563,
540077867, 731922800, 2520419703, 3694919563}, {1011510006,
3830153744, 1670865059, 2772357215, 1011510006, 1691277696,
2154410806, 2772357215, 1849895224, 2930957650, 518291486,
640956515, 459502110, 832670351, 518291486, 3067258844, 67590653,
832670351}, {3276353542, 3933677451, 2230558099, 1729521343,
3276353542, 2059608998, 424414765, 1729521343, 2250560481,
1707617706, 2217587145, 135344313, 1363889163, 2475140271,
2217587145, 2778457406, 37335008, 2475140271}, {4218145290,
3722842911, 948415749, 1033822680, 4218145290, 1221024374,
2603155579, 1033822680, 4201417544, 14480722, 1981201988,
1721059867, 2281906062, 2114502981, 1981201988, 909430413,
2781069823, 2114502981}}, {{121482268, 3116647617, 876978915,
568528663, 121482268, 2475829068, 2177189807, 568528663, 544639534,
1513734026, 3770009830, 533700213, 793326651, 958383622,
3770009830, 1150087061, 3694638688, 958383622}, {2976916793,
1691884706, 168571747, 2404658587, 2976916793, 2158188804,
4191448009, 2404658587, 1547862823, 2684083587, 3765397477,
3380901602, 1458031003, 4119603367, 3765397477, 3534176399,
3479396923, 4119603367}, {62338927, 906126073, 2798158767,
2933510859, 62338927, 1177241360, 4240166566, 2933510859,
2707010111, 4091367000, 389885259, 1268575347, 3729279189,
178016378, 389885259, 789650986, 1184002529,
178016378}, {2951207765, 2395485231, 2075145516, 4212638780,
2951207765, 671148556, 3235157352, 4212638780, 3208213311,
338680738, 1421333909, 323724368, 3405683645, 980937351,
1421333909, 448446028, 2094378936, 980937351}, {2952700002,
2302905244, 1142378033, 3349220842, 2952700002, 224784515,
3722506196, 3349220842, 3757387996, 3589653882, 4185254045,
2006223188, 1244677534, 2381808660, 4185254045, 146194193,
341372255, 2381808660}, {1571592397, 3369375773, 2204145504,
145407649, 1571592397, 347432419, 659394903, 145407649, 1941283113,
2028886430, 2690000574, 3935039161, 3592133108, 1104593159,
2690000574, 4243190272, 2457034166, 1104593159}, {179198568,
3594781674, 3021644798, 386906095, 179198568, 1281474767,
3767619826, 386906095, 1094854803, 208124234, 3595149031,
775957906, 1556294726, 798595991, 3595149031, 1453032677,
3072704016, 798595991}, {867670560, 3613001199, 834432416,
2133433101, 867670560, 2044221845, 4043998180, 2133433101,
4080517315, 1091268872, 527851489, 4209198933, 3852871282,
2942968846, 527851489, 3549906544, 4293637338,
2942968846}, {2580561525, 862067022, 2212809055, 2677316398,
2580561525, 3025327788, 1725615982, 2677316398, 1815082440,
3827868466, 585125955, 1377490536, 641792185, 4284386809,
585125955, 1512141755, 1090940362, 4284386809}, {3486242248,
4131376232, 1673783206, 273325774, 3486242248, 296683239,
3615048227, 273325774, 3787865083, 4096418302, 568040678,
1833602093, 1110516758, 2744554909, 568040678, 3515890758,
115338549, 2744554909}, {3206287043, 761909254, 2253413915,
3624857093, 3206287043, 3199872731, 373953079, 3624857093,
891498145, 2019749517, 1489977120, 3200804515, 3759902928,
2530156636, 1489977120, 2646901097, 2523509558,
2530156636}, {643902568, 117953569, 3583402601, 2397841575,
643902568, 3914257878, 4225297936, 2397841575, 3355193419,
4283593734, 3621429915, 1589633023, 2362133832, 3926916518,
3621429915, 3323739459, 534546596, 3926916518}, {3213872118,
680556753, 2480164693, 956051037, 3213872118, 312904051,
1476930939, 956051037, 4120853570, 3727325982, 914858946,
3008893009, 135894748, 3830075648, 914858946, 3374985991,
993279056, 3830075648}, {3771960066, 4191415769, 1282291514,
3671186390, 3771960066, 1261426079, 3681543355, 3671186390,
2476625171, 266967475, 1769866689, 2602569321, 1327580909,
2252783708, 1769866689, 4146605623, 4191438403,
2252783708}, {4292532578, 3774138404, 2670391583, 3944009238,
4292532578, 2826695620, 3332928438, 3944009238, 3502046981,
1192443015, 2964738836, 4159361902, 2507987636, 3433059425,
2964738836, 747959045, 2720593265, 3433059425}}, {{607380970,
2610193258, 2198271844, 1319434267, 607380970, 740092580,
2678775073, 1319434267, 4102885735, 1875181995, 3982652344,
2055073597, 3001736262, 1975983015, 3982652344, 611187071,
2092556693, 1975983015}, {2507168618, 2687593413, 2988334517,
1317690360, 2507168618, 399240205, 1189150958, 1317690360,
1165218048, 2712717326, 2888742196, 3019977656, 3521651749,
2970221269, 2888742196, 2447465272, 880904779,
2970221269}, {16047360, 3691116188, 1346223401, 3528536028,
16047360, 3475527779, 2077936780, 3528536028, 2745374441,
1001260701, 2137129319, 1727032860, 2895847378, 710612185,
2137129319, 3956229929, 99734716, 710612185}, {1842080991,
3109935091, 3903826366, 4082006648, 1842080991, 1888486946,
338232527, 4082006648, 1028861702, 2542621682, 4101695717,
2657991148, 4264593116, 419134859, 4101695717, 747864206,
2976059897, 419134859}, {806395396, 1903753461, 2240078962,
2888571437, 806395396, 2362159826, 597224294, 2888571437,
1753159753, 679653554, 3492286586, 2825161141, 875918558,
149385744, 3492286586, 204565173, 1789714272,
149385744}, {2468977284, 1833271761, 1593946969, 3375115282,
2468977284, 3143513113, 375616616, 3375115282, 1709021254,
1811957225, 3587671931, 397670345, 321251974, 1102949689,
3587671931, 3445033624, 1203825901, 1102949689}, {2760537049,
3106631744, 1542713475, 3781287485, 2760537049, 1692479301,
3889645784, 3781287485, 884591943, 3312983533, 190837198,
2375945093, 2548729504, 1557133499, 190837198, 1115795021,
3740911662, 1557133499}, {2773979788, 2554371815, 1144301620,
2261996505, 2773979788, 710199359, 2888080641, 2261996505,
614134826, 2949176293, 1267010518, 3094232897, 3496325546,
4043135299, 1267010518, 1149778656, 1612983166,
4043135299}, {1887452538, 1950002858, 2959098349, 2798155265,
1887452538, 1051618307, 1449170252, 2798155265, 3198080186,
4177652829, 3578404586, 4256705333, 4214331358, 3773836820,
3578404586, 2684741515, 821087236, 3773836820}, {2188312410,
3390487176, 1896324706, 3025845012, 2188312410, 1628017861,
3475622581, 3025845012, 1657795857, 3640331752, 1182165056,
69336718, 3702644050, 3982666145, 1182165056, 1836627509,
2354595202, 3982666145}, {3423044419, 1758118748, 3469249742,
1137902837, 3423044419, 3440035369, 1998736700, 1137902837,
3167545079, 282834660, 2710548883, 801138460, 2258912019,
1097820760, 2710548883, 24888594, 383216742,
1097820760}, {3505214566, 478558438, 2741627244, 2988495416,
3505214566, 1788745968, 3084935324, 2988495416, 3872045348,
537227984, 1780972559, 1348023856, 1132838092, 3214297332,
1780972559, 4106231685, 2846434362, 3214297332}, {256760884,
3759178004, 2278775340, 565470180, 256760884, 976841595,
3313779653, 565470180, 2708862228, 4034322798, 1564505416,
899811882, 2875184702, 755543217, 1564505416, 759874199, 13278192,
755543217}, {2236758020, 469234254, 799823618, 1323939009,
2236758020, 2319876367, 495270742, 1323939009, 3436633871,
1597833968, 1156392497, 3132103937, 4147435046, 637552329,
1156392497, 1869389825, 4159699881, 637552329}, {3861027456,
304964505, 2185185122, 3519163420, 3861027456, 4105292342,
1062166603, 3519163420, 2675891460, 279886449, 1433999328,
3839122367, 608579980, 520981812, 1433999328, 4205230168,
353934977, 520981812}}, {{2309408315, 2760088020, 1533175115,
1285620078, 2309408315, 2423072612, 357420018, 1285620078,
4056173823, 513963325, 2182983404, 2820933455, 2355671350,
3949395794, 2182983404, 2123036003, 1774568686,
3949395794}, {2500813569, 829683767, 3378723563, 815015434,
2500813569, 180649975, 3142242173, 815015434, 4264130267,
3622293976, 4277866093, 2249371766, 3146977604, 3046911698,
4277866093, 2492729814, 2576744453, 3046911698}, {1646277780,
1227220999, 2122993239, 3002678034, 1646277780, 3270884334,
94686468, 3002678034, 780095381, 1423668157, 3712014795, 390814955,
2434706877, 87281746, 3712014795, 2157354608, 2431554641,
87281746}, {4044178729, 554748104, 467969301, 1030729435,
4044178729, 1752988797, 2812778314, 1030729435, 4174387531,
1015566287, 3536237833, 157623850, 985347517, 1391529818,
3536237833, 2587125255, 423458502, 1391529818}, {1598746056,
3303343260, 762241819, 2474459917, 1598746056, 218140428,
1196064637, 2474459917, 2980841286, 3250335592, 1487367860,
1521246973, 1406132870, 2197835200, 1487367860, 3663691762,
2133883764, 2197835200}, {1197428336, 3482088360, 310254483,
538480994, 1197428336, 571730491, 911775489, 538480994, 1224655671,
2502325941, 2258964805, 2454977948, 3036723158, 2768170623,
2258964805, 1038000683, 2671124421, 2768170623}, {1908218484,
3502680982, 2469895624, 3781005565, 1908218484, 2648830135,
3517907301, 3781005565, 3540579584, 18445794, 1915310420,
2639699364, 762348997, 1381472023, 1915310420, 2866068177,
1428725999, 1381472023}, {3069812185, 3208181168, 2542432347,
2928743274, 3069812185, 3607529209, 3776082629, 2928743274,
1348429235, 950730510, 902187690, 4019658974, 1716556555, 48329260,
902187690, 821961664, 2599277669, 48329260}, {2182176592,
1860603509, 2625050978, 1999664797, 2182176592, 321744270,
265698801, 1999664797, 356266135, 1560019161, 1171489446,
1517798425, 3843281147, 3837472901, 1171489446, 3436524397,
1784243620, 3837472901}, {3276234188, 3863982741, 1384068579,
2994139106, 3276234188, 3723068499, 1829200407, 2994139106,
2414375640, 4124897232, 1634016885, 378757639, 2161076681,
3100975771, 1634016885, 1844417430, 1019061132,
3100975771}, {1354152039, 1083036128, 1019985432, 227671285,
1354152039, 362042802, 3150490417, 227671285, 983454682,
3867285910, 1283633896, 3768959266, 714795054, 4195850249,
1283633896, 2048170658, 3679000702, 4195850249}, {3946110585,
260368160, 1853745554, 2897556757, 3946110585, 3421663444,
3145856482, 2897556757, 798763723, 465921502, 2328291482,
3863948457, 2884310858, 1045387495, 2328291482, 1236131839,
796030826, 1045387495}, {990300297, 2243839597, 1117676581,
1042468264, 990300297, 2550218202, 3310392503, 1042468264,
3049833332, 987744533, 3087581481, 3055290424, 2752836083,
1588498933, 3087581481, 2926606281, 4290908028,
1588498933}, {3932965485, 2044406932, 76660843, 3740645591,
3932965485, 3321952562, 3060595950, 3740645591, 95178102,
2513215163, 1105604243, 2484220821, 2420741811, 3483511399,
1105604243, 1920164372, 741205873, 3483511399}, {475729593,
1660827878, 3003742383, 1232366114, 475729593, 2210388529,
2053833955, 1232366114, 1992425875, 2074496556, 769497234,
3387899531, 2284204944, 2431121957, 769497234, 1281254384,
4171704364, 2431121957}}, {{1380634204, 1192551435, 3533700508,
668285756, 1380634204, 3713143233, 3816217625, 668285756,
4064845753, 1337075195, 4270158447, 2679964938, 1654940598,
1318489562, 4270158447, 3713577419, 1530977112,
1318489562}, {2057456462, 2897339640, 3410402985, 1706771705,
2057456462, 3174850469, 728123349, 1706771705, 1515684518,
522138188, 2710443459, 3098163705, 2990852339, 770600793,
2710443459, 3578552768, 3249576224, 770600793}, {1249105026,
3358422070, 3395543717, 4038063126, 1249105026, 363246278,
2168451262, 4038063126, 493252920, 1711417284, 3432253975,
201692417, 3480703284, 3299888517, 3432253975, 2474407987,
1806316064, 3299888517}, {2171402857, 3049703400, 4030688141,
531091457, 2171402857, 3895139973, 1390161328, 531091457,
3082272717, 3058519788, 477609731, 2252852611, 2140252218,
2803285489, 477609731, 3164022812, 1922250286,
2803285489}, {902295474, 591758943, 847382876, 2355871533,
902295474, 2931048320, 3949682718, 2355871533, 3935740675,
2796594703, 1613420642, 3412387271, 651730634, 3735324161,
1613420642, 2792496593, 860809210, 3735324161}, {3814732591,
3605659623, 1834617826, 956915353, 3814732591, 1282074175,
71119600, 956915353, 1096633558, 2164990937, 3213913829,
2561980091, 3655831787, 993539593, 3213913829, 3772074010,
3499265007, 993539593}, {2658882772, 2195730734, 3298597677,
883705085, 2658882772, 3547515338, 1819500595, 883705085,
1213485394, 1645121444, 2914546594, 1554324281, 3007787703,
76754721, 2914546594, 1258273773, 818311023,
76754721}, {4065669017, 3661081858, 1484817937, 3193892819,
4065669017, 65180262, 1244284192, 3193892819, 1241147206,
1638833719, 3892091460, 3440632377, 516833304, 208329741,
3892091460, 3548346666, 3633562083, 208329741}, {481582993,
1085004492, 92615274, 2738501771, 481582993, 2549334483,
2065816860, 2738501771, 2123592067, 1587534941, 4203644834,
1807063723, 3363710472, 3045996322, 4203644834, 3018954094,
2937231253, 3045996322}, {1744694238, 4063014065, 3026136269,
3203024096, 1744694238, 171128417, 3004026049, 3203024096,
1526982153, 2620744026, 2824889612, 2591924509, 973989398,
4229091400, 2824889612, 4248467238, 3067248771,
4229091400}, {1824377964, 4150561364, 2476308872, 2328554752,
1824377964, 2311173985, 617021739, 2328554752, 3910342111,
3678401252, 1733825396, 1498830772, 3595011575, 155765556,
1733825396, 1766411440, 896794637, 155765556}, {2096044974,
1990752347, 3594149900, 336736481, 2096044974, 758872195,
1251419294, 336736481, 448304887, 1869361317, 4150949449,
370948546, 1621800814, 2079664039, 4150949449, 161492691,
3542921599, 2079664039}, {4049251324, 4172483669, 3176703166,
686753864, 4049251324, 1057451785, 3522730141, 686753864,
1013007249, 1858040314, 2699994965, 1860661676, 33685366,
1534819456, 2699994965, 3381126640, 3694023951,
1534819456}, {1126833245, 2376805191, 3098917825, 1255326086,
1126833245, 1906919843, 3227066290, 1255326086, 1943662723,
1329889956, 1513259363, 1646202100, 2772391079, 936646890,
1513259363, 2750991489, 1650686446, 936646890}, {4273398097,
4142084221, 895459484, 1011704985, 4273398097, 1572508948,
901597436, 1011704985, 656756723, 4060574487, 3519120535,
680330904, 3844798444, 358645418, 3519120535, 1193359799,
3159983370, 358645418}}, {{272266053, 3202556526, 824109230,
3491341751, 272266053, 2805337292, 3414470157, 3491341751,
1438760812, 2576849579, 3482051486, 3119495098, 861657108,
1816075033, 3482051486, 959489356, 3570111203,
1816075033}, {2166856449, 3467845248, 1026946745, 3627115412,
2166856449, 1565169824, 2345042216, 3627115412, 135412706,
3558527186, 4064489450, 2441164913, 1427441010, 4240216888,
4064489450, 2102314945, 2891584407, 4240216888}, {1110795947,
3937816720, 3388382946, 2291501743, 1110795947, 1204345078,
3432007410, 2291501743, 1796100563, 2994150339, 2783614947,
4028058908, 666348656, 714331518, 2783614947, 3438751245,
510361535, 714331518}, {989966800, 3692472918, 1995297400,
3256876154, 989966800, 1254783743, 1240505488, 3256876154,
1889419951, 3837485479, 3579773554, 3266584309, 3494391959,
2918371295, 3579773554, 3469357011, 65155283,
2918371295}, {283408731, 2940099205, 122099302, 4090106148,
283408731, 2200320349, 3626838486, 4090106148, 521490557,
2470805834, 3370632392, 3635042205, 2653865633, 3134087640,
3370632392, 2001875743, 1726772477, 3134087640}, {3220365318,
345099279, 2354954698, 2792365660, 3220365318, 3837739111,
2932916238, 2792365660, 571041820, 4160828645, 3178785388,
1464413084, 2245897492, 3980269912, 3178785388, 3326058682,
827591035, 3980269912}, {1734943164, 3992671774, 3391219055,
1361586333, 1734943164, 1497398241, 3596854557, 1361586333,
433605297, 656756305, 3209395492, 475039019, 1456839203,
3838369007, 3209395492, 3736654382, 1817769266,
3838369007}, {2129675196, 2673457552, 1863853990, 285700890,
2129675196, 2515316194, 496017472, 285700890, 3206226875,
2547086826, 236489012, 2687043642, 3802558529, 2959420453,
236489012, 4082486022, 1365016881, 2959420453}, {2862445665,
1391694591, 1493776713, 1650194984, 2862445665, 1976795057,
1332249597, 1650194984, 973623943, 4146368994, 2425083298,
4207783604, 2343308094, 2268472775, 2425083298, 2017673156,
2998711045, 2268472775}, {3701118467, 736035597, 3941003137,
1526438461, 3701118467, 2147788862, 3655101605, 1526438461,
2706649805, 3430848085, 1750164979, 675214442, 3906274368,
4009083507, 1750164979, 2269130155, 3665365201,
4009083507}, {3044457518, 2139033800, 3072081745, 2171102052,
3044457518, 228850836, 144159836, 2171102052, 3376280295,
492641640, 2039626956, 3185748306, 2021802750, 4156472107,
2039626956, 2671841406, 1688870624, 4156472107}, {686457718,
755015447, 3473541724, 1141176790, 686457718, 2796763418,
2183048631, 1141176790, 2643396669, 2334756085, 969269805,
3829792024, 2232088910, 3501988208, 969269805, 3464182128,
1843500325, 3501988208}, {1380374946, 2734398888, 17136384,
3735957229, 1380374946, 318536107, 3113401503, 3735957229,
3266495269, 2392343948, 2880815392, 2994320379, 3315636301,
2418434623, 2880815392, 3150458627, 52024108,
2418434623}, {1274724480, 2699576266, 2113140763, 1801951193,
1274724480, 1607594103, 1301873229, 1801951193, 3205194636,
805980718, 1744876439, 2606047828, 3601318802, 2391580866,
1744876439, 4198050752, 3151709665, 2391580866}, {987811898,
4091718169, 1949508729, 1139329784, 987811898, 99059985,
3213093274, 1139329784, 3230719414, 2898427019, 1553680774,
1744324377, 4218240851, 1372522934, 1553680774, 3182958666,
3257212689, 1372522934}}, {{3237660221, 4216039401, 1394381051,
255160418, 3237660221, 1987092456, 772100749, 255160418,
4163770641, 2853212443, 3664909559, 4240262918, 3543921700,
4185325422, 3664909559, 3200044912, 2762854843,
4185325422}, {35690583, 2638220304, 2918902946, 2713747584,
35690583, 2300605925, 627765421, 2713747584, 2133915627,
3195556801, 1893990098, 3017062825, 2982567031, 2618500928,
1893990098, 1470046497, 4237264351, 2618500928}, {3392507624,
3007592186, 1734374648, 3355201178, 3392507624, 3710123662,
21747645, 3355201178, 3164025833, 318989836, 808588626, 2471149225,
1429672146, 641329688, 808588626, 450784177, 3426896833,
641329688}, {2820705344, 3831143549, 124590158, 998684270,
2820705344, 1791772791, 4292130625, 998684270, 2587549655,
4145063890, 1678569574, 1477016185, 4162480901, 1868464655,
1678569574, 1652841784, 3407681142, 1868464655}, {602601455,
4148291214, 1353949463, 4013304348, 602601455, 1500039745,
3896702514, 4013304348, 2530632658, 1180999153, 2419594038,
887698817, 316099898, 69281710, 2419594038, 4228064347, 4088711405,
69281710}, {1755670478, 2257707516, 1006007080, 1802646553,
1755670478, 129865302, 3446926966, 1802646553, 3910080826,
2779761666, 3168708136, 696413464, 2121517268, 346858981,
3168708136, 1550050752, 2885211332, 346858981}, {3270221196,
2874806771, 3806381796, 1121257151, 3270221196, 3487725992,
1343441704, 1121257151, 3353460660, 2034770009, 1049849374,
3174523216, 614261770, 2366501106, 1049849374, 504742298,
551117609, 2366501106}, {3737164414, 861393946, 4046686626,
3200877282, 3737164414, 3717741518, 497605289, 3200877282,
978482299, 2338974139, 3840340879, 2979958414, 3493367465,
792188465, 3840340879, 2219407026, 4251338402,
792188465}, {273136529, 2827300476, 1734661616, 1180106193,
273136529, 613899434, 1992165781, 1180106193, 1533791420,
972305202, 1875898471, 991370047, 3107469479, 1992388545,
1875898471, 2383614785, 3500603104, 1992388545}, {3717279042,
485566112, 976459397, 201453184, 3717279042, 3983740037,
3145469059, 201453184, 3183253558, 2135933046, 3833824001,
1909447704, 1333287789, 3859433262, 3833824001, 1297631730,
3764728773, 3859433262}, {1955228687, 2025327163, 1805580258,
2570033843, 1955228687, 2291064609, 3462716646, 2570033843,
4060753555, 4060835728, 4211749705, 3884788913, 3773211035,
993512225, 4211749705, 1584936902, 2022469554,
993512225}, {3994478979, 1296267255, 12048398, 1293997566,
3994478979, 2104529013, 1141681757, 1293997566, 1649247358,
3722753261, 709433989, 2997676666, 1807326569, 102264893,
709433989, 2717349223, 4038432252, 102264893}, {2339553121,
611844526, 3627910700, 3669986224, 2339553121, 2878718099,
1616065704, 3669986224, 36587589, 2621779190, 3373892447,
942431365, 497306228, 339178967, 3373892447, 2025011469,
3410598209, 339178967}, {1239287374, 1435662521, 684416427,
3409985649, 1239287374, 546146378, 2357026796, 3409985649,
1277127010, 2652264596, 3809824315, 3162683019, 576285090,
4020257258, 3809824315, 2346103599, 1217293203,
4020257258}, {1786554346, 2368337584, 635907087, 3285617675,
1786554346, 1532451420, 629763589, 3285617675, 3909645761,
2992575592, 2292821512, 3227293849, 1671163098, 3612594858,
2292821512, 2670550158, 4014948754, 3612594858}}, {{2031413512,
2413249929, 3806926970, 300934584, 2031413512, 893043137,
3179822945, 300934584, 2665561897, 1242316901, 1674533845,
3571222880, 3572905305, 478845700, 1674533845, 882114621,
2378167062, 478845700}, {1375339216, 3632503316, 847617977,
3071835354, 1375339216, 4101127251, 2575196237, 3071835354,
1417581911, 994061750, 1228103463, 3741735502, 1280685025,
2636090868, 1228103463, 71690719, 1972761498,
2636090868}, {849327659, 791369590, 1702248031, 22126256,
849327659, 1727979207, 3556899267, 22126256, 608673033, 913428082,
2592791249, 3195761319, 2079317731, 2765592972, 2592791249,
2089192298, 3759047976, 2765592972}, {219265369, 667779292,
823714885, 3296604805, 219265369, 1742777145, 898095468,
3296604805, 2747488994, 3689457190, 2892200461, 2287613563,
1505716533, 1156725261, 2892200461, 221922891, 1100755307,
1156725261}, {1605468910, 3890474462, 549943099, 471433423,
1605468910, 585977516, 2651735970, 471433423, 2021014596,
2031908929, 328778061, 2300190858, 2662750501, 716602832,
328778061, 2726490354, 851112058, 716602832}, {365568357,
3029426194, 3430128519, 1933183379, 365568357, 1024311233,
2250823873, 1933183379, 3574350911, 167534374, 3257415168,
1879184234, 1374937136, 131535614, 3257415168, 1837882588,
3548535605, 131535614}, {1034741107, 2229554511, 1441524697,
2265105869, 1034741107, 4125786414, 2758013402, 2265105869,
1074178830, 2645770575, 326215900, 934922655, 644217952,
3131954528, 326215900, 515553914, 4223897546,
3131954528}, {852668118, 3360242076, 3856381322, 3040506537,
852668118, 161827078, 3626115220, 3040506537, 2640209692,
2107521044, 1724967466, 1064364031, 3296353235, 1387244644,
1724967466, 1243609165, 3135090808, 1387244644}, {1100263009,
2979752371, 1821404425, 1699738363, 1100263009, 986094547,
1614499929, 1699738363, 1759590520, 2055866774, 904942395,
1740053234, 1082125969, 3553696508, 904942395, 3010078874,
1838778520, 3553696508}, {1879092096, 683526141, 2312908564,
139707225, 1879092096, 1433984713, 1240918485, 139707225,
2924772732, 3260026686, 4196923158, 2121157575, 1728334192,
3119621613, 4196923158, 1814247924, 1315612557,
3119621613}, {2874853246, 801552897, 3498217066, 3020672513,
2874853246, 560852972, 2792908235, 3020672513, 734281890,
626139314, 165819164, 1793847168, 4107093429, 2937371214,
165819164, 188698803, 1166433733, 2937371214}, {3966797569,
398444428, 148226011, 3695424616, 3966797569, 1908468872,
3842289671, 3695424616, 1273298591, 3252339857, 4271993187,
1015567999, 220391906, 3163881167, 4271993187, 3497026624,
782407274, 3163881167}, {2557053510, 879944047, 329222739,
1882986510, 2557053510, 3851092045, 1575791639, 1882986510,
2064408671, 1022102285, 1294483608, 475773092, 1976081659,
2979261113, 1294483608, 4198564999, 785084267,
2979261113}, {767606769, 1175453562, 3110077662, 1649912214,
767606769, 3513898624, 2308040905, 1649912214, 2589207489,
524249228, 924857915, 636732539, 2263148832, 3611909575, 924857915,
2750521871, 4105703164, 3611909575}, {533765646, 2982755296,
3821540954, 2261240934, 533765646, 786056531, 2690223275,
2261240934, 3033779718, 1829003645, 1911212005, 3313229424,
2267599794, 1575367126, 1911212005, 14632745, 4165853368,
1575367126}}, {{1731798531, 3864297722, 632196679, 4151553160,
1731798531, 266522866, 4177051283, 4151553160, 3734246393,
2656231333, 170903528, 2524982332, 1322162887, 2822471992,
170903528, 2071407475, 2034317853, 2822471992}, {505782858,
807915248, 3235634082, 1087914338, 505782858, 2764925057,
2384195794, 1087914338, 1694175127, 4015529545, 1313746234,
1397638018, 1705346273, 3653936868, 1313746234, 2484299328,
3893194049, 3653936868}, {1628360471, 1872576407, 745146577,
3134263190, 1628360471, 1502689349, 1846865568, 3134263190,
1144818794, 2947483756, 3981440524, 1358765607, 2447807956,
4185157227, 3981440524, 2907095532, 3109351418,
4185157227}, {1752665661, 3862435696, 3120241607, 2353776151,
1752665661, 890570951, 75909174, 2353776151, 2402749950,
3076201597, 3200005334, 2850728736, 3486207172, 4129760842,
3200005334, 1677834656, 1671665759, 4129760842}, {1513347386,
3705157080, 2714968311, 4185116610, 1513347386, 2614208527,
172833166, 4185116610, 374253953, 3231918599, 2298744567,
2330358092, 135093029, 1131733436, 2298744567, 1598967643,
81250788, 1131733436}, {4017988014, 2502864860, 2798362495,
694165767, 4017988014, 2894272037, 2991626149, 694165767,
550314318, 1974819238, 4056638835, 2507555368, 3282401178,
992465420, 4056638835, 149190055, 2265909379,
992465420}, {3404093702, 2907722371, 2867366571, 1322786865,
3404093702, 2682048205, 2845707467, 1322786865, 2974557971,
806288438, 3471100018, 2044779754, 3015781283, 3659578224,
3471100018, 1305362706, 2393789362, 3659578224}, {1230515664,
986791581, 1988835001, 3580155704, 1230515664, 226153695,
949770784, 3580155704, 2427906178, 2093834863, 32183930,
2824425944, 1022607788, 1464411153, 32183930, 1610723613,
277697599, 1464411153}, {2506125544, 3669791663, 775696386,
1634721384, 2506125544, 3041821170, 2616651789, 1634721384,
283819668, 80483238, 2597884302, 1395534074, 2915827764,
1987024055, 2597884302, 2484602355, 4135938407,
1987024055}, {3460687767, 605494422, 2565743611, 1879169431,
3460687767, 1033460664, 4043263051, 1879169431, 3398828375,
1338091799, 1573680955, 3157719367, 4011011631, 34724903,
1573680955, 949030629, 1570781952, 34724903}, {3276033022,
1203174334, 835672191, 3947147720, 3276033022, 1781397252,
3785816696, 3947147720, 1303193959, 1643267001, 1555067592,
2107733056, 2780635372, 2240826378, 1555067592, 2608610996,
442350942, 2240826378}, {2852643415, 3796282786, 3557895539,
4037183513, 2852643415, 2752767565, 346268022, 4037183513,
2162922488, 2040329321, 3161813725, 811227116, 3178255601,
4289888328, 3161813725, 546274137, 3225021158,
4289888328}, {1873372707, 3058850860, 91264285, 3881849593,
1873372707, 184531858, 3318066443, 3881849593, 426687651,
956040185, 1095411048, 3027703191, 2531249536, 595347446,
1095411048, 3288854391, 2647221661, 595347446}, {2203270828,
507148898, 630380767, 3154023630, 2203270828, 1676334486,
416023733, 3154023630, 1862419773, 1532439149, 3648214651,
755781963, 3530082916, 2967502836, 3648214651, 3871125195,
3663678340, 2967502836}, {2320752269, 4085123711, 496641720,
2378609553, 2320752269, 3399757898, 962889191, 2378609553,
3308347692, 1449798259, 3228258920, 3015230918, 4233660609,
3194255763, 3228258920, 1669702817, 2225459107,
3194255763}}, {{4290900039, 280623348, 4178980191, 3199155377,
4290900039, 1901920839, 3106427820, 3199155377, 1774047142,
698814233, 3674905362, 4270409313, 3572939265, 3492361727,
3674905362, 3167429889, 1027004383, 3492361727}, {826363896,
614326610, 262952343, 1934119675, 826363896, 2997767678,
3188270128, 1934119675, 3567524348, 3962062826, 2208329119,
3741227945, 1933248566, 880482061, 2208329119, 4070445105,
205175925, 880482061}, {56103715, 241829097, 2460038793,
1035961073, 56103715, 2811324520, 4075809725, 1035961073,
3698455822, 3869031993, 83639542, 3469185293, 1230302420,
2271948201, 83639542, 329763193, 2411903234,
2271948201}, {1177260757, 4017252267, 305959988, 4288164505,
1177260757, 4273461426, 2481284279, 4288164505, 1625613062,
3134382704, 107217966, 549462420, 784865788, 4184605179, 107217966,
567967482, 1189429800, 4184605179}, {2433373148, 2196772650,
4064227780, 408996081, 2433373148, 188316916, 920035025, 408996081,
1657229416, 2269902114, 3498738340, 569048495, 1620020757,
1066805703, 3498738340, 673478093, 135019560,
1066805703}, {3057838997, 4011682346, 2269698346, 429648601,
3057838997, 1272075175, 2955466274, 429648601, 3536417809,
2361530061, 3010784539, 3692171012, 3667065093, 1386364785,
3010784539, 3001857777, 4079260578, 1386364785}, {96395161,
539371694, 1092556550, 827335330, 96395161, 3763169571, 1578218079,
827335330, 4104962200, 2727718898, 4288838559, 3404279696,
692013694, 1225699600, 4288838559, 1444753071, 2669405325,
1225699600}, {379097734, 3625475947, 2084056909, 333342539,
379097734, 2944208672, 418300166, 333342539, 337929267, 3505648581,
409954030, 3398162498, 1044831206, 2732536445, 409954030,
3374588386, 1231107067, 2732536445}, {3902359972, 1884754759,
2552848969, 439909833, 3902359972, 2004746826, 2310151238,
439909833, 2935091495, 1630646333, 2061686186, 4161410671,
2141557557, 4148946889, 2061686186, 4175695071, 2900071714,
4148946889}, {2963456150, 2337231210, 2625600235, 1918117806,
2963456150, 1946098288, 635887182, 1918117806, 68058625,
1546467979, 151877124, 3634975465, 2638755179, 3249719425,
151877124, 1637240461, 4215633308, 3249719425}, {491901437,
1160228383, 1595599448, 4125397389, 491901437, 195821118,
4074509725, 4125397389, 3427241569, 2961422663, 2559990958,
3684816933, 1117957823, 1193593392, 2559990958, 2942528735,
2689560428, 1193593392}, {1170497671, 2906222607, 3094336698,
3627573759, 1170497671, 1921927973, 545164662, 3627573759,
1700493457, 3182943863, 4262743528, 2878757823, 4114669800,
2408251701, 4262743528, 4165007723, 89238831,
2408251701}, {2763247569, 2610183051, 1881736284, 690961809,
2763247569, 3182419779, 82242357, 690961809, 571239721, 963252233,
4084860538, 2153606867, 2588542673, 4169894927, 4084860538,
54942416, 890597888, 4169894927}, {1220844336, 817590918,
3926254763, 1216196496, 1220844336, 2113496301, 4089812320,
1216196496, 575329368, 1889914987, 2045407448, 1637651789,
3463310486, 1831049905, 2045407448, 325575207, 2380192587,
1831049905}, {1282026768, 1664481221, 564050910, 564995505,
1282026768, 3701549914, 3363706805, 564995505, 3665115444,
307883520, 1892548273, 3183946628, 2137982777, 3442398959,
1892548273, 4118662788, 3432627472, 3442398959}}, {{3514751918,
2021244538, 732435953, 1307754719, 3514751918, 3736721708,
1214919992, 1307754719, 1189899255, 1609425246, 3704346564,
632453145, 293694496, 2169560691, 3704346564, 637306236,
1076348534, 2169560691}, {1335684482, 3977541427, 2079007086,
1533534334, 1335684482, 1497577018, 525643282, 1533534334,
4089172695, 547142630, 3056527841, 3044937468, 1924239834,
372115891, 3056527841, 2830541169, 3928812480,
372115891}, {2514447281, 1397973230, 978015612, 2992100005,
2514447281, 2850360626, 2852461785, 2992100005, 851614119,
481162011, 1157890357, 2953685245, 3280219833, 3652440052,
1157890357, 3140353867, 1383186997, 3652440052}, {3274545167,
938934351, 4028573983, 2762754934, 3274545167, 3057872364,
3846844247, 2762754934, 3075256652, 4092197741, 1025573319,
4036331438, 3276803366, 1660852083, 1025573319, 1389092450,
3635660815, 1660852083}, {1260428167, 2360949430, 3595448064,
2626409409, 1260428167, 1188404397, 1479462144, 2626409409,
1356476668, 1426976484, 2903240813, 2695585089, 3854329533,
2683005143, 2903240813, 1291869629, 1323793242,
2683005143}, {2839247420, 1231881661, 1109942153, 887967109,
2839247420, 3381172536, 3655047107, 887967109, 1027674032,
718571338, 3730950473, 1091419714, 2191610434, 56767734,
3730950473, 111909274, 116994667, 56767734}, {2433847057,
4032596403, 2089286798, 3716427472, 2433847057, 2713114448,
3899800153, 3716427472, 3084422684, 4255875513, 3157633492,
3413552779, 2821500332, 336318787, 3157633492, 10025372, 391538001,
336318787}, {175667448, 1723133625, 4182510911, 2880151048,
175667448, 1121709186, 2523330453, 2880151048, 2597859300,
1136777988, 52572783, 2636566855, 112458461, 1360732901, 52572783,
4101068693, 2887812973, 1360732901}, {1051621590, 1009714413,
1291607291, 2637245980, 1051621590, 2616267354, 1563476086,
2637245980, 3475298403, 233353990, 2587358367, 3540845540,
387836582, 1331694098, 2587358367, 1604100349, 2366659802,
1331694098}, {2035640885, 4234718111, 3625348694, 279002466,
2035640885, 4167581230, 2095974483, 279002466, 1941802004,
1701138269, 1235170544, 2241385612, 2268885372, 1762344008,
1235170544, 1088819811, 2459681948, 1762344008}, {57358912,
3574101205, 4270777765, 3417005932, 57358912, 1810269500,
3584759597, 3417005932, 1290050021, 3162257498, 3896348330,
2477002427, 1553018822, 1296379974, 3896348330, 414111967,
3523590314, 1296379974}, {801344824, 3108506226, 1884501834,
413417024, 801344824, 2180315267, 3359465250, 413417024,
3721817818, 3907528846, 1698926010, 2283920639, 528804686,
4095500737, 1698926010, 2447600887, 3644956310,
4095500737}, {4221808861, 2192890492, 1838057295, 4244875377,
4221808861, 1303998102, 840007089, 4244875377, 2781849698,
695884695, 1150569670, 4149754370, 723778947, 2633360590,
1150569670, 3367546537, 1147146201, 2633360590}, {2171606265,
713066341, 4157990372, 3449094041, 2171606265, 2041045785,
3892718001, 3449094041, 340279647, 589816719, 1079502304,
3997377532, 1106842543, 777934113, 1079502304, 2833356628,
3692238559, 777934113}, {1483124813, 1641736987, 1646411632,
2128346339, 1483124813, 1515066999, 1953783071, 2128346339,
4175709263, 2380480324, 1873392190, 3167585478, 3155942303,
656161001, 1873392190, 1960196174, 2754060215,
656161001}}, {{3179865161, 3758716888, 391120388, 1404283933,
3179865161, 3736767353, 2982534313, 1404283933, 484148868,
2531965909, 3094157668, 2278849016, 3821833900, 3455696508,
3094157668, 3978804036, 536919193, 3455696508}, {3622350766,
3759983985, 1500151924, 1128973399, 3622350766, 1613561693,
2133702321, 1128973399, 2138867468, 2446306581, 1433592392,
808215503, 3671109604, 2125991744, 1433592392, 3790557569,
890897326, 2125991744}, {1052744045, 2541155134, 346250782,
709982328, 1052744045, 3808746634, 2430723917, 709982328,
1272813809, 4277058832, 284988983, 2044803401, 3227966904,
3177388361, 284988983, 2994552097, 1344488735,
3177388361}, {90498489, 1855245979, 4220122612, 3786576552,
90498489, 2810527099, 2698781808, 3786576552, 3027706760,
3048775967, 1789634890, 2252266098, 4130736474, 3524411799,
1789634890, 1838275365, 932865240, 3524411799}, {926043968,
3308482993, 2366952216, 2715877927, 926043968, 2161133226,
950345843, 2715877927, 2435935618, 143197755, 601331389,
4140205762, 8821334, 1621299559, 601331389, 1150597024, 134090481,
1621299559}, {3170751630, 2219637217, 3470130442, 1131439682,
3170751630, 191193907, 1152586716, 1131439682, 2105864857,
3700793188, 3866921981, 3877644423, 4148023628, 1433585261,
3866921981, 3337966045, 3765830124, 1433585261}, {4200333550,
3887313678, 1548670082, 3967050176, 4200333550, 4242277964,
1648310922, 3967050176, 3517450384, 1088836814, 1272105141,
4003580220, 1398895464, 372981067, 1272105141, 3637238, 2188645829,
372981067}, {3707591763, 2533383962, 2667061910, 1110440720,
3707591763, 2163873618, 917457922, 1110440720, 3739389517,
2648614071, 983864203, 314407045, 3734776305, 1773339925,
983864203, 1999624391, 948403862, 1773339925}, {3399685405,
1498259552, 335977594, 1489481084, 3399685405, 932745691,
3023594692, 1489481084, 3340552745, 1760608012, 1937217042,
898324201, 4125187770, 646948566, 1937217042, 4163160567,
4023984155, 646948566}, {2274644307, 1526117760, 2809236743,
1300134112, 2274644307, 610154273, 819649010, 1300134112,
611178153, 728332364, 4016270358, 1482551502, 2291620547,
1218215302, 4016270358, 4014370276, 2738902870,
1218215302}, {1523867240, 406601871, 2458752809, 4184767280,
1523867240, 2006800, 2291982710, 4184767280, 2319533484,
1160593741, 2058090716, 162997241, 1081017808, 3225020759,
2058090716, 2792768896, 3647877010, 3225020759}, {1120982854,
2157902557, 1107018173, 213023128, 1120982854, 1289665822,
821316937, 213023128, 3440567542, 2188623418, 2529043017,
2142588179, 1858653225, 1928167136, 2529043017, 1690025039,
2078532030, 1928167136}, {1482492050, 1864485675, 608924704,
82600475, 1482492050, 3831331653, 1226399920, 82600475, 876807571,
1520021961, 3063342812, 2041963354, 3012650770, 2515926148,
3063342812, 94132572, 4275611558, 2515926148}, {2722631375,
2979211453, 145303850, 2233528074, 2722631375, 2776611616,
2704349698, 2233528074, 3225924699, 4066039751, 3636044049,
668554182, 2691791214, 588894479, 3636044049, 2806555799,
2464126779, 588894479}, {432525877, 2453123524, 2553894927,
2974832964, 432525877, 3079645759, 3526001605, 2974832964,
1249623772, 3176081393, 45532245, 3846904746, 3249509424,
407344860, 45532245, 4255131403, 2513899191,
407344860}}, {{3291428549, 2072837757, 1193168720, 1812182123,
3291428549, 3422065122, 3693349190, 1812182123, 1545226000,
4014658840, 3760936985, 3758858458, 1003573324, 321802921,
3760936985, 2112167358, 1099164995, 321802921}, {4088518247,
1282224087, 3991553306, 2131723358, 4088518247, 2882890127,
3262178024, 2131723358, 3230096243, 2825656399, 4124675351,
94452540, 2527961345, 2196438580, 4124675351, 4266375092,
805386227, 2196438580}, {1799810299, 626882716, 4054810152,
2080028644, 1799810299, 720257098, 2684719080, 2080028644,
3232052958, 3634311070, 3298480980, 1603312853, 2043476219,
962978332, 3298480980, 204693794, 3814399969,
962978332}, {3410986694, 2846958957, 3684514720, 1722796810,
3410986694, 3350228505, 3697719854, 1722796810, 301207261,
1773862183, 2624855312, 4098470056, 2708679078, 66735368,
2624855312, 4186703168, 2228005807, 66735368}, {3301255996,
3260599264, 2165923343, 1216601290, 3301255996, 3331397611,
725463727, 1216601290, 3425332209, 1882092413, 3267253246,
1902786417, 2645471852, 3117234458, 3267253246, 98300176,
4042105808, 3117234458}, {1890913372, 928307593, 225727143,
3319692776, 1890913372, 1715640681, 3795749903, 3319692776,
3625524738, 2881230326, 562558814, 413766233, 1706424966,
320933009, 562558814, 3744070526, 1915174474,
320933009}, {2958499183, 1848775765, 1522753070, 3551649532,
2958499183, 4162618234, 1367036681, 3551649532, 4095128844,
3800820679, 2873503619, 2279579324, 3537737308, 3934588882,
2873503619, 3461269579, 2254841117, 3934588882}, {1285250577,
2764245175, 1105070646, 4236235786, 1285250577, 3540401964,
3871128158, 4236235786, 1532963114, 3597347398, 1105106652,
2862886282, 4047666135, 3072642883, 1105106652, 2690305546,
2746897053, 3072642883}, {3860245778, 1367204462, 595903220,
2246104180, 3860245778, 1908472715, 1596486723, 2246104180,
3024009600, 1682550309, 2320235487, 1630519363, 144911261,
3363285863, 2320235487, 720496218, 3226989775,
3363285863}, {2047742419, 2109897740, 654443081, 2563937648,
2047742419, 3298575982, 2746910512, 2563937648, 3740934706,
1622747589, 372840925, 1201903815, 1901161856, 1498353481,
372840925, 1424606567, 3428325510, 1498353481}, {1720720378,
838556730, 1562000691, 3081063009, 1720720378, 3175910157,
59642517, 3081063009, 1880202235, 2942138329, 2564328310,
3243866790, 1141460510, 379222555, 2564328310, 4276165749,
568929237, 379222555}, {2604901554, 2798059827, 3134968130,
1728254085, 2604901554, 1505600996, 119873755, 1728254085,
1240524792, 2313448358, 2617184648, 2102395010, 3119630359,
3754310983, 2617184648, 3947637114, 2829438112,
3754310983}, {738302419, 4067455371, 3502068253, 2861980889,
738302419, 3223237546, 3631218395, 2861980889, 3816578609,
1243021017, 3136524018, 2500342427, 2080374278, 3991087733,
3136524018, 96564230, 264838384, 3991087733}, {632183943,
2630662559, 152578308, 1067606885, 632183943, 2025997689,
482418964, 1067606885, 1468859634, 2296152269, 2517499860,
3966641526, 3237758154, 2033651727, 2517499860, 2324222273,
3918276995, 2033651727}, {1609491518, 2406429337, 1015003691,
1540163028, 1609491518, 3217817937, 3170708090, 1540163028,
964868630, 1672929833, 62402909, 3631636823, 4124674991, 526124226,
62402909, 4260427257, 946832060, 526124226}}, {{4004365908,
2299166464, 4238928187, 3068599594, 4004365908, 340633153,
3034582784, 3068599594, 210906218, 987156327, 3027413363,
3872967050, 3159432673, 232906611, 3027413363, 4051554873,
3873338256, 232906611}, {3948054919, 3438340286, 2399101442,
3955606166, 3948054919, 3199954992, 3081246407, 3955606166,
2274701639, 4288868534, 1447386846, 816212890, 2670438424,
1160686753, 1447386846, 1635979789, 3676603152,
1160686753}, {3333667032, 2404397407, 1309772249, 4037535932,
3333667032, 633837171, 1219209632, 4037535932, 1699759143,
3773572468, 792615675, 4000461186, 2249558877, 232406022,
792615675, 351811028, 1438391315, 232406022}, {3042453580,
2674782930, 2578519273, 1182303684, 3042453580, 4188299661,
201533985, 1182303684, 504137100, 405299994, 3440193648,
2652790808, 3520937545, 3825238244, 3440193648, 2362389441,
1445162354, 3825238244}, {3320840707, 3026546742, 128640966,
2717374630, 3320840707, 1375705778, 1743344011, 2717374630,
592752793, 1485833084, 2565782177, 3520546605, 595979811,
1153297111, 2565782177, 3755481813, 1584881761,
1153297111}, {3450980261, 1054833852, 1618563336, 3074764013,
3450980261, 1199760826, 228550476, 3074764013, 1370637124,
135106935, 4182411213, 2398102705, 3692855966, 2264796250,
4182411213, 4156333842, 1995295374, 2264796250}, {2907234375,
1092238667, 3445439485, 3710031515, 2907234375, 528870942,
2854732050, 3710031515, 1611431067, 3580357515, 3411234559,
3111723660, 3713963703, 1510709042, 3411234559, 4137143940,
3654924984, 1510709042}, {1076783073, 164438428, 1634588989,
2578452047, 1076783073, 261861891, 3140440866, 2578452047,
1382964588, 2069333087, 1455977136, 1039994763, 2433255524,
1984094858, 1455977136, 2027397575, 532165989,
1984094858}, {2532185665, 1135289565, 3062805500, 2611640170,
2532185665, 2260058290, 3719838959, 2611640170, 1806082255,
2620524685, 4033679701, 364359692, 3145117592, 4162460747,
4033679701, 411576745, 2706572732, 4162460747}, {3008710738,
1490567105, 2020031417, 3585495714, 3008710738, 316260234,
1122119678, 3585495714, 4176701833, 3084567190, 2736776780,
1110199666, 1496307045, 154841809, 2736776780, 1275201873,
1322574605, 154841809}, {3799059956, 126838375, 2467448911,
3275952471, 3799059956, 1671921785, 58436504, 3275952471,
182268988, 1090233980, 408914598, 902537192, 4175648912, 433123787,
408914598, 3772232739, 1875701401, 433123787}, {2667038252,
2896444472, 20879392, 253397989, 2667038252, 476551235, 3710003391,
253397989, 2923250664, 1792877257, 3514952071, 4201381296,
3331739241, 1114696906, 3514952071, 249072369, 2176108884,
1114696906}, {1727138303, 2412458112, 779768765, 807167497,
1727138303, 380260625, 1615189373, 807167497, 1064250869,
4288045420, 3452261767, 827316818, 4208599283, 259951996,
3452261767, 522912574, 3605790986, 259951996}, {2363989886,
17579290, 1962366311, 1271400610, 2363989886, 3424373120,
2529737790, 1271400610, 942228369, 1351149998, 214293969,
3059547798, 3309253011, 2041200841, 214293969, 1187066710,
2941656966, 2041200841}, {1948744398, 4036241737, 1564775693,
1805077493, 1948744398, 93501401, 2145592161, 1805077493,
2365733982, 3044908159, 1626832916, 1236581455, 4145982416,
4009535226, 1626832916, 810090874, 1876014673,
4009535226}}, {{2908898908, 86050422, 1213100579, 526867394,
2908898908, 2687252475, 3102803247, 526867394, 2529186343,
3912995069, 2033851810, 3686294589, 3843367307, 3680843319,
2033851810, 3516795313, 2332949611, 3680843319}, {1053552056,
4002399925, 1635227281, 538108523, 1053552056, 4193870709,
790337895, 538108523, 2690118316, 455633660, 641380480, 1723000412,
2145297779, 967423689, 641380480, 635932799, 1724183394,
967423689}, {3122368790, 2578750044, 3794884445, 2678342194,
3122368790, 3769235275, 4048456688, 2678342194, 1934635577,
4269455046, 752843557, 1546279541, 3382133383, 743327961,
752843557, 2078921540, 3817146458, 743327961}, {166243972,
2128782357, 428569070, 4205383007, 166243972, 1095349745,
1812304090, 4205383007, 2123712957, 1399334152, 273828453,
4084843716, 3112810093, 2130938335, 273828453, 2511584766,
1534972306, 2130938335}, {1961979279, 585285043, 2284701379,
290505355, 1961979279, 74485575, 4085693250, 290505355, 1735035809,
1059570184, 303326672, 1267207775, 589131299, 2035870992,
303326672, 201201961, 2161881798, 2035870992}, {1271449583,
2116594270, 2103513260, 1640006818, 1271449583, 367329306,
839902291, 1640006818, 894986302, 3658452654, 3431342036,
2713344567, 2641739085, 2966757766, 3431342036, 1627106982,
3557907827, 2966757766}, {3844312173, 3804137266, 183814247,
152874554, 3844312173, 103366292, 2043521996, 152874554, 686371398,
3673954433, 1991737811, 3036158822, 3078932982, 2817075782,
1991737811, 3023637379, 1288143068, 2817075782}, {3621125056,
1927692878, 4262164578, 3569679412, 3621125056, 3808641551,
4107175982, 3569679412, 1330151766, 2777887189, 403188859,
58789558, 2092073970, 168278549, 403188859, 190177712, 541167592,
168278549}, {2411074146, 1949247379, 1954645400, 203894202,
2411074146, 2372145203, 2891151238, 203894202, 2569647272,
1811990187, 100816607, 4147253715, 849410059, 3144790357,
100816607, 145858739, 842938465, 3144790357}, {4188490076,
2913697289, 2596622424, 2121437009, 4188490076, 2548746255,
3060019458, 2121437009, 2049531697, 2928868834, 97657552,
1751146903, 1354192475, 211564399, 97657552, 2827838989,
3139804205, 211564399}, {4115560303, 3882559608, 2364769240,
3413474739, 4115560303, 2038580445, 3031041848, 3413474739,
1032131834, 1654012724, 2129397153, 327748970, 2996610873,
156555722, 2129397153, 1103105463, 3533481890,
156555722}, {2456485740, 834096815, 1818617085, 3732834681,
2456485740, 297727134, 466628750, 3732834681, 4091558631,
4103288151, 3427777045, 1247469758, 589375738, 664028138,
3427777045, 3240810721, 360061317, 664028138}, {2153554308,
1201997039, 1275843034, 3356165919, 2153554308, 727988140,
674985468, 3356165919, 2775330589, 1446814948, 1073331092,
3575267521, 271051737, 62871475, 1073331092, 797503411, 3608447915,
62871475}, {3475172973, 4286690341, 1319538278, 3303040859,
3475172973, 4160798835, 3606461240, 3303040859, 327396059,
2499583263, 3885566636, 1518909811, 2526744450, 3972048177,
3885566636, 4109276056, 7945907, 3972048177}, {798125497,
1759339729, 2716140374, 4045305875, 798125497, 3468584533,
661033330, 4045305875, 3606806185, 2005379293, 2059615989,
46211558, 2598415204, 1818860931, 2059615989, 714940449,
3523832777, 1818860931}}, {{2729678535, 4279360935, 3379124758,
2442739556, 2729678535, 1629626641, 3894922338, 2442739556,
3606295184, 2417192332, 351187677, 3347241070, 1312056270,
634843389, 351187677, 2092828966, 4082275720,
634843389}, {3629539480, 4025926501, 852514024, 4249024666,
3629539480, 3253349463, 919210106, 4249024666, 1052092278,
3298601960, 3083745876, 4200577503, 3370743767, 443276110,
3083745876, 271102234, 1113643788, 443276110}, {3870151604,
2257727047, 4206383948, 3634978526, 3870151604, 1910917093,
4202589308, 3634978526, 2711810981, 2517854433, 4101230989,
4143478832, 3379348808, 3590857185, 4101230989, 2337971568,
3927998336, 3590857185}, {3197545170, 1976203831, 1884529704,
1252747620, 3197545170, 4227339509, 2133571953, 1252747620,
12394571, 3794766611, 144639933, 77963866, 2646475951, 3533393557,
144639933, 3400275098, 764977733, 3533393557}, {1965629728,
3936770970, 3402754508, 755337184, 1965629728, 1742774417,
3051109776, 755337184, 2628942652, 659198105, 3718843364,
2782618441, 2339730507, 878523653, 3718843364, 978544901,
1283972682, 878523653}, {2029416251, 3366782607, 1096100666,
452832640, 2029416251, 2939334015, 1101195955, 452832640,
2331594780, 2300259911, 4096393357, 410940557, 962932343,
914011908, 4096393357, 3635095314, 1379977154,
914011908}, {3265804634, 1220735951, 3404307618, 2513961362,
3265804634, 2722453189, 4113460355, 2513961362, 2216972702,
4057726734, 4229189541, 3321240812, 636990017, 4178486523,
4229189541, 7555275, 91679821, 4178486523}, {3542657885,
1546667401, 745203060, 2671019282, 3542657885, 3173738401,
2847338542, 2671019282, 2986331025, 1981721347, 3274748603,
4238693771, 4164637970, 4064854722, 3274748603, 2872196602,
1198665008, 4064854722}, {914697583, 3663538873, 4110092094,
4108960924, 914697583, 29484944, 716837900, 4108960924, 564816203,
2695814829, 3697835298, 2024864567, 1648946406, 4205748652,
3697835298, 1235798673, 3545268246, 4205748652}, {584665331,
3072929538, 935407479, 1308019352, 584665331, 2721990050,
1824121179, 1308019352, 3475245690, 1969086166, 3000164892,
2494369285, 3213078611, 658075764, 3000164892, 631337149,
868441731, 658075764}, {3716973286, 919140074, 553016820,
1204116678, 3716973286, 675934421, 446248158, 1204116678,
2367599030, 2092234532, 1102760986, 2818928245, 261952173,
4294509396, 1102760986, 1054152352, 3546688363,
4294509396}, {3293370693, 2120370930, 2487351160, 987948282,
3293370693, 2154496016, 836901607, 987948282, 1254243785,
3331497439, 1904530179, 2014861157, 1121637945, 1202027740,
1904530179, 251455117, 1218291611, 1202027740}, {3000951292,
210589449, 1003182103, 1151335465, 3000951292, 3030009760,
379309230, 1151335465, 3387111151, 1555644948, 1096779672,
3425935236, 4229417349, 133998326, 1096779672, 2168017189,
602854428, 133998326}, {99887253, 407199536, 4012293175, 976296831,
99887253, 3245218993, 2444588607, 976296831, 614238014,
1996334021, 184407828, 1130199284, 3959662009, 860183345,
184407828, 2577917907, 3722900937, 860183345}, {1445199842,
1256197293, 3160538267, 1715703880, 1445199842, 1520448575,
2536272162, 1715703880, 2559284397, 1426699760, 1130124341,
158486332, 1138404320, 742136141, 1130124341, 1781321781,
4198515709, 742136141}}, {{92565032, 257065974, 2786645250,
2311336951, 92565032, 1493974713, 2911336433, 2311336951,
2613012997, 3726919367, 885864931, 1895527787, 1344421653,
2279220396, 885864931, 1417574285, 2355957139,
2279220396}, {2509257810, 1146336783, 2979919489, 2776053372,
2509257810, 3770626858, 2204068573, 2776053372, 3424925004,
3410622769, 3175444953, 246118669, 4290541487, 2898100178,
3175444953, 348923199, 2427331008, 2898100178}, {3511408930,
2666607166, 634471839, 2215361770, 3511408930, 1105380130,
3750482090, 2215361770, 2499905758, 2277317349, 488341106,
468535899, 3877861726, 55373162, 488341106, 2739617092, 3987120186,
55373162}, {1949809417, 1194193824, 3088339524, 827894421,
1949809417, 1373055755, 515339473, 827894421, 1474384834,
1145356382, 4182706556, 1201342255, 3696899246, 284442065,
4182706556, 2295560707, 4064194676, 284442065}, {4147063048,
352467977, 104747063, 1464831324, 4147063048, 1737209131,
673124742, 1464831324, 811682426, 1983142708, 3161673906,
808492591, 1200638109, 854963956, 3161673906, 2185360428,
3894612396, 854963956}, {183217259, 533708602, 4048695575,
2367252271, 183217259, 2079352492, 658497461, 2367252271,
1759193844, 270201416, 1613992473, 2390399884, 901075807,
2146747531, 1613992473, 1430380976, 896368240,
2146747531}, {109366125, 3360277248, 552179285, 2202319015,
109366125, 3199388318, 2821035593, 2202319015, 2604083920,
3992043804, 238941078, 3045719234, 957806905, 1033390767,
238941078, 3176316290, 4214343810, 1033390767}, {4062509844,
2060641859, 3019008744, 1289872272, 4062509844, 3433422861,
3700877161, 1289872272, 1825805135, 496247378, 1624967553,
1157949454, 798014134, 656615546, 1624967553, 3724738272,
442908965, 656615546}, {1294152074, 120937588, 3153569646,
3643106446, 1294152074, 3315724193, 3880807273, 3643106446,
1110529092, 1229315533, 1702251945, 1996197369, 520651142,
4085556909, 1702251945, 2017159765, 1465806476,
4085556909}, {473948224, 1333739502, 3487031015, 1764505587,
473948224, 633183200, 3485779436, 1764505587, 3275619611,
2180513704, 2699750107, 2718176683, 1754455852, 2415550298,
2699750107, 3340104045, 2908194857, 2415550298}, {914160376,
176936203, 1218008505, 1466641753, 914160376, 3100257041,
379091939, 1466641753, 1342958838, 320803805, 3280134864,
264991624, 2731178339, 3254967153, 3280134864, 3489468987,
789779004, 3254967153}, {478140738, 20913244, 3630110705,
207065438, 478140738, 3241255407, 2635145699, 207065438,
3649677977, 3398645998, 3122382780, 2205556702, 2257745277,
1328712410, 3122382780, 3614915327, 2710816935,
1328712410}, {2567996318, 2094433326, 2414316512, 2041251744,
2567996318, 2846183244, 2056360158, 2041251744, 1704532635,
1622797842, 3643628505, 4010558652, 1877394215, 353233466,
3643628505, 671027474, 590107503, 353233466}, {1655058924,
2266474132, 3266898345, 3651863350, 1655058924, 2346780324,
363692550, 3651863350, 4083888222, 3110655118, 1845588340,
314920879, 1419069452, 1228972469, 1845588340, 1037408725,
4141615115, 1228972469}, {2202115886, 2204295311, 3164032751,
887636120, 2202115886, 771714323, 1186533451, 887636120,
2543780991, 2775289385, 1980940153, 918854465, 1908695354,
710558276, 1980940153, 1794212991, 2308769492,
710558276}}, {{60268595, 47309624, 4096010585, 4253338264,
60268595, 698444416, 3424495942, 4253338264, 3842597153,
1853486796, 3820679930, 562287964, 2961990151, 265689579,
3820679930, 3009083380, 675056541, 265689579}, {3618850300,
271787962, 2538793204, 2043518992, 3618850300, 1330201507,
1593435980, 2043518992, 2662288323, 3431158427, 4161492847,
1017447188, 226142150, 1675739167, 4161492847, 760605578,
2319843005, 1675739167}, {2352110692, 265547447, 3367780341,
170978053, 2352110692, 175903832, 1258089776, 170978053,
1177494705, 1973739759, 3522463659, 855175401, 590519806,
497640593, 3522463659, 2309875696, 3005114205,
497640593}, {1522130854, 2847712653, 3036916315, 997594656,
1522130854, 1160477043, 2398808739, 997594656, 741020448,
2495653141, 1812793060, 1168460586, 2111094408, 1759873736,
1812793060, 2154570180, 2334568602, 1759873736}, {1217712917,
3526690167, 690665294, 229123252, 1217712917, 668254219,
3329037315, 229123252, 3026480083, 1545370735, 3359371694,
3988997253, 133725548, 3688848963, 3359371694, 1224915758,
1645876077, 3688848963}, {1091790487, 273865704, 2226046119,
1352009588, 1091790487, 554324048, 3683234183, 1352009588,
1700890463, 3676167916, 3472308225, 3380069480, 3967068313,
3284594322, 3472308225, 546295846, 1149793848,
3284594322}, {1875526903, 49030556, 1631562069, 1668228073,
1875526903, 4287083180, 2616427751, 1668228073, 575879716,
2225145569, 2285616099, 1542838632, 4159278264, 1785093079,
2285616099, 3213946784, 53885944, 1785093079}, {1621136330,
784545882, 1553642730, 1889728930, 1621136330, 2042322941,
53329096, 1889728930, 2654964886, 3532023115, 2340275074,
725706104, 2168960688, 317621194, 2340275074, 664971082, 868104288,
317621194}, {2437539407, 1088147466, 48652703, 2041810917,
2437539407, 2974834965, 7471339, 2041810917, 4125172241,
2256636382, 553316582, 3977564013, 3348538895, 2644740135,
553316582, 279286037, 3932880138, 2644740135}, {2767516839,
1967973254, 396617122, 4267620040, 2767516839, 1615344995,
3476014624, 4267620040, 1729073482, 912232907, 3416933659,
1587194462, 2687676063, 3986137775, 3416933659, 322954476,
1361571890, 3986137775}, {731897082, 1445362099, 2595144917,
1447801550, 731897082, 3988455018, 188539054, 1447801550,
1188582180, 2050052185, 3881055380, 264403432, 940006676,
1957269898, 3881055380, 2469328015, 3769418331,
1957269898}, {2513781045, 3987302058, 1455564465, 798626824,
2513781045, 2219774094, 3879214027, 798626824, 900526416,
1912059035, 2919944362, 3588451359, 1464119531, 3585587043,
2919944362, 2463381051, 2378713321, 3585587043}, {2588128884,
397658028, 3082940156, 608952440, 2588128884, 2562104176,
4234655284, 608952440, 1227298854, 1114160754, 3155034092,
3835828968, 3069142880, 3109358041, 3155034092, 4202957017,
3866743692, 3109358041}, {2601868945, 667128524, 2480183392,
3926034488, 2601868945, 82621702, 2686605836, 3926034488,
1520850716, 2413787880, 4241673552, 2427278590, 3086419713,
2816680417, 4241673552, 47659702, 4155441854,
2816680417}, {2772467068, 2930191747, 2318381581, 1625506838,
2772467068, 3508796977, 4142299885, 1625506838, 3200912711,
3315632585, 2480313120, 88147339, 1413043611, 2481083808,
2480313120, 1945166752, 2247120009, 2481083808}}, {{404228189,
4107238699, 1386575385, 2831829177, 404228189, 997274536,
929124824, 2831829177, 1715219514, 1662727700, 1132340715,
1154819290, 676995757, 3926931954, 1132340715, 615601328,
2907684453, 3926931954}, {2970489088, 1425511081, 1158689953,
2912523524, 2970489088, 4216003022, 1840499723, 2912523524,
3928131551, 934679595, 475345024, 1795936544, 4206379953,
3921782078, 475345024, 2969567377, 3376494857,
3921782078}, {2086065141, 587061399, 3022076788, 1647549563,
2086065141, 3380605877, 2507630679, 1647549563, 2920924516,
1233970978, 3042622070, 1229440375, 4181781224, 3483571863,
3042622070, 742581647, 2615823606, 3483571863}, {2370490899,
2450376936, 10873814, 2510299562, 2370490899, 2505735035,
271603006, 2510299562, 2807004452, 661718928, 1047885963,
1065969969, 1581078542, 3119292228, 1047885963, 2083352304,
741613041, 3119292228}, {1354837648, 3678237069, 3682511980,
2675296174, 1354837648, 2414255248, 1246008334, 2675296174,
920524545, 2653425336, 1917491817, 2869973725, 1239349701,
3580539188, 1917491817, 1506189984, 2309910897,
3580539188}, {3793171443, 1882293939, 661495819, 1513759891,
3793171443, 337719276, 652184790, 1513759891, 895842640,
2098160992, 1401117517, 2267936793, 335339494, 3193382049,
1401117517, 3880461974, 573569291, 3193382049}, {4117350188,
3529373493, 2913153588, 3057958216, 4117350188, 3995611923,
2391642796, 3057958216, 2497597968, 3011116692, 2682960221,
1476706329, 3517522726, 4253871110, 2682960221, 153189593,
1114509184, 4253871110}, {2787925323, 3044470744, 1475797635,
1113679064, 2787925323, 1475266926, 2502160539, 1113679064,
2000734342, 993186530, 3804264051, 1246805564, 1366457944,
3643472111, 3804264051, 3995474529, 2870554228,
3643472111}, {1768207891, 2590074999, 3370546898, 175571127,
1768207891, 2499224051, 920885099, 175571127, 1846049425,
1563383369, 18544120, 2874273892, 935783747, 462450713, 18544120,
167437104, 1822716166, 462450713}, {76599155, 623201703,
2574759301, 722317846, 76599155, 1016766460, 1586650055, 722317846,
1766616799, 532216705, 1859333754, 2682882800, 2377603858,
2693567720, 1859333754, 3619720132, 1775121226,
2693567720}, {3363820471, 2298118057, 2029539379, 1292061429,
3363820471, 1915384204, 436716126, 1292061429, 2161444770,
2123202522, 2928620817, 2991388585, 2731138746, 177441128,
2928620817, 3917475518, 3291773936, 177441128}, {1724853627,
1256783759, 536042925, 4014926443, 1724853627, 2495955387,
1080154168, 4014926443, 3664739404, 3842743664, 1160802169,
92806587, 1363372142, 2520305729, 1160802169, 2663387463,
3279882298, 2520305729}, {1148079236, 2156546352, 1092717014,
2592814691, 1148079236, 3709610673, 2728288705, 2592814691,
867449101, 3879394235, 2162786564, 1706763897, 2618087039,
1182442141, 2162786564, 2087227814, 1406594286,
1182442141}, {2618021767, 2030169433, 527165723, 3373283605,
2618021767, 1560329332, 3767126799, 3373283605, 4046813655,
3607037865, 1124729601, 2731098528, 1908361865, 1402382861,
1124729601, 967911190, 2128689614, 1402382861}, {239039756,
1309462532, 1276337832, 2279012449, 239039756, 538581363,
3801080808, 2279012449, 1454676776, 3319475384, 3336285375,
4071940116, 4262329590, 1464913319, 3336285375, 2840173801,
598999078, 1464913319}}, {{3639907189, 2256217204, 2067740348,
1252556678, 3639907189, 1926798761, 3073232607, 1252556678,
1457157056, 388382377, 255632881, 3472564181, 3778927111,
796711791, 255632881, 3160293932, 3878204845,
796711791}, {686702830, 667792040, 1699805291, 1035400458,
686702830, 4126312242, 3162437311, 1035400458, 3740999688,
1213319489, 1349354417, 715722511, 1264780832, 1776984101,
1349354417, 3449763933, 1742284034, 1776984101}, {1935673443,
2304324596, 877889863, 1338322079, 1935673443, 2710885009,
817781640, 1338322079, 1345468819, 1095273395, 3070932389,
4073569309, 4183678140, 3231284907, 3070932389, 3476263944,
2981539575, 3231284907}, {4092709154, 1863075174, 659945473,
3203768688, 4092709154, 848719394, 1858240466, 3203768688,
2422495016, 3193186353, 614207188, 2046042882, 1853554849,
4261866865, 614207188, 201872335, 1914382786,
4261866865}, {3931174287, 340393141, 3818334033, 2944772441,
3931174287, 3854092115, 1446242483, 2944772441, 21345609,
3967303913, 1712333408, 2838663503, 2683472927, 4179922982,
1712333408, 3720886954, 2821238835, 4179922982}, {1579232146,
3349077947, 3154153453, 1572317229, 1579232146, 386210076,
2146084483, 1572317229, 2472609977, 2803635446, 2420177830,
3429706463, 1924402503, 2701381139, 2420177830, 2023182114,
3664845069, 2701381139}, {1649964845, 217906160, 3740686873,
1547798902, 1649964845, 3707114992, 578076866, 1547798902,
3934658083, 2228432272, 2221023672, 547107197, 722305627,
4122275824, 2221023672, 1051494202, 2032046756,
4122275824}, {3721214025, 745295675, 783929942, 871751352,
3721214025, 3976202597, 2834051003, 871751352, 246817944,
2184392547, 1251969297, 470400916, 3491451503, 2210205512,
1251969297, 3324925707, 2847073169, 2210205512}, {2154402149,
4001512734, 3816295128, 4056397833, 2154402149, 2221071186,
4004683974, 4056397833, 4263372608, 435589087, 3137962926,
2082593800, 1851577490, 1111106658, 3137962926, 2240713216,
1077637478, 1111106658}, {3915641819, 2309221783, 1234558483,
2655427644, 3915641819, 895281471, 4134896970, 2655427644,
1107172622, 2288454722, 3171155722, 1480377757, 1089650049,
2520052828, 3171155722, 2197644324, 3723852471,
2520052828}, {3250461503, 1910495453, 1512194304, 169608246,
3250461503, 309305922, 818711694, 169608246, 2925058007,
3125904656, 3492065914, 1221999270, 170682686, 992715205,
3492065914, 1638334801, 4178144540, 992715205}, {35143865,
2176984764, 1567869529, 2500915473, 35143865, 887925654, 496167814,
2500915473, 548058447, 2939006535, 503033547, 1828629701,
980661493, 1829465586, 503033547, 2957960322, 3876447782,
1829465586}, {1606635477, 1843255804, 631081701, 2295006902,
1606635477, 1370996644, 746905804, 2295006902, 894654639,
993091788, 770341956, 2384830027, 2326513067, 4197539627,
770341956, 844674446, 2567893567, 4197539627}, {1002527952,
4011213374, 3121831492, 801660332, 1002527952, 784909987,
417481691, 801660332, 3000287756, 3049405724, 1827317252,
859112106, 3308812772, 4087835913, 1827317252, 603843371,
2983990017, 4087835913}, {3051707672, 496483841, 1727559830,
3885232784, 3051707672, 3571506368, 382063349, 3885232784,
3281899811, 1611359555, 1261853865, 2360894365, 2828833838,
2485922743, 1261853865, 1951099638, 120616508,
2485922743}}, {{2906557036, 966815948, 2300510686, 3603608092,
2906557036, 3826150877, 2365398362, 3603608092, 3811740424,
4059123142, 295466806, 3527253079, 4143310876, 1523590942,
295466806, 68341529, 2391111113, 1523590942}, {2220769388,
705296543, 1346863388, 18201123, 2220769388, 437309679, 3367710570,
18201123, 2816329160, 964257199, 1814959027, 346472965,
1560544267, 1406902110, 1814959027, 1774518130, 3735012720,
1406902110}, {497836434, 523073130, 2146196184, 1183199523,
497836434, 3080242732, 686976485, 1183199523, 2743985808, 60540513,
3754191262, 2574962339, 1060465580, 992368492, 3754191262,
2674694909, 2710608111, 992368492}, {3105849797, 1735620028,
1937586849, 1662315499, 3105849797, 2572908401, 132645114,
1662315499, 3310028953, 1168504873, 2025248418, 1349947330,
4148125749, 855309653, 2025248418, 1518467541, 4208503105,
855309653}, {589436548, 2389421212, 3566940914, 3741960405,
589436548, 761213045, 2322978424, 3741960405, 142470895,
3823316426, 645207847, 2648411341, 3256971516, 2429349820,
645207847, 3556252097, 1469549233, 2429349820}, {2069885925,
700277438, 2476258020, 3883250533, 2069885925, 2884486169,
3759258130, 3883250533, 1149765069, 1586965364, 2937963376,
1930346507, 370051167, 2030076497, 2937963376, 940935419,
4179928109, 2030076497}, {1596781442, 2709977947, 2667972423,
1839394420, 1596781442, 270719656, 4089925844, 1839394420,
525032773, 3677430989, 863795123, 2000204624, 935621435,
2315368417, 863795123, 3597744580, 2843028970,
2315368417}, {2044154050, 1149892630, 1787730088, 1359675853,
2044154050, 106675209, 3599822966, 1359675853, 461386353,
1350184085, 252401654, 2793725401, 3992097193, 2375338156,
252401654, 409696181, 3629519168, 2375338156}, {1017349084,
629419078, 203312427, 2157999162, 1017349084, 2919482867,
850785849, 2157999162, 1894476622, 1029598590, 3416340823,
40448550, 3206465269, 1502299776, 3416340823, 2872782491,
354425135, 1502299776}, {3908209478, 3273124970, 4162279434,
1499732648, 3908209478, 1957153439, 1845710116, 1499732648,
1696427075, 4256446835, 3347645869, 3898835358, 742690700,
987095540, 3347645869, 3948109762, 184519571,
987095540}, {1761526113, 846440977, 968061131, 3519837725,
1761526113, 3098516054, 195554263, 3519837725, 4170693482,
1478537122, 3484995340, 2037109801, 697103737, 3043971826,
3484995340, 2423210396, 580497182, 3043971826}, {4143854702,
1557265403, 1276625905, 2034254929, 4143854702, 308885052,
404593054, 2034254929, 1678397435, 2282426993, 3529719882,
1918223997, 204704202, 2856909490, 3529719882, 3088217623,
1191427722, 2856909490}, {915021027, 57535839, 695857913,
1840996748, 915021027, 3480463983, 2917871304, 1840996748,
975961557, 2428781875, 1194218426, 3745468214, 3705289301,
2961889631, 1194218426, 978546743, 1677670172,
2961889631}, {884811167, 1724903241, 1395551713, 2234972795,
884811167, 965267288, 489460872, 2234972795, 559523405, 4059493912,
3214872273, 1442552816, 2159361799, 1594616762, 3214872273,
3944540633, 3530814584, 1594616762}, {2300205863, 355019952,
1711195105, 3708698642, 2300205863, 2274854722, 801163396,
3708698642, 778665262, 2636584781, 2618124359, 2831461146,
1676130220, 2459541613, 2618124359, 2311007603, 3880286112,
2459541613}}, {{1116882637, 79626976, 1846832385, 345146034,
1116882637, 2231869247, 1417149696, 345146034, 3303902397,
876210854, 3554143374, 1919765327, 894746180, 873141039,
3554143374, 361604799, 3885583138, 873141039}, {1101141726,
1004001443, 224270120, 3117782790, 1101141726, 3796182890,
1805260159, 3117782790, 2765049417, 1504276775, 2300930144,
2016163623, 2560214589, 246368794, 2300930144, 2317362874,
1703793169, 246368794}, {4282068381, 3102721096, 233700888,
1643675110, 4282068381, 2277708085, 977708150, 1643675110,
1905825838, 4275847685, 3631325896, 2980091964, 2962442766,
104889453, 3631325896, 468894607, 1635915525,
104889453}, {997151755, 3444341166, 1175528637, 2494198515,
997151755, 2490435731, 1356989069, 2494198515, 89118668, 550063800,
107416526, 1402542742, 1773803959, 1574610921, 107416526,
4103450226, 2147546631, 1574610921}, {1592979988, 2320747498,
1962743300, 3815695315, 1592979988, 3669456970, 1523382042,
3815695315, 2261193360, 4088546528, 2426827615, 2021488568,
544936901, 1011239422, 2426827615, 4071244026, 2496057465,
1011239422}, {1733540130, 1810350755, 1119988193, 3160454394,
1733540130, 613651010, 1595264559, 3160454394, 2610383359,
1752846470, 3825719596, 2445832362, 3679744745, 2364572364,
3825719596, 468574833, 3566983915, 2364572364}, {2703184802,
2604386602, 1446393552, 2873961450, 2703184802, 880506002,
2959349947, 2873961450, 2449247677, 1445510944, 912545172,
331250696, 814500863, 272315039, 912545172, 1200594162, 1398564460,
272315039}, {1194574818, 1728500627, 2662281592, 510225634,
1194574818, 2045217287, 286119182, 510225634, 2340639019,
296510335, 2637234667, 2895130475, 4031408742, 363388665,
2637234667, 1746615522, 592194244, 363388665}, {2294364571,
2423473437, 2052320134, 4118117831, 2294364571, 1174383839,
3465907219, 4118117831, 1531142753, 2867355922, 4123315,
2837472816, 3501850356, 977593562, 4123315, 2038498018, 3178048957,
977593562}, {2854828033, 3783089951, 4138436503, 184782823,
2854828033, 2190899193, 1797257364, 184782823, 1447842232,
4292230862, 2144494056, 2439698058, 1022614336, 208003776,
2144494056, 1566440482, 91247399, 208003776}, {1344049392,
2047337866, 1934971877, 2313144506, 1344049392, 266781035,
2279972572, 2313144506, 1583393201, 1663896395, 3064180636,
1105919809, 1581808653, 1156200190, 3064180636, 1451707531,
2185205086, 1156200190}, {2559113701, 2236162592, 4146978688,
2262141136, 2559113701, 3549231332, 1078367555, 2262141136,
3892495210, 3471595726, 218486499, 2351513088, 3142931989,
2823846657, 218486499, 3340983277, 4257316854,
2823846657}, {3755683476, 1940882830, 1866420738, 2087946058,
3755683476, 390169161, 4179048351, 2087946058, 3031968622,
2881596813, 3034998254, 2517333902, 535206560, 1190356014,
3034998254, 1803506624, 2329540208, 1190356014}, {781759955,
3182735706, 359509185, 825286037, 781759955, 830287146, 2959985729,
825286037, 1510077366, 1212961148, 2047897620, 965798968,
1674831117, 3562083579, 2047897620, 1588504573, 3058668461,
3562083579}, {590551532, 736016229, 1655805093, 4221110267,
590551532, 1365819997, 1908233913, 4221110267, 290264540,
2222615454, 2404181240, 2563913658, 2273306715, 1685030826,
2404181240, 1157041624, 3746279518, 1685030826}}, {{2343328484,
2893041005, 3465141330, 1189120688, 2343328484, 1087786165,
2848040407, 1189120688, 210787847, 4226264344, 1160174754,
1234984211, 4027094919, 3997368560, 1160174754, 3178383826,
3047771871, 3997368560}, {215280489, 1474823842, 730413847,
3236285682, 215280489, 2873263091, 962036916, 3236285682,
3438170226, 1708226553, 1776841674, 1500495759, 2867287469,
3303179301, 1776841674, 3235964171, 4243968063,
3303179301}, {4177426677, 1193948814, 3908192573, 489218847,
4177426677, 1267710679, 1841260926, 489218847, 1877599976,
2513927224, 1694166096, 1377070160, 593465055, 1859001036,
1694166096, 2391336228, 2962890971, 1859001036}, {653809404,
3710942807, 3082882924, 133010024, 653809404, 2835827516,
2886695328, 133010024, 1566461658, 3398059015, 3932773012,
3350181058, 389193591, 1482944153, 3932773012, 354466071,
3192311574, 1482944153}, {1484000297, 3188783681, 3415991698,
4003411060, 1484000297, 933110981, 3748771156, 4003411060,
1018639212, 2819889512, 2637114657, 909882870, 1117546206,
3478906695, 2637114657, 3563812138, 565159378,
3478906695}, {440283001, 1089550911, 4029103059, 3185784250,
440283001, 1151112872, 1624263326, 3185784250, 2630823869,
1154112679, 3969663510, 1537382635, 915271858, 3406907174,
3969663510, 536198867, 3949116664, 3406907174}, {1972186265,
3731240792, 1890899803, 4161490031, 1972186265, 2993166332,
868072046, 4161490031, 2558003006, 1918378675, 2326140461,
3071215524, 2413540258, 1488624292, 2326140461, 4148140705,
2799268852, 1488624292}, {621546357, 1156650856, 2697142404,
1263786956, 621546357, 762846463, 326001602, 1263786956,
4201558916, 2925300409, 1254989667, 2027371896, 2383815228,
640968550, 1254989667, 922372912, 3226860971,
640968550}, {1223308810, 203294792, 5747672, 1452450798,
1223308810, 1209885024, 4220840636, 1452450798, 2344687902,
3881403814, 4062543339, 2728743727, 2635127075, 4270641330,
4062543339, 1822455048, 1349296096, 4270641330}, {3724096918,
1596355079, 208236284, 3428374439, 3724096918, 3321163606,
651704320, 3428374439, 895018056, 292412816, 452352464, 3648643686,
218390717, 3793896259, 452352464, 2814326437, 1780812891,
3793896259}, {407784204, 38915014, 325099352, 1466266720,
407784204, 1070102291, 336999180, 1466266720, 4118978201,
1467025452, 1802921798, 3679350217, 2749639306, 812520277,
1802921798, 3868105704, 2578531869, 812520277}, {3674853211,
3045714102, 2926968519, 3102951286, 3674853211, 1039842093,
3967585868, 3102951286, 3762480362, 3172107248, 1787821043,
274678961, 114309037, 1962346210, 1787821043, 46968956, 558932075,
1962346210}, {3704702649, 235968343, 2794114303, 1938778500,
3704702649, 525722325, 1883441867, 1938778500, 3201001463,
3749243715, 2025994867, 3161869996, 978727059, 1073516940,
2025994867, 2469877757, 1518940291, 1073516940}, {4067177669,
1043444957, 999473793, 2767155734, 4067177669, 2450190076,
1698789305, 2767155734, 2712550515, 535913450, 201585531,
3030738646, 1607735058, 343873340, 201585531, 1083665713,
2462653191, 343873340}, {1372371305, 2609726990, 2019515078,
1139515116, 1372371305, 481772553, 3800286228, 1139515116,
1153162105, 4026972309, 3689846594, 3253844353, 3753500845,
2231918583, 3689846594, 1165407528, 2183075361,
2231918583}}, {{403962847, 639660658, 1483118807, 2339029465,
403962847, 2669906627, 2430211448, 2339029465, 2655768102,
1897624297, 1689291784, 1650609719, 4255405993, 2313146046,
1689291784, 1377591475, 3910187183, 2313146046}, {3500531602,
3773159052, 24637, 4228486662, 3500531602, 1913148390, 754946994,
4228486662, 3508595200, 4235849984, 3185020283, 2449669145,
1923190496, 3656310954, 3185020283, 2702189958, 882924050,
3656310954}, {3184854268, 2557204628, 2501974390, 676540794,
3184854268, 1997794025, 3402535509, 676540794, 917982480,
103612315, 4247470915, 101546088, 1398719693, 1932812117,
4247470915, 4129096120, 4060977130, 1932812117}, {2873927273,
4095476435, 2167114735, 1143874914, 2873927273, 2970344133,
3205058469, 1143874914, 4024866227, 216160452, 3926377906,
511317726, 600268838, 377232416, 3926377906, 4229103619,
1498446142, 377232416}, {1792666093, 4016848937, 1543204173,
3257189274, 1792666093, 176833036, 2280560724, 3257189274,
1147306446, 328172414, 1507371296, 3523466281, 29926908,
3043105629, 1507371296, 3253631879, 4036239959,
3043105629}, {4205223861, 878825824, 1490976015, 345877551,
4205223861, 2785571028, 1338844195, 345877551, 1924613784,
1236254347, 4161316409, 642959848, 3917474052, 2570368729,
4161316409, 152032198, 2287840344, 2570368729}, {3990133269,
3128995264, 1738689207, 3444164897, 3990133269, 3712933071,
2064904226, 3444164897, 2391813050, 761038241, 1935180159,
170979531, 3728725334, 1306954198, 1935180159, 299888769,
273871383, 1306954198}, {3574053987, 1276667706, 3630964515,
2958366486, 3574053987, 2704486034, 3027708794, 2958366486,
1479401095, 792177412, 3952111894, 3815277103, 575096961,
1969399344, 3952111894, 4220943579, 3273966859,
1969399344}, {1021903948, 2991536458, 2725921657, 1020618701,
1021903948, 547582336, 1519243117, 1020618701, 4206967196,
2892587185, 1298122989, 2445591450, 475488669, 1364818830,
1298122989, 176868453, 3110121937, 1364818830}, {1990940644,
3319755588, 1302742290, 2960959917, 1990940644, 2496813625,
1344370018, 2960959917, 486243349, 3205282294, 350307083,
3501465828, 1203224538, 3238458868, 350307083, 4294608887,
2153874647, 3238458868}, {188494288, 213770946, 3029216841,
3018898373, 188494288, 3334548848, 4213633188, 3018898373,
2892247335, 3996577694, 1751933553, 3067620234, 1428358712,
1521457388, 1751933553, 872030483, 3604618491,
1521457388}, {3855580426, 3723431054, 2974755971, 4212261536,
3855580426, 3210276198, 2870367456, 4212261536, 3471121,
2830179495, 1573179329, 3416534728, 2922475892, 1779275464,
1573179329, 2466688033, 2781126556, 1779275464}, {3921473593,
700976293, 583685154, 1551145624, 3921473593, 1197146456,
3641233485, 1551145624, 3152036247, 3807351913, 689693264,
1832287238, 141617649, 3784802596, 689693264, 1601350390,
1340587043, 3784802596}, {1543625147, 333607140, 4026328395,
504613722, 1543625147, 448131202, 60339876, 504613722, 2156457620,
4274611742, 2596241228, 274244631, 1730347342, 2823386290,
2596241228, 3458355488, 366721249, 2823386290}, {923427765,
129677983, 1847794530, 629349982, 923427765, 2962725713, 869401798,
629349982, 3406615191, 3679893364, 377634006, 2637846534,
1807536786, 814948710, 377634006, 2686254269, 3421740057,
814948710}}, {{3968427114, 2871663372, 1284825950, 1363628533,
3968427114, 3023327955, 818363998, 1363628533, 2035927380,
2557663578, 3949237584, 2564746147, 74149658, 2957238169,
3949237584, 1523740068, 1410010554, 2957238169}, {3221052941,
4281160673, 4178727866, 3897287251, 3221052941, 1527779946,
4106993377, 3897287251, 3827747418, 1477293711, 3022040116,
2387074241, 2549406364, 3377318569, 3022040116, 2556102508,
1927835240, 3377318569}, {1544125908, 2580689227, 2321269870,
1052675384, 1544125908, 92813377, 2488796230, 1052675384,
2512492218, 1791527725, 377790127, 815879636, 1719266365,
1377352274, 377790127, 3998501000, 4140278193,
1377352274}, {3774782533, 3237990203, 3880910680, 2835476193,
3774782533, 850076464, 959978619, 2835476193, 1174358892,
1607696753, 3708493374, 4435502, 4183546362, 257306870, 3708493374,
547809226, 1748489735, 257306870}, {701018289, 2603935570,
3581566907, 639047872, 701018289, 128268716, 1338762710, 639047872,
1121980250, 3604187234, 4290097866, 3379688383, 2022645560,
675681167, 4290097866, 3364059629, 3944294906,
675681167}, {1124911735, 2683742666, 1381972838, 823435890,
1124911735, 3000499818, 1896847210, 823435890, 1400690756,
1375837008, 1844725476, 2839043606, 570318859, 2404623323,
1844725476, 1615062394, 4132820260, 2404623323}, {1785616771,
843456078, 2394423301, 1188894209, 1785616771, 3386998707,
115723361, 1188894209, 1855880557, 3076793694, 2816648919,
2844159661, 2973871282, 3480594085, 2816648919, 620328775,
2014570354, 3480594085}, {2850730926, 2374744218, 1073801764,
1998893251, 2850730926, 1430713735, 1400155768, 1998893251,
3128011728, 2412946221, 675898567, 5625977, 892406741, 4076910933,
675898567, 3433720143, 930542270, 4076910933}, {2135087543,
1525187887, 4227112097, 2761515035, 2135087543, 184926998,
3735543804, 2761515035, 173729517, 3963796804, 142213288,
3009309079, 2551966101, 3928418720, 142213288, 1709878445,
3246094911, 3928418720}, {3684505492, 1299858048, 3524599640,
3000721466, 3684505492, 764299143, 850809698, 3000721466,
1152423219, 1217794308, 973255696, 2327635494, 3968325674,
3143857447, 973255696, 4202002846, 1394551864,
3143857447}, {2895494095, 294169106, 2158601842, 4275076982,
2895494095, 361679506, 2740874794, 4275076982, 2231694171,
3225185820, 142973615, 1757153604, 2433563, 3436532721, 142973615,
1974305123, 2983415849, 3436532721}, {34180751, 194682771,
3382776937, 3090564778, 34180751, 1911908313, 4205068615,
3090564778, 2188428625, 3521935017, 1469753239, 3388871077,
4063581553, 2448094751, 1469753239, 1627957632, 2840824567,
2448094751}, {16962125, 312589334, 2192048768, 2893286420,
16962125, 1459437626, 313567118, 2893286420, 1643785836,
3172602356, 3476246571, 2478813080, 1583665905, 2003796931,
3476246571, 1092519015, 4229763198, 2003796931}, {3026731980,
4200640347, 3527778260, 627769205, 3026731980, 987718671,
3010308075, 627769205, 3050396426, 207847804, 3577868319,
3566899985, 39982755, 1593620760, 3577868319, 2173731154,
1002861683, 1593620760}, {728117846, 2300694121, 631011479,
1340017637, 728117846, 2267723092, 2600113427, 1340017637,
1417809782, 1157833108, 469667457, 2542231883, 1967581696,
3875471800, 469667457, 3326806467, 1143824575, 3875471800}}};
extern "C" __host__ void mrg32k3a_init_device_consts_(){
mrg32k3a_CUDA_CALL(cudaMemcpyToSymbol(matrix_array,matrix__array,sizeof(matrix__array),0,cudaMemcpyHostToDevice));
}
extern "C" __host__ void mrg32k3a_free_device_consts_(){
}
extern "C" __device__ __host__ unsigned mrg32k3a_ModM1(unsigned a1,unsigned b1,unsigned a2,unsigned b2,unsigned a3,unsigned b3){
unsigned long long a,b,x;
x=(unsigned long long)a1*b1; a=(x>>32); b=(a*209)+(x-(a<<32));
x=(unsigned long long)a2*b2; a=(x>>32); b+=(a*209)+(x-(a<<32));
x=(unsigned long long)a3*b3; a=(x>>32); b+=(a*209)+(x-(a<<32));
x=(b>>32); b= (x*209)+(b-(x<<32));
return (b<4294967087) ? b : (b - 4294967087);
}
extern "C" __device__ __host__ unsigned mrg32k3a_ModM2(unsigned a1,unsigned b1,unsigned a2,unsigned b2,unsigned a3,unsigned b3){
unsigned long long a,b,x;
x=(unsigned long long)a1*b1; a=(x>>32); b=(a*22853)+(x-(a<<32));
x=(unsigned long long)a2*b2; a=(x>>32); b+=(a*22853)+(x-(a<<32));
x=(unsigned long long)a3*b3; a=(x>>32); b+=(a*22853)+(x-(a<<32));
x=(b>>32); b= (x*22853)+(b-(x<<32));
return (b<4294944443) ? b : (b - 4294944443);
}
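/* Note on the two reductions above: the MRG32k3a moduli are
m1 = 4294967087 = 2^32 - 209 and m2 = 4294944443 = 2^32 - 22853, so a 64-bit
product x = hi*2^32 + lo satisfies x mod m1 = (hi*209 + lo) mod m1, and
likewise with 22853 for m2. Illustrative host-side check of that identity
(not part of the original generator; the guard macro name is an assumption): */
#ifdef MRG32K3A_SELF_TEST
#include <assert.h>
static void mrg32k3a_check_mod_trick(void){
const unsigned long long m1=4294967087ULL;
unsigned long long x=1234567890123456789ULL;
unsigned long long hi=x>>32, lo=x&4294967295ULL;
assert(((hi*209ULL+lo)%m1)==(x%m1)); /* one fold, then an ordinary remainder */
}
#endif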
extern "C" __device__ void mrg32k3a_dev_SkipAheadRound(mrg32k3a_state* state, unsigned g, unsigned k){
unsigned s0,s1,s2,t0,t1,t2,gg=g-1;
s2=mrg32k3a_ModM1(matrix_array[k][gg][0],state->x2,matrix_array[k][gg][1],state->x1,matrix_array[k][gg][2],state->x0);
s1=mrg32k3a_ModM1(matrix_array[k][gg][3],state->x2,matrix_array[k][gg][4],state->x1,matrix_array[k][gg][5],state->x0);
s0=mrg32k3a_ModM1(matrix_array[k][gg][6],state->x2,matrix_array[k][gg][7],state->x1,matrix_array[k][gg][8],state->x0);
state->x2=s2; state->x1=s1; state->x0=s0;
t2=mrg32k3a_ModM2(matrix_array[k][gg][9],state->y2,matrix_array[k][gg][10],state->y1,matrix_array[k][gg][11],state->y0);
t1=mrg32k3a_ModM2(matrix_array[k][gg][12],state->y2,matrix_array[k][gg][13],state->y1,matrix_array[k][gg][14],state->y0);
t0=mrg32k3a_ModM2(matrix_array[k][gg][15],state->y2,matrix_array[k][gg][16],state->y1,matrix_array[k][gg][17],state->y0);
state->y2=t2; state->y1=t1; state->y0=t0;
}
extern "C" __device__ void mrg32k3a_dev_skipahead_(mrg32k3a_state* state,unsigned long long offset128,
unsigned long long offset64,unsigned long long offset0){
unsigned long long i=offset0; unsigned k=0,g;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_dev_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
i=offset64; k=16;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_dev_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
i=offset128; k=32;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_dev_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
}
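/* The jump distance is a 192-bit integer supplied as three 64-bit words
(offset128:offset64:offset0) and consumed in base 16: for every nonzero hex
digit g at position k, one application of matrix_array[k][g-1] advances the
state, that table evidently holding the transition matrix raised to the power
g*16^k. Worked example: offset0 = 0x1A3 = 419 applies the matrices for
3*16^0 (k=0,g=3), 10*16^1 (k=1,g=10) and 1*16^2 (k=2,g=1); 3+160+256 = 419. */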
extern "C" __host__ void mrg32k3a_host_SkipAheadRound(mrg32k3a_state* state, unsigned g, unsigned k){
unsigned s0,s1,s2,t0,t1,t2,gg=g-1;
s2=mrg32k3a_ModM1(matrix__array[k][gg][0],state->x2,matrix__array[k][gg][1],state->x1,matrix__array[k][gg][2],state->x0);
s1=mrg32k3a_ModM1(matrix__array[k][gg][3],state->x2,matrix__array[k][gg][4],state->x1,matrix__array[k][gg][5],state->x0);
s0=mrg32k3a_ModM1(matrix__array[k][gg][6],state->x2,matrix__array[k][gg][7],state->x1,matrix__array[k][gg][8],state->x0);
state->x2=s2; state->x1=s1; state->x0=s0;
t2=mrg32k3a_ModM2(matrix__array[k][gg][9],state->y2,matrix__array[k][gg][10],state->y1,matrix__array[k][gg][11],state->y0);
t1=mrg32k3a_ModM2(matrix__array[k][gg][12],state->y2,matrix__array[k][gg][13],state->y1,matrix__array[k][gg][14],state->y0);
t0=mrg32k3a_ModM2(matrix__array[k][gg][15],state->y2,matrix__array[k][gg][16],state->y1,matrix__array[k][gg][17],state->y0);
state->y2=t2; state->y1=t1; state->y0=t0;
}
extern "C" __host__ void mrg32k3a_skipahead_(mrg32k3a_state* state,unsigned long long offset128,
unsigned long long offset64,unsigned long long offset0){
unsigned long long i=offset0; unsigned k=0,g;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_host_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
i=offset64; k=16;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_host_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
i=offset128; k=32;
while(i>0){
g=i&15ULL; if(g>0) mrg32k3a_host_SkipAheadRound(state,g,k);
i=(i>>4); k++;
}
}
extern "C" __device__ __host__ void mrg32k3a_init_(mrg32k3a_state* state){
state->x0=state->x1=state->x2=state->y0=state->y1=state->y2=123;
}
extern "C" __device__ void mrg32k3a_dev_init_sequence_(mrg32k3a_state* state,unsigned long long SequenceNumber){
// 0 < SequenceNumber < 10^19 ; length of each sequence <= 10^38
unsigned long long a0, a32, b0=1029448176, b32=1712288060, b64=2074794764, b96=1914833239,t32,t96;
a32=SequenceNumber>>32; a0=SequenceNumber-(a32<<32);
mrg32k3a_init_(state);
t32=a32*b0+a0*b32; t96=a32*b64+a0*b96;
mrg32k3a_dev_skipahead_(state,a32*b96,a32*b32+a0*b64,a0*b0);
mrg32k3a_dev_skipahead_(state,t96>>32,t32>>32,0);
mrg32k3a_dev_skipahead_(state,0,(t96^4294967295ULL)<<32,(t32^4294967295ULL)<<32);
}
extern "C" __host__ void mrg32k3a_init_sequence_(mrg32k3a_state* state,unsigned long long SequenceNumber){
// 0 < SequenceNumber < 10^19 ; length of each sequence <= 10^38
unsigned long long a0, a32, b0=1029448176, b32=1712288060, b64=2074794764, b96=1914833239,t32,t96;
a32=SequenceNumber>>32; a0=SequenceNumber-(a32<<32);
mrg32k3a_init_(state);
t32=a32*b0+a0*b32; t96=a32*b64+a0*b96;
mrg32k3a_skipahead_(state,a32*b96,a32*b32+a0*b64,a0*b0);
mrg32k3a_skipahead_(state,t96>>32,t32>>32,0);
mrg32k3a_skipahead_(state,0,(t96^4294967295ULL)<<32,(t32^4294967295ULL)<<32);
}
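/* How the sequence offset is formed above: b0..b96 appear to be the 32-bit
limbs of a per-sequence stride b = b96*2^96 + b64*2^64 + b32*2^32 + b0
(roughly 1.5*10^38, consistent with the 10^38 sequence length stated in the
comment), and the three skipahead calls accumulate the partial limb products
of SequenceNumber*b across the 192-bit offset. */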
extern "C" __device__ __host__ unsigned int mrg32k3a_generate_(mrg32k3a_state* state){
unsigned p1, p2;
p1 = mrg32k3a_ModM1(mrg32k3a_a12,state->x1 , mrg32k3a_a13,state->x0,0,0);
state->x0 = state->x1; state->x1 = state->x2; state->x2 = p1;
p2 = mrg32k3a_ModM2(mrg32k3a_a21,state->y2 , mrg32k3a_a23,state->y0,0,0);
state->y0 = state->y1; state->y1 = state->y2; state->y2 = p2;
if (p1 <= p2) return ( 4294967087 - (p2 - p1) ); else return (p1 - p2);
}
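/* Minimal host usage sketch for the raw generator (illustrative, not part of
the library; the guard macro MRG32K3A_SELF_TEST is an assumption): */
#ifdef MRG32K3A_SELF_TEST
static void mrg32k3a_smoke_test_(void){
mrg32k3a_state s; mrg32k3a_init_(&s);
for(int i=0;i<10;i++){
unsigned int u=mrg32k3a_generate_(&s); /* draws are expected in (0, m1] */
if(u>4294967087u) printf("mrg32k3a: draw %d out of range: %u\n",i,u);
}
}
#endif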
extern "C" __device__ __host__ float mrg32k3a_generate_uniform_float_(mrg32k3a_state* state){
unsigned p1, p2;
p1 = mrg32k3a_ModM1(mrg32k3a_a12,state->x1 , mrg32k3a_a13,state->x0,0,0);
state->x0 = state->x1; state->x1 = state->x2; state->x2 = p1;
p2 = mrg32k3a_ModM2(mrg32k3a_a21,state->y2 , mrg32k3a_a23,state->y0,0,0);
state->y0 = state->y1; state->y1 = state->y2; state->y2 = p2;
if (p1 <= p2) return ((float)( 4294967087 - (p2 - p1) )) * 2.3283064365386963e-10;
else return ((float)(p1 - p2)) * 2.3283064365386963e-10;
}
extern "C" __host__ void mrg32k3a_print_state_(mrg32k3a_state* state){
printf("Generator State: {{%u,%u,%u},{%u,%u,%u}}\n",
state->x0,state->x1,state->x2,state->y0,state->y1,state->y2);
}
__global__ void mrg32k3a_kernel_generate_array(mrg32k3a_state* state, unsigned int* out, long* length) {
unsigned i,seqNum,s0,s1,s2,t0,t1,t2; mrg32k3a_state mystate=*state;
seqNum = threadIdx.x + blockIdx.x * blockDim.x;
mrg32k3a_dev_skipahead_(&mystate,0,0,seqNum+1); s2=mystate.x2; t2=mystate.y2;
for(i=0;i<(*length);i++){
out[i*65536+seqNum]= ((s2 <= t2) ? ( 4294967087 - (t2 - s2) ) : (s2 - t2));
s2=mrg32k3a_ModM1(3975008370,mystate.x2,754337639,mystate.x1,2405650329,mystate.x0);
s1=mrg32k3a_ModM1(3715182130,mystate.x2,3975008370,mystate.x1,3615342722,mystate.x0);
s0=mrg32k3a_ModM1(642697923,mystate.x2,3715182130,mystate.x1,2256493727,mystate.x0);
mystate.x2=s2; mystate.x1=s1; mystate.x0=s0;
t2=mrg32k3a_ModM2(3174552073,mystate.y2,1030618503,mystate.y1,1998739584,mystate.y0);
t1=mrg32k3a_ModM2(3467650326,mystate.y2,2569413030,mystate.y1,1030618503,mystate.y0);
t0=mrg32k3a_ModM2(2594942403,mystate.y2,1631336015,mystate.y1,2569413030,mystate.y0);
mystate.y2=t2; mystate.y1=t1; mystate.y0=t0;
}
}
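/* The hard-coded coefficients in the loop above appear to be the transition
matrices raised to the 65536th power (mod m1 and mod m2): each iteration
advances a thread's substream by 65536 draws, matching the out[i*65536+seqNum]
stride of one output slot per thread per pass. */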
extern "C" __host__ void mrg32k3a_generate_gpu_array_(mrg32k3a_state* state, unsigned int* dev_out, unsigned int* length){
long mylength = (*length)/mrg32k3a_ARRAY_SECTIONS;
mrg32k3a_state* dev_state;
long* dev_length;
if((mylength*mrg32k3a_ARRAY_SECTIONS)<(*length)) mylength++;
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(mrg32k3a_state)));
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
mrg32k3a_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(mrg32k3a_state),cudaMemcpyHostToDevice));
mrg32k3a_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
mrg32k3a_kernel_generate_array<<<mrg32k3a_BLOCKS,mrg32k3a_THREADS>>>(dev_state,dev_out,dev_length);
mrg32k3a_CUDA_CALL(cudaGetLastError());
mrg32k3a_CUDA_CALL(cudaFree(dev_state)); mrg32k3a_CUDA_CALL(cudaFree(dev_length));
}
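/* Caller-side sketch for the *_gpu_array_ entry point (illustrative, with the
same assumed guard macro): dev_out must be padded to a multiple of
mrg32k3a_ARRAY_SECTIONS, the same rounding the wrapper applies internally. */
#ifdef MRG32K3A_SELF_TEST
static void mrg32k3a_gpu_array_demo_(void){
mrg32k3a_state s; mrg32k3a_init_(&s);
unsigned int n=1000000u, *dev_out;
unsigned int padded=((n+mrg32k3a_ARRAY_SECTIONS-1)/mrg32k3a_ARRAY_SECTIONS)*mrg32k3a_ARRAY_SECTIONS;
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_out,padded*sizeof(unsigned int)));
mrg32k3a_generate_gpu_array_(&s,dev_out,&n); /* fills n values on the device */
mrg32k3a_CUDA_CALL(cudaFree(dev_out));
}
#endif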
__global__ void mrg32k3a_kernel_generate_array_float(mrg32k3a_state* state, float* out, long* length) {
unsigned i,seqNum,s0,s1,s2,t0,t1,t2; mrg32k3a_state mystate=*state;
seqNum = threadIdx.x + blockIdx.x * blockDim.x;
mrg32k3a_dev_skipahead_(&mystate,0,0,seqNum+1); s2=mystate.x2; t2=mystate.y2;
for(i=0;i<(*length);i++){
out[i*65536+seqNum]= (float)((s2 <= t2) ? ( 4294967087 - (t2 - s2) ) : (s2 - t2)) * 2.3283064365386963e-10;
s2=mrg32k3a_ModM1(3975008370,mystate.x2,754337639,mystate.x1,2405650329,mystate.x0);
s1=mrg32k3a_ModM1(3715182130,mystate.x2,3975008370,mystate.x1,3615342722,mystate.x0);
s0=mrg32k3a_ModM1(642697923,mystate.x2,3715182130,mystate.x1,2256493727,mystate.x0);
mystate.x2=s2; mystate.x1=s1; mystate.x0=s0;
t2=mrg32k3a_ModM2(3174552073,mystate.y2,1030618503,mystate.y1,1998739584,mystate.y0);
t1=mrg32k3a_ModM2(3467650326,mystate.y2,2569413030,mystate.y1,1030618503,mystate.y0);
t0=mrg32k3a_ModM2(2594942403,mystate.y2,1631336015,mystate.y1,2569413030,mystate.y0);
mystate.y2=t2; mystate.y1=t1; mystate.y0=t0;
}
}
extern "C" __host__ void mrg32k3a_generate_gpu_array_float_(mrg32k3a_state* state, float* dev_out, unsigned int* length){
long mylength = (*length)/mrg32k3a_ARRAY_SECTIONS;
mrg32k3a_state* dev_state;
long* dev_length;
if((mylength*mrg32k3a_ARRAY_SECTIONS)<(*length)) mylength++;
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(mrg32k3a_state)));
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
mrg32k3a_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(mrg32k3a_state),cudaMemcpyHostToDevice));
mrg32k3a_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
mrg32k3a_kernel_generate_array_float<<<mrg32k3a_BLOCKS,mrg32k3a_THREADS>>>(dev_state,dev_out,dev_length);
mrg32k3a_CUDA_CALL(cudaGetLastError());
mrg32k3a_CUDA_CALL(cudaFree(dev_state)); mrg32k3a_CUDA_CALL(cudaFree(dev_length));
}
__global__ void mrg32k3a_kernel_generate_array_double(mrg32k3a_state* state, double* out, long* length) {
unsigned i,seqNum,s0,s1,s2,t0,t1,t2; mrg32k3a_state mystate=*state;
seqNum = threadIdx.x + blockIdx.x * blockDim.x;
mrg32k3a_dev_skipahead_(&mystate,0,0,seqNum+1); s2=mystate.x2; t2=mystate.y2;
for(i=0;i<(*length);i++){
out[i*65536+seqNum]= (double)((s2 <= t2) ? ( 4294967087 - (t2 - s2) ) : (s2 - t2)) * 2.3283064365386963e-10;
s2=mrg32k3a_ModM1(3975008370,mystate.x2,754337639,mystate.x1,2405650329,mystate.x0);
s1=mrg32k3a_ModM1(3715182130,mystate.x2,3975008370,mystate.x1,3615342722,mystate.x0);
s0=mrg32k3a_ModM1(642697923,mystate.x2,3715182130,mystate.x1,2256493727,mystate.x0);
mystate.x2=s2; mystate.x1=s1; mystate.x0=s0;
t2=mrg32k3a_ModM2(3174552073,mystate.y2,1030618503,mystate.y1,1998739584,mystate.y0);
t1=mrg32k3a_ModM2(3467650326,mystate.y2,2569413030,mystate.y1,1030618503,mystate.y0);
t0=mrg32k3a_ModM2(2594942403,mystate.y2,1631336015,mystate.y1,2569413030,mystate.y0);
mystate.y2=t2; mystate.y1=t1; mystate.y0=t0;
}
}
extern "C" __host__ void mrg32k3a_generate_gpu_array_double_(mrg32k3a_state* state, double* dev_out, unsigned int* length){
long mylength = (*length)/mrg32k3a_ARRAY_SECTIONS;
mrg32k3a_state* dev_state;
long* dev_length;
if((mylength*mrg32k3a_ARRAY_SECTIONS)<(*length)) mylength++;
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(mrg32k3a_state)));
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
mrg32k3a_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(mrg32k3a_state),cudaMemcpyHostToDevice));
mrg32k3a_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
mrg32k3a_kernel_generate_array_double<<<mrg32k3a_BLOCKS,mrg32k3a_THREADS>>>(dev_state,dev_out,dev_length);
mrg32k3a_CUDA_CALL(cudaGetLastError());
mrg32k3a_CUDA_CALL(cudaFree(dev_state)); mrg32k3a_CUDA_CALL(cudaFree(dev_length));
}
extern "C" __host__ void mrg32k3a_generate_array_(mrg32k3a_state* state, unsigned int* out, unsigned int* length){
long mylength = (*length)/mrg32k3a_ARRAY_SECTIONS;
mrg32k3a_state* dev_state;
unsigned int* dev_out;
long* dev_length;
if((mylength*mrg32k3a_ARRAY_SECTIONS)<(*length)) mylength++;
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(mrg32k3a_state)));
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_out,mylength*mrg32k3a_ARRAY_SECTIONS*sizeof(unsigned int)));
mrg32k3a_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
mrg32k3a_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(mrg32k3a_state),cudaMemcpyHostToDevice));
mrg32k3a_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
mrg32k3a_kernel_generate_array<<<mrg32k3a_BLOCKS,mrg32k3a_THREADS>>>(dev_state,dev_out,dev_length);
mrg32k3a_CUDA_CALL(cudaGetLastError());
mrg32k3a_CUDA_CALL(cudaMemcpy(out,dev_out,(*length)*sizeof(unsigned int),cudaMemcpyDeviceToHost));
mrg32k3a_CUDA_CALL(cudaFree(dev_state)); mrg32k3a_CUDA_CALL(cudaFree(dev_out));
mrg32k3a_CUDA_CALL(cudaFree(dev_length));
}
|
1eba9ed604872e524b9d5920a97a7768c3dda4c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/density_prior_box_op.h"
namespace paddle {
namespace operators {
template <typename T>
static __device__ inline T Clip(T in) {
return min(max(in, 0.), 1.);
}
template <typename T>
static __global__ void GenDensityPriorBox(
const int height, const int width, const int im_height, const int im_width,
const T offset, const T step_width, const T step_height,
const int num_priors, const T* ratios_shift, bool is_clip, const T var_xmin,
const T var_ymin, const T var_xmax, const T var_ymax, T* out, T* var) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int gidy = blockIdx.y * blockDim.y + threadIdx.y;
int step_x = blockDim.x * gridDim.x;
int step_y = blockDim.y * gridDim.y;
const T* width_ratio = ratios_shift;
const T* height_ratio = ratios_shift + num_priors;
const T* width_shift = ratios_shift + 2 * num_priors;
const T* height_shift = ratios_shift + 3 * num_priors;
for (int j = gidy; j < height; j += step_y) {
for (int i = gidx; i < width * num_priors; i += step_x) {
int h = j;
int w = i / num_priors;
int k = i % num_priors;
T center_x = (w + offset) * step_width;
T center_y = (h + offset) * step_height;
T center_x_temp = center_x + width_shift[k];
T center_y_temp = center_y + height_shift[k];
T box_width_ratio = width_ratio[k] / 2.;
T box_height_ratio = height_ratio[k] / 2.;
T xmin = max((center_x_temp - box_width_ratio) / im_width, 0.);
T ymin = max((center_y_temp - box_height_ratio) / im_height, 0.);
T xmax = min((center_x_temp + box_width_ratio) / im_width, 1.);
T ymax = min((center_y_temp + box_height_ratio) / im_height, 1.);
int out_offset = (j * width * num_priors + i) * 4;
out[out_offset] = is_clip ? Clip<T>(xmin) : xmin;
out[out_offset + 1] = is_clip ? Clip<T>(ymin) : ymin;
out[out_offset + 2] = is_clip ? Clip<T>(xmax) : xmax;
out[out_offset + 3] = is_clip ? Clip<T>(ymax) : ymax;
var[out_offset] = var_xmin;
var[out_offset + 1] = var_ymin;
var[out_offset + 2] = var_xmax;
var[out_offset + 3] = var_ymax;
}
}
}
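/* Worked example for the kernel above (illustrative): with densities {4,2}
and one fixed ratio, num_priors = 1*4*4 + 1*2*2 = 20; for row j and combined
column/prior index i (i = w*num_priors + k), the box is written as
(xmin,ymin,xmax,ymax) starting at out[(j*width*num_priors + i)*4], with the
four variances at the same offset in var. */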
template <typename T>
class DensityPriorBoxOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<paddle::framework::Tensor>("Input");
auto* image = ctx.Input<paddle::framework::Tensor>("Image");
auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes");
auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
auto variances = ctx.Attr<std::vector<float>>("variances");
auto is_clip = ctx.Attr<bool>("clip");
auto fixed_sizes = ctx.Attr<std::vector<float>>("fixed_sizes");
auto fixed_ratios = ctx.Attr<std::vector<float>>("fixed_ratios");
auto densities = ctx.Attr<std::vector<int>>("densities");
T step_w = static_cast<T>(ctx.Attr<float>("step_w"));
T step_h = static_cast<T>(ctx.Attr<float>("step_h"));
T offset = static_cast<T>(ctx.Attr<float>("offset"));
auto img_width = image->dims()[3];
auto img_height = image->dims()[2];
auto feature_width = input->dims()[3];
auto feature_height = input->dims()[2];
T step_width, step_height;
if (step_w == 0 || step_h == 0) {
step_width = static_cast<T>(img_width) / feature_width;
step_height = static_cast<T>(img_height) / feature_height;
} else {
step_width = step_w;
step_height = step_h;
}
int num_priors = 0;
for (size_t i = 0; i < densities.size(); ++i) {
num_priors += (fixed_ratios.size()) * (pow(densities[i], 2));
}
int step_average = static_cast<int>((step_width + step_height) * 0.5);
framework::Tensor h_temp;
T* tdata = h_temp.mutable_data<T>({num_priors * 4}, platform::CPUPlace());
int idx = 0;
for (size_t s = 0; s < fixed_sizes.size(); ++s) {
auto fixed_size = fixed_sizes[s];
int density = densities[s];
for (size_t r = 0; r < fixed_ratios.size(); ++r) {
float ar = fixed_ratios[r];
int shift = step_average / density;
float box_width_ratio = fixed_size * sqrt(ar);
float box_height_ratio = fixed_size / sqrt(ar);
for (int di = 0; di < density; ++di) {
for (int dj = 0; dj < density; ++dj) {
float center_x_temp = shift / 2. + dj * shift - step_average / 2.;
float center_y_temp = shift / 2. + di * shift - step_average / 2.;
tdata[idx] = box_width_ratio;
tdata[num_priors + idx] = box_height_ratio;
tdata[2 * num_priors + idx] = center_x_temp;
tdata[3 * num_priors + idx] = center_y_temp;
idx++;
}
}
}
}
boxes->mutable_data<T>(ctx.GetPlace());
vars->mutable_data<T>(ctx.GetPlace());
framework::Tensor d_temp;
framework::TensorCopy(h_temp, ctx.GetPlace(), &d_temp);
// At least use 32 threads, at most 512 threads.
// blockx is multiple of 32.
int blockx = ::min(
static_cast<int64_t>(((feature_width * num_priors + 31) >> 5) << 5),
512L);
int gridx = (feature_width * num_priors + blockx - 1) / blockx;
dim3 threads(blockx, 1);
dim3 grids(gridx, feature_height);
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( GenDensityPriorBox<T>), dim3(grids), dim3(threads), 0, stream,
feature_height, feature_width, img_height, img_width, offset,
step_width, step_height, num_priors, d_temp.data<T>(), is_clip,
variances[0], variances[1], variances[2], variances[3],
boxes->data<T>(), vars->data<T>());
}
}; // class DensityPriorBoxOpCUDAKernel
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(density_prior_box,
ops::DensityPriorBoxOpCUDAKernel<float>,
ops::DensityPriorBoxOpCUDAKernel<double>);
| 1eba9ed604872e524b9d5920a97a7768c3dda4c3.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/density_prior_box_op.h"
namespace paddle {
namespace operators {
template <typename T>
static __device__ inline T Clip(T in) {
return min(max(in, 0.), 1.);
}
template <typename T>
static __global__ void GenDensityPriorBox(
const int height, const int width, const int im_height, const int im_width,
const T offset, const T step_width, const T step_height,
const int num_priors, const T* ratios_shift, bool is_clip, const T var_xmin,
const T var_ymin, const T var_xmax, const T var_ymax, T* out, T* var) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int gidy = blockIdx.y * blockDim.y + threadIdx.y;
int step_x = blockDim.x * gridDim.x;
int step_y = blockDim.y * gridDim.y;
const T* width_ratio = ratios_shift;
const T* height_ratio = ratios_shift + num_priors;
const T* width_shift = ratios_shift + 2 * num_priors;
const T* height_shift = ratios_shift + 3 * num_priors;
for (int j = gidy; j < height; j += step_y) {
for (int i = gidx; i < width * num_priors; i += step_x) {
int h = j;
int w = i / num_priors;
int k = i % num_priors;
T center_x = (w + offset) * step_width;
T center_y = (h + offset) * step_height;
T center_x_temp = center_x + width_shift[k];
T center_y_temp = center_y + height_shift[k];
T box_width_ratio = width_ratio[k] / 2.;
T box_height_ratio = height_ratio[k] / 2.;
T xmin = max((center_x_temp - box_width_ratio) / im_width, 0.);
T ymin = max((center_y_temp - box_height_ratio) / im_height, 0.);
T xmax = min((center_x_temp + box_width_ratio) / im_width, 1.);
T ymax = min((center_y_temp + box_height_ratio) / im_height, 1.);
int out_offset = (j * width * num_priors + i) * 4;
out[out_offset] = is_clip ? Clip<T>(xmin) : xmin;
out[out_offset + 1] = is_clip ? Clip<T>(ymin) : ymin;
out[out_offset + 2] = is_clip ? Clip<T>(xmax) : xmax;
out[out_offset + 3] = is_clip ? Clip<T>(ymax) : ymax;
var[out_offset] = var_xmin;
var[out_offset + 1] = var_ymin;
var[out_offset + 2] = var_xmax;
var[out_offset + 3] = var_ymax;
}
}
}
template <typename T>
class DensityPriorBoxOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<paddle::framework::Tensor>("Input");
auto* image = ctx.Input<paddle::framework::Tensor>("Image");
auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes");
auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
auto variances = ctx.Attr<std::vector<float>>("variances");
auto is_clip = ctx.Attr<bool>("clip");
auto fixed_sizes = ctx.Attr<std::vector<float>>("fixed_sizes");
auto fixed_ratios = ctx.Attr<std::vector<float>>("fixed_ratios");
auto densities = ctx.Attr<std::vector<int>>("densities");
T step_w = static_cast<T>(ctx.Attr<float>("step_w"));
T step_h = static_cast<T>(ctx.Attr<float>("step_h"));
T offset = static_cast<T>(ctx.Attr<float>("offset"));
auto img_width = image->dims()[3];
auto img_height = image->dims()[2];
auto feature_width = input->dims()[3];
auto feature_height = input->dims()[2];
T step_width, step_height;
if (step_w == 0 || step_h == 0) {
step_width = static_cast<T>(img_width) / feature_width;
step_height = static_cast<T>(img_height) / feature_height;
} else {
step_width = step_w;
step_height = step_h;
}
int num_priors = 0;
for (size_t i = 0; i < densities.size(); ++i) {
num_priors += (fixed_ratios.size()) * (pow(densities[i], 2));
}
int step_average = static_cast<int>((step_width + step_height) * 0.5);
framework::Tensor h_temp;
T* tdata = h_temp.mutable_data<T>({num_priors * 4}, platform::CPUPlace());
int idx = 0;
for (size_t s = 0; s < fixed_sizes.size(); ++s) {
auto fixed_size = fixed_sizes[s];
int density = densities[s];
for (size_t r = 0; r < fixed_ratios.size(); ++r) {
float ar = fixed_ratios[r];
int shift = step_average / density;
float box_width_ratio = fixed_size * sqrt(ar);
float box_height_ratio = fixed_size / sqrt(ar);
for (int di = 0; di < density; ++di) {
for (int dj = 0; dj < density; ++dj) {
float center_x_temp = shift / 2. + dj * shift - step_average / 2.;
float center_y_temp = shift / 2. + di * shift - step_average / 2.;
tdata[idx] = box_width_ratio;
tdata[num_priors + idx] = box_height_ratio;
tdata[2 * num_priors + idx] = center_x_temp;
tdata[3 * num_priors + idx] = center_y_temp;
idx++;
}
}
}
}
boxes->mutable_data<T>(ctx.GetPlace());
vars->mutable_data<T>(ctx.GetPlace());
framework::Tensor d_temp;
framework::TensorCopy(h_temp, ctx.GetPlace(), &d_temp);
// At least use 32 threads, at most 512 threads.
// blockx is multiple of 32.
int blockx = std::min(
static_cast<int64_t>(((feature_width * num_priors + 31) >> 5) << 5),
512L);
int gridx = (feature_width * num_priors + blockx - 1) / blockx;
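    /* Worked example (illustrative): with feature_width*num_priors = 100,
       ((100+31)>>5)<<5 = 128 threads per block (rounded up to a multiple of
       32) and gridx = 1; with 10000 work items blockx saturates at 512 and
       gridx = 20. */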
dim3 threads(blockx, 1);
dim3 grids(gridx, feature_height);
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
GenDensityPriorBox<T><<<grids, threads, 0, stream>>>(
feature_height, feature_width, img_height, img_width, offset,
step_width, step_height, num_priors, d_temp.data<T>(), is_clip,
variances[0], variances[1], variances[2], variances[3],
boxes->data<T>(), vars->data<T>());
}
}; // class DensityPriorBoxOpCUDAKernel
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(density_prior_box,
ops::DensityPriorBoxOpCUDAKernel<float>,
ops::DensityPriorBoxOpCUDAKernel<double>);
|
437f6b46c9660fdd45c19cf3d291d7ea80e4756a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//CUDA STUFF:
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//OpenCV stuff
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
#define BOX_SIZE 1 // ThreadsPerBlock == BOX_SIZE * BOX_SIZE
int M; //number of rows in image
int N; //number of columns in image
int NumRot;
int a = 0;
Mat zero;
//ip.Vpixels <--> M
//ip.Hpixels <--> N
__global__ void rotate_kernel(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE][BOX_SIZE];
int row = bx * BOX_SIZE + tx; //row of image
int col = by * BOX_SIZE + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360.0 / j * i; //in degrees (floating-point division, so rotation counts that do not divide 360 evenly are not truncated)
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// convert image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
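/* Worked example of the scaling above (illustrative): for a 640x480 input
(N=640 columns, M=480 rows) the diagonal is sqrt(640*640+480*480) = 800, and
since N>M the factor is V/Diagonal = 480/800 = 0.6, so any rotation of the
frame still fits inside the original box. Note the kernel is a forward map
(source pixel -> destination pixel), which is why destination pixels that no
source pixel lands on keep the white background prepared in main(). */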
int main(int argc, char *argv[]){
float GPURuntimes[4]; // run times of the GPU code
float ExecTotalTime = 0.0f, GPUTotalTime = 0.0f; // start the accumulators at zero; they are summed with += below
hipError_t cudaStatus;
char filename[100]; //output file name
int i;
int *CPU_OutputArray = (int*) 0; // where the GPU should copy the output back to
if (argc != 4){
printf("Improper usage!\n");
printf("Usage: %s <input image> <output image> <N rotations>\n", argv[0]);
exit(EXIT_FAILURE);
}
NumRot = atoi(argv[3]);
if (NumRot > 30){
printf("Number of rotations requested is too high! Adjusted to 30.\n");
NumRot = 30;
}
for (i = 0; i<NumRot; i++){
// Load image:
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
if (! image.data){
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.cols, image.rows, image.dims);
//set up global variables for image size
M = image.rows;
N = image.cols;
// Create CPU memory to store the output;
/*Mat */zero = Mat(M,N,CV_8UC1, Scalar(255)); //start by making every pixel white
sprintf(filename,"%sAROT%d.png", argv[2], i);
imwrite(filename,zero);
CPU_OutputArray = (int*) malloc(M*N*sizeof(int));
if (CPU_OutputArray == NULL){
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
//run it
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != hipSuccess){
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \nSum of Iteration = %5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
ExecTotalTime += GPURuntimes[0];
GPUTotalTime += GPURuntimes[2];
printf("\nGPU Execution Time = %5.2f ms \n", GPUTotalTime);
printf("Total Execution Time = %5.2f ms\n", ExecTotalTime);
printf("-----------------------------------------------------------------\n");
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess){
fprintf(stderr, "hipDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
//save image to disk
Mat result = Mat(M,N,CV_8UC1, CPU_OutputArray);
if (!imwrite(filename, result)){ // write once, and check the result
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
//filename.c_str(), result.cols, result.rows, result.dims);
filename, result.cols, result.rows, result.dims);
free(CPU_OutputArray);
}
exit(EXIT_SUCCESS);
}
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes){
hipEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image in bytes
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
dim3 threadsPerBlock;
dim3 numBlocks;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0); // use the first GPU
if (cudaStatus != hipSuccess){
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
hipEventCreate(&time1);
hipEventCreate(&time2);
hipEventCreate(&time3);
hipEventCreate(&time4);
hipEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
TotalGPUSize = M * N * sizeof(uchar);
cudaStatus = hipMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(GPU_odata, zero.data, TotalGPUSize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "cudaMemcpyzero failed!\n");
goto Error;
}
cudaStatus = hipMemcpy(GPU_idata, image.data, TotalGPUSize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time2, 0);
/* for (int row=0;row<image.rows;row++)
{
for (int col=0;col<image.cols;col++)
{
image.at<uchar>(row,col) = 0;
}
}*/
// Launch a kernel on the GPU with one thread for each pixel.
threadsPerBlock = dim3(BOX_SIZE, BOX_SIZE);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( rotate_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, GPU_idata, GPU_odata, M, N, a, NumRot);
// Check for errors immediately after kernel launch.
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
hipEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = hipMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time4, 0);
hipEventSynchronize(time1);
hipEventSynchronize(time2);
hipEventSynchronize(time3);
hipEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
hipEventElapsedTime(&totalTime, time1, time4);
hipEventElapsedTime(&tfrCPUtoGPU, time1, time2);
hipEventElapsedTime(&kernelExecutionTime, time2, time3);
hipEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
Error:
hipFree(GPU_odata);
hipFree(GPU_idata);
hipEventDestroy(time1);
hipEventDestroy(time2);
hipEventDestroy(time3);
hipEventDestroy(time4);
a++;
return cudaStatus;
}
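/* One plausible way to build and run this translated file (compiler
invocation, package name and sample arguments are assumptions, not from the
source):
hipcc 437f6b46c9660fdd45c19cf3d291d7ea80e4756a.hip -o rotate `pkg-config --cflags --libs opencv`
./rotate input.png out 8   # writes outAROT0.png ... outAROT7.png
*/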
| 437f6b46c9660fdd45c19cf3d291d7ea80e4756a.cu | #include <stdio.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//CUDA STUFF:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//OpenCV stuff
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
#define BOX_SIZE 1 // ThreadsPerBlock == BOX_SIZE * BOX_SIZE
int M; //number of rows in image
int N; //number of columns in image
int NumRot;
int a = 0;
Mat zero;
//ip.Vpixels <--> M
//ip.Hpixels <--> N
__global__ void rotate_kernel(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE][BOX_SIZE];
int row = bx * BOX_SIZE + tx; //row of image
int col = by * BOX_SIZE + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360 / j * i; //in degrees
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
	h=N/2; //half of the columns (horizontal center of the image)
	v=M/2; //half of the rows (vertical center of the image)
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box CONFIRMED TO BE CORRECT
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
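// For reference, the mapping implemented above in matrix form (the standard
// 2-D rotation about the image center, followed by uniform scaling so the
// rotated image fits the N x M box):
//   [newX]   [ cos(a)  -sin(a) ] [X]
//   [newY] = [ sin(a)   cos(a) ] [Y]
// Several source pixels can round to the same (row2, col2), so concurrent
// writes to GPU_o can race; whichever thread writes last wins.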
int main(int argc, char *argv[]){
float GPURuntimes[4]; // run times of the GPU code
	float ExecTotalTime = 0, GPUTotalTime = 0; // initialize: both are accumulated with += below
cudaError_t cudaStatus;
char filename[100]; //output file name
int i;
int *CPU_OutputArray = (int*) 0; // where the GPU should copy the output back to
if (argc != 4){
printf("Improper usage!\n");
printf("Usage: %s <input image> <output image> <N rotations>\n", argv[0]);
exit(EXIT_FAILURE);
}
NumRot = atoi(argv[3]);
if (NumRot > 30){
printf("Number of rotations requested is too high! Adjusted to 30.\n");
NumRot = 30;
}
for (i = 0; i<NumRot; i++){
// Load image:
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
if (! image.data){
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.cols, image.rows, image.dims);
//set up global variables for image size
M = image.rows;
N = image.cols;
// Create CPU memory to store the output;
		zero = Mat(M,N,CV_8UC1, Scalar(255)); //global canvas; start by making every pixel white
sprintf(filename,"%sAROT%d.png", argv[2], i);
imwrite(filename,zero);
CPU_OutputArray = (int*) malloc(M*N*sizeof(int));
if (CPU_OutputArray == NULL){
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
//run it
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != cudaSuccess){
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \nSum of Iteration = %5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
ExecTotalTime += GPURuntimes[0];
GPUTotalTime += GPURuntimes[2];
printf("\nGPU Execution Time = %5.2f ms \n", GPUTotalTime);
printf("Total Execution Time = %5.2f ms\n", ExecTotalTime);
printf("-----------------------------------------------------------------\n");
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess){
fprintf(stderr, "cudaDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
//save image to disk
Mat result = Mat(M,N,CV_8UC1, CPU_OutputArray);
		if (!imwrite(filename, result)){ // single, checked write
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
//filename.c_str(), result.cols, result.rows, result.dims);
filename, result.cols, result.rows, result.dims);
free(CPU_OutputArray);
}
exit(EXIT_SUCCESS);
}
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes){
cudaEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image in bytes
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
dim3 threadsPerBlock;
dim3 numBlocks;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0); // use the first GPU
if (cudaStatus != cudaSuccess){
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
cudaEventCreate(&time1);
cudaEventCreate(&time2);
cudaEventCreate(&time3);
cudaEventCreate(&time4);
cudaEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
TotalGPUSize = M * N * sizeof(uchar);
cudaStatus = cudaMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
	// Copy the blank canvas into the output buffer and the input image into the input buffer.
cudaStatus = cudaMemcpy(GPU_odata, zero.data, TotalGPUSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpyzero failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy(GPU_idata, image.data, TotalGPUSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time2, 0);
/* for (int row=0;row<image.rows;row++)
{
for (int col=0;col<image.cols;col++)
{
image.at<uchar>(row,col) = 0;
}
}*/
// Launch a kernel on the GPU with one thread for each pixel.
threadsPerBlock = dim3(BOX_SIZE, BOX_SIZE);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
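	// Note (an addition, not original code): integer division drops edge pixels
	// whenever M or N is not a multiple of BOX_SIZE. A common guard is ceiling
	// division plus a bounds check inside rotate_kernel, e.g.:
	//   numBlocks = dim3((M + threadsPerBlock.x - 1) / threadsPerBlock.x,
	//                    (N + threadsPerBlock.y - 1) / threadsPerBlock.y);
	//   // ...and in the kernel:  if (row >= M || col >= N) return;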
rotate_kernel<<<numBlocks, threadsPerBlock>>>(GPU_idata, GPU_odata, M, N, a, NumRot);
// Check for errors immediately after kernel launch.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
cudaEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time4, 0);
cudaEventSynchronize(time1);
cudaEventSynchronize(time2);
cudaEventSynchronize(time3);
cudaEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
Error:
	cudaFree(GPU_odata);
	cudaFree(GPU_idata);
	cudaFree(GPU_zerodata); // was allocated above but never released, leaking on every call
cudaEventDestroy(time1);
cudaEventDestroy(time2);
cudaEventDestroy(time3);
cudaEventDestroy(time4);
a++;
return cudaStatus;
}
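// Note: the handle-less hipblasSasum/hipblasIsamax calls above were carried
// over verbatim from the legacy cuBLAS v1 interface; hipBLAS itself is
// handle-based, so porting them would look roughly like this (a sketch for
// reference, not part of the original file):
//   hipblasHandle_t handle;  hipblasCreate(&handle);
//   float s;  hipblasSasum(handle, m, d_float_m, 1, &s);
//   hipblasDestroy(handle);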
|
edc1ad17e905594d046ea2502de9f691b3d67678.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "occlusion.cuh"
__global__ void detect_occlusionOnGPU(float* disparityLeft, float* disparityRight, const int dOcclusion, const int w, const int h)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int idx = id % w;
if (id < w*h) {
int d = (int) disparityLeft[id];
		float diff = 1.0f*(D_LR + 1); // (computed but unused)
if (idx + d < 0 || idx + d >= w || abs(d + disparityRight[id + d]) > D_LR) {
disparityLeft[id] = dOcclusion;
}
}
}
void detect_occlusion(float* disparityLeft, float* disparityRight, const int dOcclusion, unsigned char* dmapl, unsigned char* dmapr, const int w, const int h)
{
float* d_disparityLeft;
float* d_disparityRight;
float* h_disparityLeft = (float*)malloc(w*h * sizeof(float));
float* h_disparityRight = (float*)malloc(w*h * sizeof(float));
unsigned char* d_dmapl;
unsigned char* d_dmapr;
int n = w * h;
//detect_occlusionOnCPU(disparityLeft, disparityRight, dOcclusion, w, h);
CHECK(hipMalloc((void**)&d_disparityLeft, n * sizeof(float)));
CHECK(hipMalloc((void**)&d_disparityRight, n * sizeof(float)));
CHECK(hipMalloc((void**)&d_dmapl, n ));
CHECK(hipMalloc((void**)&d_dmapr, n));
CHECK(hipMemcpy(d_disparityLeft, disparityLeft, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_disparityRight, disparityRight, n * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_dmapl, dmapl, n , hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_dmapr, dmapr, n , hipMemcpyHostToDevice));
dim3 nThreadsPerBlock(1024);
dim3 nBlocks((n + nThreadsPerBlock.x - 1) / nThreadsPerBlock.x);
int minl = D_MIN;
int maxl = D_MAX;
int maxr = -1*D_MIN;
int minr = -1*D_MAX;
//flToCh2OnGPU << <nBlocks, nThreadsPerBlock >> > (d_disparityLeft, d_dmapl, minl, maxl, n, dOcclusion);
//flToCh2OnGPU << <nBlocks, nThreadsPerBlock >> > (d_disparityRight, d_dmapr, minr, maxr, n, dOcclusion);
detect_occlusionOnGPU << <nBlocks, nThreadsPerBlock >> > (d_disparityLeft, d_disparityRight, dOcclusion, w, h);
//detect_occlusionOnCPU(h_disparityLeft, h_disparityRight, dOcclusion, w, h);
CHECK(hipMemcpy(dmapl, d_dmapl, n, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(dmapr, d_dmapr, n, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(disparityLeft, d_disparityLeft, n * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
CHECK(hipFree(d_disparityLeft));
CHECK(hipFree(d_disparityRight));
CHECK(hipFree(d_dmapl));
CHECK(hipFree(d_dmapr));
//for (int i = 0; i < n; i++) {
// cout << i << " ResultGPU = " << disparityLeft[i] << endl;
//}
//float* h_disparityLeft = (float*)malloc(n * sizeof(float));
//float* h_disparityRight = (float*)malloc(n * sizeof(float));
//memcpy(h_disparityLeft, disparityLeft, n * sizeof(float));
//memcpy(h_disparityRight, disparityRight, n * sizeof(float));
//detect_occlusionOnCPU(h_disparityLeft, h_disparityRight, dOcclusion, w, h);
//bool verif = check_errors(h_disparityLeft, disparityLeft, n);
//if (verif) cout << "Disparity left ok!" << endl;
//verif = check_errors(h_disparityRight, disparityRight, n);
//if (verif) cout << "Disparity right ok!" << endl;
	free(h_disparityLeft);  // allocated above; free even though the CPU check is commented out
	free(h_disparityRight);
}
/// Detect left-right discrepancies in disparity and put incoherent pixels to
/// value \a dOcclusion in \a disparityLeft.
void detect_occlusionOnCPU(float* disparityLeft, float* disparityRight, const int dOcclusion, const int w, const int h)
{
int occlusion = 0;
for (int y = 0; y < h; y++)
{
for (int x = 0; x < w; x++)
{
			int d = (int)disparityLeft[x + w * y];
			// read the matching right disparity only after the bounds test passes
			// (short-circuit), otherwise x + d can index outside the row
			if (x + d < 0 || x + d >= w || abs(d + (int)disparityRight[x + w * y + d]) > D_LR)
{
occlusion++;
disparityLeft[x + w * y] = dOcclusion;
}
}
}
cout << "occlusions: " << occlusion << endl;
}
// Filling
void fill_occlusion(float* disparity, const int w, const int h, const float vMin)
{
float* d_disparity;
int n = w * h;
CHECK(hipMalloc((void**)&d_disparity, n * sizeof(float)));
CHECK(hipMemcpy(d_disparity, disparity, n * sizeof(float), hipMemcpyHostToDevice));
dim3 nThreadsPerBlock(1024);
dim3 nBlocks((n + nThreadsPerBlock.x - 1) / nThreadsPerBlock.x);
fill_occlusionOnGPU1 << <nBlocks, nThreadsPerBlock >> > (d_disparity, w, h, vMin);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
CHECK(hipMemcpy(disparity, d_disparity, n * sizeof(float), hipMemcpyDeviceToHost));
CHECK(hipFree(d_disparity));
}
__global__ void fill_occlusionOnGPU1(float* disparity, const int w, const int h, const float vMin)
{
int tdx = blockIdx.x * blockDim.x + threadIdx.x;
if (tdx >= w * h) return;
int dX = disparity[tdx];
// ignore non-occluded pixels
if (dX >= vMin) return;
int y = tdx / w;
int xLeft = tdx % w;
float dLeft = vMin;
// find first non occluded left pixel
while (xLeft >= 0)
{
if (disparity[xLeft + w * y] >= vMin) {
dLeft = disparity[xLeft + w * y];
break;
}
xLeft -= 1;
}
int xRight = tdx % w;
float dRight = vMin;
// find first non occluded right pixel
while (xRight < w)
{
// if it is nonoccluded, stop
if (disparity[xRight + w * y] >= vMin) {
dRight = disparity[xRight + w * y];
break;
}
xRight += 1;
}
disparity[tdx] = max(dLeft, dRight);
}
__global__ void fill_occlusionOnGPU2(float* disparity, const int w, const int h, const float vMin)
{
// 1) threads computing dLeft
// 2) threads computing dRight
// syncthreads
// max(dLeft, dRight)
}
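// --- A minimal sketch of the two-phase plan outlined above (an assumption of
// this note, not code from the original project). It assumes one 1-D block per
// image row, launched as
//   hipLaunchKernelGGL(fill_occlusion_two_phase, dim3(h), dim3(w), 0, 0, d_disparity, w, h, vMin);
// with w <= the device's max threads per block. Each block owns exactly one
// row, so no cross-block coordination is needed; every thread reaches the
// barrier, so out-of-range threads are masked with a flag instead of returning.
__global__ void fill_occlusion_two_phase(float* disparity, const int w, const int h, const float vMin)
{
	int y = blockIdx.x;                  // one block per image row
	int x = threadIdx.x;
	bool occluded = (y < h && x < w) && (disparity[x + w * y] < vMin);
	float dLeft = vMin, dRight = vMin;
	if (occluded)
	{
		// phase 1: nearest non-occluded disparity to the left
		for (int xl = x; xl >= 0; xl--)
			if (disparity[xl + w * y] >= vMin) { dLeft = disparity[xl + w * y]; break; }
		// phase 2: nearest non-occluded disparity to the right
		for (int xr = x; xr < w; xr++)
			if (disparity[xr + w * y] >= vMin) { dRight = disparity[xr + w * y]; break; }
	}
	__syncthreads();                     // all reads of the row finish before any write
	if (occluded)
		disparity[x + w * y] = max(dLeft, dRight);
}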
/// Fill pixels below value vMin using maximum value between the two closest (at left and at right) pixels on same
/// line above vMin.
void fill_occlusionOnCPU(float* disparity, const int w, const int h, const float vMin) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++)
{
int dX = disparity[x + w * y];
// ignore non-occluded pixels
if (dX >= vMin) continue;
int xLeft = x;
float dLeft = vMin;
// find first non occluded left pixel
while (xLeft >= 0)
{
if (disparity[xLeft + w * y] >= vMin) {
dLeft = disparity[xLeft + w * y];
break;
}
xLeft -= 1;
}
int xRight = x;
float dRight = vMin;
// find first non occluded right pixel
while (xRight < w)
{
// if it is nonoccluded, stop
if (disparity[xRight + w * y] >= vMin) {
dRight = disparity[xRight + w * y];
break;
}
xRight += 1;
}
disparity[x + w * y] = max(dLeft, dRight);
}
}
}
__global__ void flToCh2OnGPU(float* image, unsigned char* result, int min, int max, int len, const int dOcclusion) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= len) { return;}
float pix = image[i];
float c = 1.0f*160*(pix - min)/(1.0f*(max -min));
unsigned char val = (c > 255) ? 255 : (unsigned char)c;
result[i] = val;
} | edc1ad17e905594d046ea2502de9f691b3d67678.cu | #include "occlusion.cuh"
__global__ void detect_occlusionOnGPU(float* disparityLeft, float* disparityRight, const int dOcclusion, const int w, const int h)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int idx = id % w;
if (id < w*h) {
int d = (int) disparityLeft[id];
		float diff = 1.0f*(D_LR + 1); // (computed but unused)
if (idx + d < 0 || idx + d >= w || abs(d + disparityRight[id + d]) > D_LR) {
disparityLeft[id] = dOcclusion;
}
}
}
void detect_occlusion(float* disparityLeft, float* disparityRight, const int dOcclusion, unsigned char* dmapl, unsigned char* dmapr, const int w, const int h)
{
float* d_disparityLeft;
float* d_disparityRight;
float* h_disparityLeft = (float*)malloc(w*h * sizeof(float));
float* h_disparityRight = (float*)malloc(w*h * sizeof(float));
unsigned char* d_dmapl;
unsigned char* d_dmapr;
int n = w * h;
//detect_occlusionOnCPU(disparityLeft, disparityRight, dOcclusion, w, h);
CHECK(cudaMalloc((void**)&d_disparityLeft, n * sizeof(float)));
CHECK(cudaMalloc((void**)&d_disparityRight, n * sizeof(float)));
CHECK(cudaMalloc((void**)&d_dmapl, n ));
CHECK(cudaMalloc((void**)&d_dmapr, n));
CHECK(cudaMemcpy(d_disparityLeft, disparityLeft, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_disparityRight, disparityRight, n * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_dmapl, dmapl, n , cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_dmapr, dmapr, n , cudaMemcpyHostToDevice));
dim3 nThreadsPerBlock(1024);
dim3 nBlocks((n + nThreadsPerBlock.x - 1) / nThreadsPerBlock.x);
int minl = D_MIN;
int maxl = D_MAX;
int maxr = -1*D_MIN;
int minr = -1*D_MAX;
//flToCh2OnGPU << <nBlocks, nThreadsPerBlock >> > (d_disparityLeft, d_dmapl, minl, maxl, n, dOcclusion);
//flToCh2OnGPU << <nBlocks, nThreadsPerBlock >> > (d_disparityRight, d_dmapr, minr, maxr, n, dOcclusion);
detect_occlusionOnGPU << <nBlocks, nThreadsPerBlock >> > (d_disparityLeft, d_disparityRight, dOcclusion, w, h);
//detect_occlusionOnCPU(h_disparityLeft, h_disparityRight, dOcclusion, w, h);
CHECK(cudaMemcpy(dmapl, d_dmapl, n, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(dmapr, d_dmapr, n, cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(disparityLeft, d_disparityLeft, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
CHECK(cudaFree(d_disparityLeft));
CHECK(cudaFree(d_disparityRight));
CHECK(cudaFree(d_dmapl));
CHECK(cudaFree(d_dmapr));
//for (int i = 0; i < n; i++) {
// cout << i << " ResultGPU = " << disparityLeft[i] << endl;
//}
//float* h_disparityLeft = (float*)malloc(n * sizeof(float));
//float* h_disparityRight = (float*)malloc(n * sizeof(float));
//memcpy(h_disparityLeft, disparityLeft, n * sizeof(float));
//memcpy(h_disparityRight, disparityRight, n * sizeof(float));
//detect_occlusionOnCPU(h_disparityLeft, h_disparityRight, dOcclusion, w, h);
//bool verif = check_errors(h_disparityLeft, disparityLeft, n);
//if (verif) cout << "Disparity left ok!" << endl;
//verif = check_errors(h_disparityRight, disparityRight, n);
//if (verif) cout << "Disparity right ok!" << endl;
	free(h_disparityLeft);  // allocated above; free even though the CPU check is commented out
	free(h_disparityRight);
}
/// Detect left-right discrepancies in disparity and put incoherent pixels to
/// value \a dOcclusion in \a disparityLeft.
void detect_occlusionOnCPU(float* disparityLeft, float* disparityRight, const int dOcclusion, const int w, const int h)
{
int occlusion = 0;
for (int y = 0; y < h; y++)
{
for (int x = 0; x < w; x++)
{
			int d = (int)disparityLeft[x + w * y];
			// read the matching right disparity only after the bounds test passes
			// (short-circuit), otherwise x + d can index outside the row
			if (x + d < 0 || x + d >= w || abs(d + (int)disparityRight[x + w * y + d]) > D_LR)
{
occlusion++;
disparityLeft[x + w * y] = dOcclusion;
}
}
}
cout << "occlusions: " << occlusion << endl;
}
// Filling
void fill_occlusion(float* disparity, const int w, const int h, const float vMin)
{
float* d_disparity;
int n = w * h;
CHECK(cudaMalloc((void**)&d_disparity, n * sizeof(float)));
CHECK(cudaMemcpy(d_disparity, disparity, n * sizeof(float), cudaMemcpyHostToDevice));
dim3 nThreadsPerBlock(1024);
dim3 nBlocks((n + nThreadsPerBlock.x - 1) / nThreadsPerBlock.x);
fill_occlusionOnGPU1 << <nBlocks, nThreadsPerBlock >> > (d_disparity, w, h, vMin);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(disparity, d_disparity, n * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaFree(d_disparity));
}
__global__ void fill_occlusionOnGPU1(float* disparity, const int w, const int h, const float vMin)
{
int tdx = blockIdx.x * blockDim.x + threadIdx.x;
if (tdx >= w * h) return;
int dX = disparity[tdx];
// ignore non-occluded pixels
if (dX >= vMin) return;
int y = tdx / w;
int xLeft = tdx % w;
float dLeft = vMin;
// find first non occluded left pixel
while (xLeft >= 0)
{
if (disparity[xLeft + w * y] >= vMin) {
dLeft = disparity[xLeft + w * y];
break;
}
xLeft -= 1;
}
int xRight = tdx % w;
float dRight = vMin;
// find first non occluded right pixel
while (xRight < w)
{
// if it is nonoccluded, stop
if (disparity[xRight + w * y] >= vMin) {
dRight = disparity[xRight + w * y];
break;
}
xRight += 1;
}
disparity[tdx] = max(dLeft, dRight);
}
__global__ void fill_occlusionOnGPU2(float* disparity, const int w, const int h, const float vMin)
{
// 1) threads computing dLeft
// 2) threads computing dRight
// syncthreads
// max(dLeft, dRight)
}
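// --- A minimal sketch of the two-phase plan outlined above (an assumption of
// this note, not code from the original project). It assumes one 1-D block per
// image row, launched as
//   fill_occlusion_two_phase<<<h, w>>>(d_disparity, w, h, vMin);
// with w <= the device's max threads per block. Each block owns exactly one
// row, so no cross-block coordination is needed; every thread reaches the
// barrier, so out-of-range threads are masked with a flag instead of returning.
__global__ void fill_occlusion_two_phase(float* disparity, const int w, const int h, const float vMin)
{
	int y = blockIdx.x;                  // one block per image row
	int x = threadIdx.x;
	bool occluded = (y < h && x < w) && (disparity[x + w * y] < vMin);
	float dLeft = vMin, dRight = vMin;
	if (occluded)
	{
		// phase 1: nearest non-occluded disparity to the left
		for (int xl = x; xl >= 0; xl--)
			if (disparity[xl + w * y] >= vMin) { dLeft = disparity[xl + w * y]; break; }
		// phase 2: nearest non-occluded disparity to the right
		for (int xr = x; xr < w; xr++)
			if (disparity[xr + w * y] >= vMin) { dRight = disparity[xr + w * y]; break; }
	}
	__syncthreads();                     // all reads of the row finish before any write
	if (occluded)
		disparity[x + w * y] = max(dLeft, dRight);
}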
/// Fill pixels below value vMin using maximum value between the two closest (at left and at right) pixels on same
/// line above vMin.
void fill_occlusionOnCPU(float* disparity, const int w, const int h, const float vMin) {
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++)
{
int dX = disparity[x + w * y];
// ignore non-occluded pixels
if (dX >= vMin) continue;
int xLeft = x;
float dLeft = vMin;
// find first non occluded left pixel
while (xLeft >= 0)
{
if (disparity[xLeft + w * y] >= vMin) {
dLeft = disparity[xLeft + w * y];
break;
}
xLeft -= 1;
}
int xRight = x;
float dRight = vMin;
// find first non occluded right pixel
while (xRight < w)
{
// if it is nonoccluded, stop
if (disparity[xRight + w * y] >= vMin) {
dRight = disparity[xRight + w * y];
break;
}
xRight += 1;
}
disparity[x + w * y] = max(dLeft, dRight);
}
}
}
__global__ void flToCh2OnGPU(float* image, unsigned char* result, int min, int max, int len, const int dOcclusion) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= len) { return;}
float pix = image[i];
float c = 1.0f*160*(pix - min)/(1.0f*(max -min));
unsigned char val = (c > 255) ? 255 : (unsigned char)c;
result[i] = val;
} |
87a421056186736b3e614623239006f003d151f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
#define MAX_BLOCK_THREADS 1024
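// addKernel advances one coordinate per thread along a straight line:
// c = a + b*t, i.e. current position = initial position + velocity * time.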
__global__ void addKernel(double *dev_a, double *dev_b, double *dev_c, int n, int dim, double t)
{
int i = threadIdx.x;
int b = blockIdx.x;
int d = blockDim.x;
int idx = b * d + i;
if (idx < n * dim)
dev_c[idx] = dev_a[idx] + dev_b[idx] * t;
}
// Helper function for using CUDA to set points in parallel.
hipError_t setPointsWithCuda(Point *points, int n, int dim, double t, double *dev_vecInitloc, double *dev_vecV, double *dev_vecCurrentloc)
{
char errorBuffer[100];
hipError_t cudaStatus;
int numBlocks;
// Choose which GPU to run on
cudaStatus = hipSetDevice(0);
checkError(cudaStatus, dev_vecInitloc, dev_vecV, dev_vecCurrentloc,
"hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
numBlocks = (n*dim) / MAX_BLOCK_THREADS;
if ( (n*dim) % MAX_BLOCK_THREADS)
numBlocks++;
// Launch a kernel on the GPU
hipLaunchKernelGGL(( addKernel), dim3(numBlocks), dim3(MAX_BLOCK_THREADS) , 0, 0, dev_vecInitloc, dev_vecV, dev_vecCurrentloc, n, dim, t);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
sprintf(errorBuffer, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
checkError(cudaStatus, dev_vecInitloc, dev_vecV, dev_vecCurrentloc, errorBuffer);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
sprintf(errorBuffer, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
checkError(cudaStatus, dev_vecInitloc, dev_vecV, dev_vecCurrentloc, errorBuffer);
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(points->vecCurrentloc, dev_vecCurrentloc, dim * sizeof(double) * n , hipMemcpyDeviceToHost);
	checkError(cudaStatus, dev_vecInitloc, dev_vecV, dev_vecCurrentloc, "hipMemcpy failed!\n"); // don't reuse the stale synchronize message
return cudaStatus;
}
hipError_t initCudaMemory(Point *points, int n, int dim, double **dev_a, double **dev_b, double **dev_c)
{
hipError_t cudaStatus;
char errorBuffer[100];
// Choose which GPU to run on
cudaStatus = hipSetDevice(0);
checkError(cudaStatus, *dev_a, *dev_b, *dev_c,
"hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
// Allocate GPU buffers for three vectors
cudaStatus = hipMalloc((void**)dev_a, n * dim * sizeof(double));
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "hipMalloc failed!");
cudaStatus = hipMalloc((void**)dev_b, n * dim * sizeof(double));
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "hipMalloc failed!");
cudaStatus = hipMalloc((void**)dev_c, n * dim * sizeof(double));
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "hipMalloc failed!");
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(*dev_a, points->vecInitloc, dim * sizeof(double) * n, hipMemcpyHostToDevice);
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "hipMemcpy failed!\n");
cudaStatus = hipMemcpy(*dev_b, points->vecV, dim * sizeof(double) * n, hipMemcpyHostToDevice);
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "hipMemcpy failed!\n");
cudaStatus = hipGetLastError();
sprintf(errorBuffer, "initCudaMemory failed: %s\n", hipGetErrorString(cudaStatus));
	checkError(cudaStatus, *dev_a, *dev_b, *dev_c, errorBuffer); // report the message built just above
return cudaStatus;
}
void freeCudaMemory(double *dev_a, double *dev_b, double *dev_c)
{
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
}
void checkError(hipError_t cudaStatus, double *dev_a, double *dev_b, double *dev_c, const char* errorMessage)
{
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "%s", errorMessage); // pass as an argument, not as a format string
fprintf(stderr, "\n");
freeCudaMemory(dev_a, dev_b, dev_c);
}
}
// unused below
__global__ void fKernel(int *dev_f, double *dev_w, double *dev_points,int n,int dim) {
int i = threadIdx.x;
int b = blockIdx.x;
int d = blockDim.x;
int idx = b * d + i;
if (idx < n) {
double res = dev_w[0];
for (int k = 0; k < dim; k++)
res += dev_w[k + 1] * dev_points[idx*dim+k];
dev_f[idx]=(res >= 0) ? A : B;
}
}
void markWithCuda(Classifier C, double *weights, int *fArray, double *dev_points) {
int *dev_f;
double *dev_w;
int numBlocks;
// Choose which GPU to run on
hipSetDevice(0);
hipMalloc((void**)&dev_f, C.N * sizeof(int));
hipMalloc((void**)&dev_w, (C.K+1) * sizeof(double));
hipMemcpy(dev_f, fArray, sizeof(int) * C.N, hipMemcpyHostToDevice);
hipMemcpy(dev_w, weights, (C.K+1) * sizeof(double), hipMemcpyHostToDevice);
numBlocks = (C.N) / MAX_BLOCK_THREADS;
if ((C.N) % MAX_BLOCK_THREADS)
numBlocks++;
// Launch a kernel on the GPU
hipLaunchKernelGGL(( fKernel) , dim3(numBlocks), dim3(MAX_BLOCK_THREADS) , 0, 0, dev_f, dev_w, dev_points,C.N, C.K);
hipMemcpy(fArray, dev_f,C.N * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_f);
hipFree(dev_w);
}
| 87a421056186736b3e614623239006f003d151f6.cu | #include "header.h"
#define MAX_BLOCK_THREADS 1024
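// addKernel advances one coordinate per thread along a straight line:
// c = a + b*t, i.e. current position = initial position + velocity * time.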
__global__ void addKernel(double *dev_a, double *dev_b, double *dev_c, int n, int dim, double t)
{
int i = threadIdx.x;
int b = blockIdx.x;
int d = blockDim.x;
int idx = b * d + i;
if (idx < n * dim)
dev_c[idx] = dev_a[idx] + dev_b[idx] * t;
}
// Helper function for using CUDA to set points in parallel.
cudaError_t setPointsWithCuda(Point *points, int n, int dim, double t, double *dev_vecInitloc, double *dev_vecV, double *dev_vecCurrentloc)
{
char errorBuffer[100];
cudaError_t cudaStatus;
int numBlocks;
// Choose which GPU to run on
cudaStatus = cudaSetDevice(0);
checkError(cudaStatus, dev_vecInitloc, dev_vecV, dev_vecCurrentloc,
"cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
numBlocks = (n*dim) / MAX_BLOCK_THREADS;
if ( (n*dim) % MAX_BLOCK_THREADS)
numBlocks++;
// Launch a kernel on the GPU
addKernel<<<numBlocks, MAX_BLOCK_THREADS >>>(dev_vecInitloc, dev_vecV, dev_vecCurrentloc, n, dim, t);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
sprintf(errorBuffer, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
checkError(cudaStatus, dev_vecInitloc, dev_vecV, dev_vecCurrentloc, errorBuffer);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
sprintf(errorBuffer, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
checkError(cudaStatus, dev_vecInitloc, dev_vecV, dev_vecCurrentloc, errorBuffer);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(points->vecCurrentloc, dev_vecCurrentloc, dim * sizeof(double) * n , cudaMemcpyDeviceToHost);
	checkError(cudaStatus, dev_vecInitloc, dev_vecV, dev_vecCurrentloc, "cudaMemcpy failed!\n"); // don't reuse the stale synchronize message
return cudaStatus;
}
cudaError_t initCudaMemory(Point *points, int n, int dim, double **dev_a, double **dev_b, double **dev_c)
{
cudaError_t cudaStatus;
char errorBuffer[100];
// Choose which GPU to run on
cudaStatus = cudaSetDevice(0);
checkError(cudaStatus, *dev_a, *dev_b, *dev_c,
"cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
// Allocate GPU buffers for three vectors
cudaStatus = cudaMalloc((void**)dev_a, n * dim * sizeof(double));
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "cudaMalloc failed!");
cudaStatus = cudaMalloc((void**)dev_b, n * dim * sizeof(double));
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "cudaMalloc failed!");
cudaStatus = cudaMalloc((void**)dev_c, n * dim * sizeof(double));
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "cudaMalloc failed!");
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(*dev_a, points->vecInitloc, dim * sizeof(double) * n, cudaMemcpyHostToDevice);
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "cudaMemcpy failed!\n");
cudaStatus = cudaMemcpy(*dev_b, points->vecV, dim * sizeof(double) * n, cudaMemcpyHostToDevice);
checkError(cudaStatus, *dev_a, *dev_b, *dev_c, "cudaMemcpy failed!\n");
cudaStatus = cudaGetLastError();
sprintf(errorBuffer, "initCudaMemory failed: %s\n", cudaGetErrorString(cudaStatus));
	checkError(cudaStatus, *dev_a, *dev_b, *dev_c, errorBuffer); // report the message built just above
return cudaStatus;
}
void freeCudaMemory(double *dev_a, double *dev_b, double *dev_c)
{
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
}
void checkError(cudaError_t cudaStatus, double *dev_a, double *dev_b, double *dev_c, const char* errorMessage)
{
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "%s", errorMessage); // pass as an argument, not as a format string
fprintf(stderr, "\n");
freeCudaMemory(dev_a, dev_b, dev_c);
}
}
// unused below
__global__ void fKernel(int *dev_f, double *dev_w, double *dev_points,int n,int dim) {
int i = threadIdx.x;
int b = blockIdx.x;
int d = blockDim.x;
int idx = b * d + i;
if (idx < n) {
double res = dev_w[0];
for (int k = 0; k < dim; k++)
res += dev_w[k + 1] * dev_points[idx*dim+k];
dev_f[idx]=(res >= 0) ? A : B;
}
}
void markWithCuda(Classifier C, double *weights, int *fArray, double *dev_points) {
int *dev_f;
double *dev_w;
int numBlocks;
// Choose which GPU to run on
cudaSetDevice(0);
cudaMalloc((void**)&dev_f, C.N * sizeof(int));
cudaMalloc((void**)&dev_w, (C.K+1) * sizeof(double));
cudaMemcpy(dev_f, fArray, sizeof(int) * C.N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_w, weights, (C.K+1) * sizeof(double), cudaMemcpyHostToDevice);
numBlocks = (C.N) / MAX_BLOCK_THREADS;
if ((C.N) % MAX_BLOCK_THREADS)
numBlocks++;
// Launch a kernel on the GPU
fKernel <<<numBlocks, MAX_BLOCK_THREADS >>> (dev_f, dev_w, dev_points,C.N, C.K);
cudaMemcpy(fArray, dev_f,C.N * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_f);
cudaFree(dev_w);
}
|
dc50286033f45912f7a6eb31050065a6db15a695.hip | // !!! This is a file automatically generated by hipify!!!
#include "common/book.h"
#include "common/cpu_anim.h"
#include <hip/hip_runtime.h>
#include <iostream>
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
texture<float,2> texConstSrc;
texture<float,2> texIn;
texture<float,2> texOut;
//using namespace std;
struct DataBlock {
unsigned char *output_bitmap;
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
CPUAnimBitmap *bitmap;
hipEvent_t start, stop;
float totalTime;
float frames;
};
__global__ void copy_const_kernel(float *iptr) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x*gridDim.x;
float c = tex2D(texConstSrc, x,y);
if (c != 0) iptr[offset] = c;
}
__global__ void blend_kernel(float *outSrc, bool dstOut) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x*gridDim.x;
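	// (left/right/top/bottom below are vestigial in this 2-D texture version:
	//  the tex2D fetches use x-1..x+1 and y-1..y+1 directly, and the texture's
	//  default clamp addressing handles the borders)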
int left = offset - 1;
int right = offset + 1;
if (x == 0) left++;
if (x == DIM - 1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y == 0) top += DIM;
if (y == DIM - 1) bottom -= DIM;
	float t, l, c, r, b;
	if (dstOut) {
t = tex2D(texIn, x ,y-1);
l = tex2D(texIn, x-1,y);
c = tex2D(texIn, x ,y);
r = tex2D(texIn, x+1,y);
b = tex2D(texIn, x,y+1);
}
else {
t = tex2D(texOut, x ,y-1);
l = tex2D(texOut, x-1,y);
c = tex2D(texOut, x ,y);
r = tex2D(texOut, x+1,y);
b = tex2D(texOut, x,y+1);
}
outSrc[offset] = c + SPEED*(t + l + r + b - 4 * c);
}
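// The update above is one explicit Euler step of the discrete 2-D heat
// equation: new value = c + SPEED * (t + l + r + b - 4*c), i.e. the old value
// plus SPEED times the discrete Laplacian. The explicit scheme is stable for
// SPEED <= 0.25, and the SPEED constant above sits exactly at that bound.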
void anim_gpu(DataBlock *d, int ticks) {
HANDLE_ERROR(hipEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
volatile bool dstOut = true;
float *in, *out;
for (int i = 0; i<10; i++) {
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
}
else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
copy_const_kernel << <blocks, threads >> >( in );
blend_kernel << <blocks, threads >> >( out,dstOut);
dstOut = !dstOut;
}
float_to_color << <blocks, threads >> >(d->output_bitmap,
d->dev_inSrc);
HANDLE_ERROR(hipMemcpy(bitmap->get_ptr(),
d->output_bitmap,
bitmap->image_size(),
hipMemcpyDeviceToHost));
HANDLE_ERROR(hipEventRecord(d->stop, 0));
HANDLE_ERROR(hipEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime,
d->start, d->stop));
d->totalTime += elapsedTime;
++d->frames;
printf("frame:%f,Average Time per frame: %3.1f ms\n",
d->frames,d->totalTime / d->frames);
}
void anim_exit(DataBlock *d) {
hipUnbindTexture(texIn);
hipUnbindTexture(texOut);
hipUnbindTexture(texConstSrc);
hipFree(d->dev_inSrc);
hipFree(d->dev_outSrc);
hipFree(d->dev_constSrc);
HANDLE_ERROR(hipEventDestroy(d->start));
HANDLE_ERROR(hipEventDestroy(d->stop));
}
int main() {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(hipEventCreate(&data.start));
HANDLE_ERROR(hipEventCreate(&data.stop));
HANDLE_ERROR(hipMalloc((void**)&data.output_bitmap, bitmap.image_size()));
// assume float == 4 chars in size (i.e., rgba)
HANDLE_ERROR(hipMalloc((void**)&data.dev_inSrc,
bitmap.image_size()));
HANDLE_ERROR(hipMalloc((void**)&data.dev_outSrc,
bitmap.image_size()));
HANDLE_ERROR(hipMalloc((void**)&data.dev_constSrc,
bitmap.image_size()));
int image_size = bitmap.image_size();
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
HANDLE_ERROR(hipBindTexture2D(NULL, texConstSrc,
data.dev_constSrc,
desc, DIM, DIM,
sizeof(float) * DIM));
HANDLE_ERROR(hipBindTexture2D(NULL, texIn,
data.dev_inSrc,
desc, DIM, DIM,
sizeof(float) * DIM));
HANDLE_ERROR(hipBindTexture2D(NULL, texOut,
data.dev_outSrc,
desc, DIM, DIM,
sizeof(float) * DIM));
float *temp = (float*)malloc(bitmap.image_size());
for (int i = 0; i<DIM*DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x>300) && (x<600) && (y>310) && (y<601))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y<900; y++) {
for (int x = 400; x<500; x++) {
temp[x + y*DIM] = MIN_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_constSrc, temp,
bitmap.image_size(),
hipMemcpyHostToDevice));
for (int y = 800; y<DIM; y++) {
for (int x = 0; x<200; x++) {
temp[x + y*DIM] = MAX_TEMP;
}
}
HANDLE_ERROR(hipMemcpy(data.dev_inSrc, temp,
bitmap.image_size(),
hipMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit((void(*)(void*, int))anim_gpu,
(void(*)(void*))anim_exit);
} | dc50286033f45912f7a6eb31050065a6db15a695.cu | #include "common/book.h"
#include "common/cpu_anim.h"
#include <cuda.h>
#include <iostream>
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
texture<float,2> texConstSrc;
texture<float,2> texIn;
texture<float,2> texOut;
//using namespace std;
struct DataBlock {
unsigned char *output_bitmap;
float *dev_inSrc;
float *dev_outSrc;
float *dev_constSrc;
CPUAnimBitmap *bitmap;
cudaEvent_t start, stop;
float totalTime;
float frames;
};
__global__ void copy_const_kernel(float *iptr) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x*gridDim.x;
float c = tex2D(texConstSrc, x,y);
if (c != 0) iptr[offset] = c;
}
__global__ void blend_kernel(float *outSrc, bool dstOut) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x*gridDim.x;
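	// (left/right/top/bottom below are vestigial in this 2-D texture version:
	//  the tex2D fetches use x-1..x+1 and y-1..y+1 directly, and the texture's
	//  default clamp addressing handles the borders)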
int left = offset - 1;
int right = offset + 1;
if (x == 0) left++;
if (x == DIM - 1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y == 0) top += DIM;
if (y == DIM - 1) bottom -= DIM;
	float t, l, c, r, b;
	if (dstOut) {
t = tex2D(texIn, x ,y-1);
l = tex2D(texIn, x-1,y);
c = tex2D(texIn, x ,y);
r = tex2D(texIn, x+1,y);
b = tex2D(texIn, x,y+1);
}
else {
t = tex2D(texOut, x ,y-1);
l = tex2D(texOut, x-1,y);
c = tex2D(texOut, x ,y);
r = tex2D(texOut, x+1,y);
b = tex2D(texOut, x,y+1);
}
outSrc[offset] = c + SPEED*(t + l + r + b - 4 * c);
}
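// The update above is one explicit Euler step of the discrete 2-D heat
// equation: new value = c + SPEED * (t + l + r + b - 4*c), i.e. the old value
// plus SPEED times the discrete Laplacian. The explicit scheme is stable for
// SPEED <= 0.25, and the SPEED constant above sits exactly at that bound.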
void anim_gpu(DataBlock *d, int ticks) {
HANDLE_ERROR(cudaEventRecord(d->start, 0));
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
volatile bool dstOut = true;
float *in, *out;
for (int i = 0; i<10; i++) {
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
}
else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
copy_const_kernel << <blocks, threads >> >( in );
blend_kernel << <blocks, threads >> >( out,dstOut);
dstOut = !dstOut;
}
float_to_color << <blocks, threads >> >(d->output_bitmap,
d->dev_inSrc);
HANDLE_ERROR(cudaMemcpy(bitmap->get_ptr(),
d->output_bitmap,
bitmap->image_size(),
cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(d->stop, 0));
HANDLE_ERROR(cudaEventSynchronize(d->stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime,
d->start, d->stop));
d->totalTime += elapsedTime;
++d->frames;
printf("frame:%f,Average Time per frame: %3.1f ms\n",
d->frames,d->totalTime / d->frames);
}
void anim_exit(DataBlock *d) {
cudaUnbindTexture(texIn);
cudaUnbindTexture(texOut);
cudaUnbindTexture(texConstSrc);
cudaFree(d->dev_inSrc);
cudaFree(d->dev_outSrc);
cudaFree(d->dev_constSrc);
HANDLE_ERROR(cudaEventDestroy(d->start));
HANDLE_ERROR(cudaEventDestroy(d->stop));
}
int main() {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frames = 0;
HANDLE_ERROR(cudaEventCreate(&data.start));
HANDLE_ERROR(cudaEventCreate(&data.stop));
HANDLE_ERROR(cudaMalloc((void**)&data.output_bitmap, bitmap.image_size()));
// assume float == 4 chars in size (i.e., rgba)
HANDLE_ERROR(cudaMalloc((void**)&data.dev_inSrc,
bitmap.image_size()));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_outSrc,
bitmap.image_size()));
HANDLE_ERROR(cudaMalloc((void**)&data.dev_constSrc,
bitmap.image_size()));
int image_size = bitmap.image_size();
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
HANDLE_ERROR(cudaBindTexture2D(NULL, texConstSrc,
data.dev_constSrc,
desc, DIM, DIM,
sizeof(float) * DIM));
HANDLE_ERROR(cudaBindTexture2D(NULL, texIn,
data.dev_inSrc,
desc, DIM, DIM,
sizeof(float) * DIM));
HANDLE_ERROR(cudaBindTexture2D(NULL, texOut,
data.dev_outSrc,
desc, DIM, DIM,
sizeof(float) * DIM));
float *temp = (float*)malloc(bitmap.image_size());
for (int i = 0; i<DIM*DIM; i++) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x>300) && (x<600) && (y>310) && (y<601))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y<900; y++) {
for (int x = 400; x<500; x++) {
temp[x + y*DIM] = MIN_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_constSrc, temp,
bitmap.image_size(),
cudaMemcpyHostToDevice));
for (int y = 800; y<DIM; y++) {
for (int x = 0; x<200; x++) {
temp[x + y*DIM] = MAX_TEMP;
}
}
HANDLE_ERROR(cudaMemcpy(data.dev_inSrc, temp,
bitmap.image_size(),
cudaMemcpyHostToDevice));
free(temp);
bitmap.anim_and_exit((void(*)(void*, int))anim_gpu,
(void(*)(void*))anim_exit);
} |
3b7e2a3c8148926e9f3d1bd4c82b2e9998361660.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
inline float asum_host(int m, float *h_float_m){
float res = 0.0f;
for (int i=0; i<m; i++)
res+=abs(h_float_m[i]);
return res;
}
__global__ void diff(float *d_res, int *d_vec_1, int *d_vec_2, int m){
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < m){
d_res[tid] = ((float) d_vec_1[tid] - d_vec_2[tid]);
}
}
inline void diff_host(float *h_res, int *h_vec_1, int *h_vec_2, int m){
for (int i=0; i<m; i++){
h_res[i] = ((float) h_vec_1[i] - h_vec_2[i]);
}
}
__global__ void eq_value(float *d_res, int *d_v, int val, int m){
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < m){
if (d_v[tid] == val){
d_res[tid] = 1.0f;
}else{
d_res[tid] = 0.0f;
}
}
}
inline void eq_value_host(float *h_res, int *h_v, int val, int m){
for (int i=0; i<m; i++){
if (h_v[i] == val){
h_res[i] = 1.0f;
}else{
h_res[i] = 0.0f;
}
}
}
__global__ void eq_vectors(float *d_res, int *d_v1, int *d_v2, int m){
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < m){
if (d_v1[tid] == d_v2[tid]){
d_res[tid] = 1;
}else{
d_res[tid] = 0;
}
}
}
__global__ void to_float(float *d_float_m, int *d_v, int m){
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < m){
float val;
val = (float) d_v[tid];
d_float_m[tid] = val;
}
}
inline void to_float_host(float *h_float_m, int *h_v, int m){
for (int i=0; i<m; i++){
h_float_m[i] = ((float) h_v[i]);
}
}
inline float norm_1(float *d_float_m, int m){
return hipblasSasum(m, d_float_m, 1);
}
inline float norm_1_host(float *h_float_m, int m){
return asum_host(m, h_float_m);
}
inline float norm_inf(float *d_float_m, int m){
float ninf = 0.0f;
int pos = hipblasIsamax(m, d_float_m, 1)-1;
hipMemcpy(&ninf, d_float_m+pos, sizeof(float), hipMemcpyDeviceToHost);
return abs(ninf);
}
inline float norm_inf_host(float *h_float_m, int m){
float ninf = 0.0f;
for (int i=0; i<m; i++)
if (abs(h_float_m[i]) > ninf)
ninf = abs(h_float_m[i]);
return abs(ninf);
}
inline void track(int iter, int m,
int *d_low, int *d_ess, int *d_classes, int *d_low_true,
int *d_ess_true, float *d_float_m, float *error_lone,
float *error_linf, float *error_redu,
float *error_ess, float *time_track, float time,
dim3 NBm, dim3 TPBm){
// Time
time_track[iter] = time;
// (float) d_low_err = d_low - d_low_true
hipLaunchKernelGGL(( diff), dim3(NBm), dim3(TPBm), 0, 0, d_float_m, d_low, d_low_true, m);
hipDeviceSynchronize();
// norm_lone(d_low, d_low_true)
error_lone[iter] = norm_1(d_float_m, m);
// norm_linf(d_low, d_low_true)
error_linf[iter] = norm_inf(d_float_m, m);
// |j : d_classes[j] = 0| (number of unreduced columns)
hipLaunchKernelGGL(( eq_value), dim3(NBm), dim3(TPBm), 0, 0, d_float_m, d_classes, 0, m);
hipDeviceSynchronize();
error_redu[iter] = hipblasSasum(m, d_float_m, 1) / ((float) m);
hipDeviceSynchronize();
// |j : d_ess[j] = d_ess_true[j]|/sum(d_ess_true)
hipLaunchKernelGGL(( to_float), dim3(NBm), dim3(TPBm), 0, 0, d_float_m, d_ess_true, m);
hipDeviceSynchronize();
float num_ess_true = hipblasSasum(m, d_float_m, 1);
hipLaunchKernelGGL(( to_float), dim3(NBm), dim3(TPBm), 0, 0, d_float_m, d_ess, m);
hipDeviceSynchronize();
float num_ess_hat = hipblasSasum(m, d_float_m, 1);
// In estimation num_ess_true <= num_ess_hat <= m
if (num_ess_hat == num_ess_true){
error_ess[iter] = 1;
}else{
error_ess[iter] = num_ess_true/num_ess_hat;
}
}
inline void track_host(int iter, int m,
int *h_low, int *h_ess, int *h_classes, int *h_low_true,
int *h_ess_true, float *h_float_m, float *error_lone,
float *error_linf, float *error_redu,
float *error_ess, float *time_track, float time){
// Time
time_track[iter] = time;
// (float) d_low_err = d_low - d_low_true
diff_host(h_float_m, h_low, h_low_true, m);
// norm_lone(d_low, d_low_true)
error_lone[iter] = norm_1_host(h_float_m, m);
// norm_linf(d_low, d_low_true)
error_linf[iter] = norm_inf_host(h_float_m, m);
// |j : d_classes[j] = 0| (number of unreduced columns)
eq_value_host(h_float_m, h_classes, 0, m);
error_redu[iter] = asum_host(m, h_float_m) / ((float) m);
// |j : d_ess[j] = d_ess_true[j]|/sum(d_ess_true)
to_float_host(h_float_m, h_ess_true, m);
float num_ess_true = asum_host(m, h_float_m);
to_float_host(h_float_m, h_ess, m);
float num_ess_hat = asum_host(m, h_float_m);
// In estimation num_ess_true <= num_ess_hat <= m
if (num_ess_hat == num_ess_true){
error_ess[iter] = 1;
}else{
error_ess[iter] = num_ess_true/num_ess_hat;
}
}
| 3b7e2a3c8148926e9f3d1bd4c82b2e9998361660.cu |
inline float asum_host(int m, float *h_float_m){
float res = 0.0f;
for (int i=0; i<m; i++)
res+=abs(h_float_m[i]);
return res;
}
__global__ void diff(float *d_res, int *d_vec_1, int *d_vec_2, int m){
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < m){
d_res[tid] = ((float) d_vec_1[tid] - d_vec_2[tid]);
}
}
inline void diff_host(float *h_res, int *h_vec_1, int *h_vec_2, int m){
for (int i=0; i<m; i++){
h_res[i] = ((float) h_vec_1[i] - h_vec_2[i]);
}
}
__global__ void eq_value(float *d_res, int *d_v, int val, int m){
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < m){
if (d_v[tid] == val){
d_res[tid] = 1.0f;
}else{
d_res[tid] = 0.0f;
}
}
}
inline void eq_value_host(float *h_res, int *h_v, int val, int m){
for (int i=0; i<m; i++){
if (h_v[i] == val){
h_res[i] = 1.0f;
}else{
h_res[i] = 0.0f;
}
}
}
__global__ void eq_vectors(float *d_res, int *d_v1, int *d_v2, int m){
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < m){
if (d_v1[tid] == d_v2[tid]){
d_res[tid] = 1;
}else{
d_res[tid] = 0;
}
}
}
__global__ void to_float(float *d_float_m, int *d_v, int m){
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (tid < m){
float val;
val = (float) d_v[tid];
d_float_m[tid] = val;
}
}
inline void to_float_host(float *h_float_m, int *h_v, int m){
for (int i=0; i<m; i++){
h_float_m[i] = ((float) h_v[i]);
}
}
inline float norm_1(float *d_float_m, int m){
return cublasSasum(m, d_float_m, 1);
}
inline float norm_1_host(float *h_float_m, int m){
return asum_host(m, h_float_m);
}
inline float norm_inf(float *d_float_m, int m){
float ninf = 0.0f;
int pos = cublasIsamax(m, d_float_m, 1)-1;
cudaMemcpy(&ninf, d_float_m+pos, sizeof(float), cudaMemcpyDeviceToHost);
return abs(ninf);
}
inline float norm_inf_host(float *h_float_m, int m){
float ninf = 0.0f;
for (int i=0; i<m; i++)
if (abs(h_float_m[i]) > ninf)
ninf = abs(h_float_m[i]);
return abs(ninf);
}
inline void track(int iter, int m,
int *d_low, int *d_ess, int *d_classes, int *d_low_true,
int *d_ess_true, float *d_float_m, float *error_lone,
float *error_linf, float *error_redu,
float *error_ess, float *time_track, float time,
dim3 NBm, dim3 TPBm){
// Time
time_track[iter] = time;
// (float) d_low_err = d_low - d_low_true
diff<<<NBm, TPBm>>>(d_float_m, d_low, d_low_true, m);
cudaDeviceSynchronize();
// norm_lone(d_low, d_low_true)
error_lone[iter] = norm_1(d_float_m, m);
// norm_linf(d_low, d_low_true)
error_linf[iter] = norm_inf(d_float_m, m);
// |j : d_classes[j] = 0| (number of unreduced columns)
eq_value<<<NBm, TPBm>>>(d_float_m, d_classes, 0, m);
cudaDeviceSynchronize();
error_redu[iter] = cublasSasum(m, d_float_m, 1) / ((float) m);
cudaDeviceSynchronize();
// |j : d_ess[j] = d_ess_true[j]|/sum(d_ess_true)
to_float<<<NBm, TPBm>>>(d_float_m, d_ess_true, m);
cudaDeviceSynchronize();
float num_ess_true = cublasSasum(m, d_float_m, 1);
to_float<<<NBm, TPBm>>>(d_float_m, d_ess, m);
cudaDeviceSynchronize();
float num_ess_hat = cublasSasum(m, d_float_m, 1);
// In estimation num_ess_true <= num_ess_hat <= m
if (num_ess_hat == num_ess_true){
error_ess[iter] = 1;
}else{
error_ess[iter] = num_ess_true/num_ess_hat;
}
}
inline void track_host(int iter, int m,
int *h_low, int *h_ess, int *h_classes, int *h_low_true,
int *h_ess_true, float *h_float_m, float *error_lone,
float *error_linf, float *error_redu,
float *error_ess, float *time_track, float time){
// Time
time_track[iter] = time;
// (float) d_low_err = d_low - d_low_true
diff_host(h_float_m, h_low, h_low_true, m);
// norm_lone(d_low, d_low_true)
error_lone[iter] = norm_1_host(h_float_m, m);
// norm_linf(d_low, d_low_true)
error_linf[iter] = norm_inf_host(h_float_m, m);
// |j : d_classes[j] = 0| (number of unreduced columns)
eq_value_host(h_float_m, h_classes, 0, m);
error_redu[iter] = asum_host(m, h_float_m) / ((float) m);
// |j : d_ess[j] = d_ess_true[j]|/sum(d_ess_true)
to_float_host(h_float_m, h_ess_true, m);
float num_ess_true = asum_host(m, h_float_m);
to_float_host(h_float_m, h_ess, m);
float num_ess_hat = asum_host(m, h_float_m);
// In estimation num_ess_true <= num_ess_hat <= m
if (num_ess_hat == num_ess_true){
error_ess[iter] = 1;
}else{
error_ess[iter] = num_ess_true/num_ess_hat;
}
}
|
5b17e2019cd719764e6ef70e24917d7e9e4fc14b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GL\glut.h"
#include "cuda_gl_interop.h"
#include "..\common\book.h"
#include "..\common\cpu_bitmap.h"
#include <hip/hip_runtime.h>
#include"device_launch_parameters.h"
#define DIM 512
//declare functions
PFNGLBINDBUFFERARBPROC glBindBuffer = NULL;
PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL;
PFNGLGENBUFFERSARBPROC glGenBuffers = NULL;
PFNGLBUFFERDATAARBPROC glBufferData = NULL;
GLuint bufferobj;
cudaGraphicsResource *resource;
__global__ void kernel(uchar4 * d)
{
int x=threadIdx.x+blockDim.x*blockIdx.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
	int offset=x+y*gridDim.x*blockDim.x;
	float fx=x/(float)DIM-0.5f;
	float fy=y/(float)DIM-0.5f;
	float dst=sqrtf(fx*fx+fy*fy); // (computed but unused by this pattern)
	unsigned char grey=128+127.0f*sin(abs(fx*100)-abs(fy*100));
	d[offset].x=0;
	d[offset].y=grey;
	d[offset].z=0;
	d[offset].w=255;
}
static void draw_func()
{
glDrawPixels(DIM,DIM,GL_BGRA,GL_UNSIGNED_BYTE,0);
glutSwapBuffers();
}
static void key_func(unsigned char t,int x,int y)
{
switch(t)
{
case 27:
HANDLE_ERROR(hipGraphicsUnregisterResource(resource));
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB,0);
glDeleteBuffers(1,&bufferobj);
exit(0);
}
}
int main(int argc,char **argv)
{
hipDeviceProp_t prop;
memset(&prop,0,sizeof(hipDeviceProp_t));
	prop.major=1; // major/minor are ints; the float literals were harmless but misleading
	prop.minor=0;
int dev;
HANDLE_ERROR(hipChooseDevice(&dev,&prop));
HANDLE_ERROR(hipGLSetGLDevice(dev));
//called before any other opengl functions
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(DIM,DIM);
glutCreateWindow("bitmap");
//get the address of the functions
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
	//these functions are OpenGL extensions that put the data directly into video memory
//VBO
glGenBuffers(1,&bufferobj);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB,bufferobj);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB,DIM*DIM*4,NULL,GL_DYNAMIC_DRAW_ARB);
HANDLE_ERROR(hipGraphicsGLRegisterBuffer(&resource,bufferobj,hipGraphicsMapFlagsNone));
uchar4 *dev_ptr;
size_t size;
HANDLE_ERROR(hipGraphicsMapResources(1,&resource,NULL));
HANDLE_ERROR(hipGraphicsResourceGetMappedPointer((void **)&dev_ptr,&size,resource));
dim3 grids(DIM/16,DIM/16);
dim3 blocks(16,16);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(blocks), 0, 0, dev_ptr);
HANDLE_ERROR(hipGraphicsUnmapResources(1,&resource,NULL));
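	// Unmapping returns ownership of the buffer to OpenGL; draw_func's
	// glDrawPixels(..., 0) then sources the pixels straight from the bound
	// GL_PIXEL_UNPACK_BUFFER, so the image never round-trips through host memory.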
glutKeyboardFunc(key_func);
glutDisplayFunc(draw_func);
glutMainLoop();
} | 5b17e2019cd719764e6ef70e24917d7e9e4fc14b.cu | #include "cuda.h"
#include "GL\glut.h"
#include "cuda_gl_interop.h"
#include "..\common\book.h"
#include "..\common\cpu_bitmap.h"
#include <cuda_runtime.h>
#include"device_launch_parameters.h"
#define DIM 512
//declare functions
PFNGLBINDBUFFERARBPROC glBindBuffer = NULL;
PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL;
PFNGLGENBUFFERSARBPROC glGenBuffers = NULL;
PFNGLBUFFERDATAARBPROC glBufferData = NULL;
GLuint bufferobj;
cudaGraphicsResource *resource;
__global__ void kernel(uchar4 * d)
{
int x=threadIdx.x+blockDim.x*blockIdx.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
	int offset=x+y*gridDim.x*blockDim.x;
	float fx=x/(float)DIM-0.5f;
	float fy=y/(float)DIM-0.5f;
	float dst=sqrtf(fx*fx+fy*fy); // (computed but unused by this pattern)
	unsigned char grey=128+127.0f*sin(abs(fx*100)-abs(fy*100));
	d[offset].x=0;
	d[offset].y=grey;
	d[offset].z=0;
	d[offset].w=255;
}
static void draw_func()
{
glDrawPixels(DIM,DIM,GL_BGRA,GL_UNSIGNED_BYTE,0);
glutSwapBuffers();
}
static void key_func(unsigned char t,int x,int y)
{
switch(t)
{
case 27:
HANDLE_ERROR(cudaGraphicsUnregisterResource(resource));
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB,0);
glDeleteBuffers(1,&bufferobj);
exit(0);
}
}
int main(int argc,char **argv)
{
cudaDeviceProp prop;
memset(&prop,0,sizeof(cudaDeviceProp));
	prop.major=1; // major/minor are ints; the float literals were harmless but misleading
	prop.minor=0;
int dev;
HANDLE_ERROR(cudaChooseDevice(&dev,&prop));
HANDLE_ERROR(cudaGLSetGLDevice(dev));
//called before any other opengl functions
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(DIM,DIM);
glutCreateWindow("bitmap");
//get the address of the functions
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
	//these functions are OpenGL extensions that put the data directly into video memory
//VBO
glGenBuffers(1,&bufferobj);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB,bufferobj);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB,DIM*DIM*4,NULL,GL_DYNAMIC_DRAW_ARB);
HANDLE_ERROR(cudaGraphicsGLRegisterBuffer(&resource,bufferobj,cudaGraphicsMapFlagsNone));
uchar4 *dev_ptr;
size_t size;
HANDLE_ERROR(cudaGraphicsMapResources(1,&resource,NULL));
HANDLE_ERROR(cudaGraphicsResourceGetMappedPointer((void **)&dev_ptr,&size,resource));
dim3 grids(DIM/16,DIM/16);
dim3 blocks(16,16);
kernel<<<grids,blocks>>>(dev_ptr);
HANDLE_ERROR(cudaGraphicsUnmapResources(1,&resource,NULL));
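	// Unmapping returns ownership of the buffer to OpenGL; draw_func's
	// glDrawPixels(..., 0) then sources the pixels straight from the bound
	// GL_PIXEL_UNPACK_BUFFER, so the image never round-trips through host memory.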
glutKeyboardFunc(key_func);
glutDisplayFunc(draw_func);
glutMainLoop();
} |
cdba6eeb2086b999d6ea4e26630f502a5d206f89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "../common/book.h"
#include "../common/cpu_anim.h"
//Global
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
//setting texture references memory in GPU
texture<float> texConstSrc;
texture<float> texIn;
texture<float> texOut;
//Global
//KKKKKKKKKKKKKKKKKKKKKKKKKKKKK
__global__ void copy_const_kernel(float *iptr) {
	/*Given the input-temperature grid iptr, copies the constant (heater)
	temperatures stored in the texture texConstSrc into iptr.*/
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y *blockDim.x*gridDim.x;
//Giving x,y unique values and linearizing it in offset
//The constants are stored in the texConstSrc texture
float c = tex1Dfetch(texConstSrc, offset);
if (c != 0) iptr[offset] = c;
}
__global__ void blend_kernel(float *dst, bool dstOut) {
	/*Using texture memory we can't directly use the pointers to the memory; we use a boolean
flag instead to know which memory to use as input or output, sending the result through dst*/
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y *blockDim.x*gridDim.x;
//setting neighboorhod
int left = offset - 1;
int right = offset + 1;
if (x == 0) left++;
if (x == DIM - 1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y == 0) top += DIM;
if (y == DIM - 1) bottom -= DIM;
float t,l,c,r,b;
if (dstOut) { //if dstOut is TRUE
t = tex1Dfetch(texIn,top);
l = tex1Dfetch(texIn, left);
c = tex1Dfetch(texIn, offset);
r = tex1Dfetch(texIn, right);
b = tex1Dfetch(texIn, bottom);
} //We take the input from the texture texIn
else { //ELSE
t = tex1Dfetch(texOut, top);
l = tex1Dfetch(texOut, left);
c = tex1Dfetch(texOut, offset);
r = tex1Dfetch(texOut, right);
b = tex1Dfetch(texOut, bottom);
} //We Take the input from texOut
dst[offset] = c + SPEED*(t+b+r+l-4.0*c);
}
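// This is the same explicit Euler step of the discrete heat equation as the
// 2-D texture variant: dst = c + SPEED * (t + b + r + l - 4*c), i.e. the old
// value plus SPEED times the discrete Laplacian, stable for SPEED <= 0.25.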
//KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
struct DataBlock {
//Setting bitmaps and grid of temperature
unsigned char *output_bitmap;
float *dev_inSrc; //Input
float *dev_outSrc; //Output
float *dev_constSrc; //Constant Sources
CPUAnimBitmap *bitmap;
	//Setting time variables to measure time taken.
hipEvent_t start, stop;
float totalTime;
float frames;
};
void anim_gpu(DataBlock *d, int ticks) {
hipEventRecord(d->start, 0); //start recording time
//blocks and grids dimension
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
//starting bitmap from the DataBlock
CPUAnimBitmap *bitmap = d->bitmap;
//Make heat flow 90 times
//since tex is global and bound we set up a boolean to flag which to use
volatile bool dstOut = true;
for (int i = 0; i < 90; i++) {
//Setting input and output
float *in , *out;
if (dstOut) { //if True
in = d->dev_inSrc;
out = d->dev_outSrc;
} //We take input from the expected
else { //Else
out = d->dev_inSrc;
in = d->dev_outSrc;
} //We change the order
hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks), dim3(threads), 0, 0, in);
hipLaunchKernelGGL(( blend_kernel), dim3(blocks),dim3(threads), 0, 0, out , dstOut);
dstOut = !dstOut;
}
//Make bitmap from output
 hipLaunchKernelGGL(( float_to_color), dim3(blocks), dim3(threads), 0, 0, d->output_bitmap, d->dev_inSrc);
//copy bitmap to host
hipMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), hipMemcpyDeviceToHost);
//get and print time
hipEventRecord(d->stop, 0);
hipEventSynchronize(d->stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, d->start, d->stop);
d->totalTime += elapsedTime;
++d->frames;
printf("Avarage Time Per Frame %3.1f ms \n", d->totalTime / d->frames);
}
void anim_exit(DataBlock *d) {
 /*Cleans up the DataBlock's GPU resources*/
hipUnbindTexture(texIn);
hipUnbindTexture(texOut);
hipUnbindTexture(texConstSrc);
hipFree(d->dev_inSrc);
hipFree(d->dev_outSrc);
hipFree(d->dev_constSrc);
hipEventDestroy(d->start);
hipEventDestroy(d->stop);
}
//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
int main(void) {
//Setting Bitmaps
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
 //Setting timers = 0
data.totalTime = 0;
data.frames = 0;
hipEventCreate(&data.start);
hipEventCreate(&data.stop);
//Allocating space to bitmap and sources.
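 // Note: image_size() is DIM*DIM*4 bytes (one RGBA pixel per cell); since sizeof(float) == 4,
 // the same allocation size holds exactly one float per grid cell, so the float grids reuse it.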
hipMalloc((void **)&data.output_bitmap, bitmap.image_size());
hipMalloc((void **)&data.dev_inSrc, bitmap.image_size());
hipMalloc((void **)&data.dev_outSrc, bitmap.image_size());
hipMalloc((void **)&data.dev_constSrc, bitmap.image_size());
 //Bind each texture reference to the memory allocated on the device under the designated name (pg 126)
hipBindTexture(NULL, texConstSrc, data.dev_constSrc, bitmap.image_size());
hipBindTexture(NULL, texIn, data.dev_inSrc, bitmap.image_size());
hipBindTexture(NULL, texOut, data.dev_outSrc, bitmap.image_size());
//setting heaters in temp
float *temp = (float *)malloc(bitmap.image_size());
int i = 0;
for (i = 0; i < DIM*DIM; i++) {
temp[i] = 0;
int x = i%DIM;
int y = i / DIM;
if ((x > 300) && (x < 600) && (y > 310 && (y < 601)))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; y++) {
for (int x = 400; x < 500; x++) {
temp[x + y*DIM] = MIN_TEMP;
}
}
hipMemcpy(data.dev_constSrc, temp, bitmap.image_size(), hipMemcpyHostToDevice);
for (int y = 800; y < DIM; y++) {
for (int x = 0;x < 200; x++) {
temp[x + y*DIM] = MAX_TEMP;
}
}
hipMemcpy(data.dev_inSrc, temp, bitmap.image_size(), hipMemcpyHostToDevice);
free(temp);
bitmap.anim_and_exit((void(*)(void*, int))anim_gpu, (void(*)(void*))anim_exit);
return 0;
} | cdba6eeb2086b999d6ea4e26630f502a5d206f89.cu | #include <stdio.h>
#include "../common/book.h"
#include "../common/cpu_anim.h"
//Global
#define DIM 1024
#define PI 3.1415926535897932f
#define MAX_TEMP 1.0f
#define MIN_TEMP 0.0001f
#define SPEED 0.25f
 //texture references to the grids stored in GPU memory
texture<float> texConstSrc;
texture<float> texIn;
texture<float> texOut;
//Global
//KKKKKKKKKKKKKKKKKKKKKKKKKKKKK
__global__ void copy_const_kernel(float *iptr) {
 /*Given the grid of input temperatures iptr, copies the constant heater temperatures
 stored in the texture texConstSrc into the pointer iptr.*/
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y *blockDim.x*gridDim.x;
 //Giving x,y unique values and linearizing them into offset
//The constants are stored in the texConstSrc texture
float c = tex1Dfetch(texConstSrc, offset);
if (c != 0) iptr[offset] = c;
}
__global__ void blend_kernel(float *dst, bool dstOut) {
 /*With texture memory we can't simply swap the input/output pointers, so we use a boolean
 flag instead to select which texture to read from, writing the result through dst.*/
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y *blockDim.x*gridDim.x;
 //setting up the neighborhood (with border clamping)
int left = offset - 1;
int right = offset + 1;
if (x == 0) left++;
if (x == DIM - 1) right--;
int top = offset - DIM;
int bottom = offset + DIM;
if (y == 0) top += DIM;
if (y == DIM - 1) bottom -= DIM;
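 // The index adjustments above clamp the stencil at the image border,
 // i.e. boundary cells use themselves as the missing neighbor (zero-flux boundary).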
float t,l,c,r,b;
if (dstOut) { //if dstOut is TRUE
t = tex1Dfetch(texIn,top);
l = tex1Dfetch(texIn, left);
c = tex1Dfetch(texIn, offset);
r = tex1Dfetch(texIn, right);
b = tex1Dfetch(texIn, bottom);
} //We take the input from the texture texIn
else { //ELSE
t = tex1Dfetch(texOut, top);
l = tex1Dfetch(texOut, left);
c = tex1Dfetch(texOut, offset);
r = tex1Dfetch(texOut, right);
b = tex1Dfetch(texOut, bottom);
} //We Take the input from texOut
dst[offset] = c + SPEED*(t+b+r+l-4.0*c);
}
//KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK
//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
struct DataBlock {
//Setting bitmaps and grid of temperature
unsigned char *output_bitmap;
float *dev_inSrc; //Input
float *dev_outSrc; //Output
float *dev_constSrc; //Constant Sources
CPUAnimBitmap *bitmap;
 //Setting time variables to measure time taken.
cudaEvent_t start, stop;
float totalTime;
float frames;
};
void anim_gpu(DataBlock *d, int ticks) {
cudaEventRecord(d->start, 0); //start recording time
//blocks and grids dimension
dim3 blocks(DIM / 16, DIM / 16);
dim3 threads(16, 16);
//starting bitmap from the DataBlock
CPUAnimBitmap *bitmap = d->bitmap;
//Make heat flow 90 times
//since tex is global and bound we set up a boolean to flag which to use
volatile bool dstOut = true;
for (int i = 0; i < 90; i++) {
//Setting input and output
float *in , *out;
if (dstOut) { //if True
in = d->dev_inSrc;
out = d->dev_outSrc;
 } //We take input from the expected buffers
else { //Else
out = d->dev_inSrc;
in = d->dev_outSrc;
} //We change the order
copy_const_kernel<<<blocks, threads>>>(in);
blend_kernel<<<blocks,threads>>>(out , dstOut);
dstOut = !dstOut;
}
//Make bitmap from output
 float_to_color<<<blocks, threads>>>(d->output_bitmap, d->dev_inSrc);
//copy bitmap to host
cudaMemcpy(bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), cudaMemcpyDeviceToHost);
//get and print time
cudaEventRecord(d->stop, 0);
cudaEventSynchronize(d->stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, d->start, d->stop);
d->totalTime += elapsedTime;
++d->frames;
printf("Avarage Time Per Frame %3.1f ms \n", d->totalTime / d->frames);
}
void anim_exit(DataBlock *d) {
 /*Cleans up the DataBlock's GPU resources*/
cudaUnbindTexture(texIn);
cudaUnbindTexture(texOut);
cudaUnbindTexture(texConstSrc);
cudaFree(d->dev_inSrc);
cudaFree(d->dev_outSrc);
cudaFree(d->dev_constSrc);
cudaEventDestroy(d->start);
cudaEventDestroy(d->stop);
}
//AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
int main(void) {
//Setting Bitmaps
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
 //Setting timers = 0
data.totalTime = 0;
data.frames = 0;
cudaEventCreate(&data.start);
cudaEventCreate(&data.stop);
//Allocating space to bitmap and sources.
cudaMalloc((void **)&data.output_bitmap, bitmap.image_size());
cudaMalloc((void **)&data.dev_inSrc, bitmap.image_size());
cudaMalloc((void **)&data.dev_outSrc, bitmap.image_size());
cudaMalloc((void **)&data.dev_constSrc, bitmap.image_size());
 //Bind each texture reference to the memory allocated on the device under the designated name (pg 126)
cudaBindTexture(NULL, texConstSrc, data.dev_constSrc, bitmap.image_size());
cudaBindTexture(NULL, texIn, data.dev_inSrc, bitmap.image_size());
cudaBindTexture(NULL, texOut, data.dev_outSrc, bitmap.image_size());
//setting heaters in temp
float *temp = (float *)malloc(bitmap.image_size());
int i = 0;
for (i = 0; i < DIM*DIM; i++) {
temp[i] = 0;
int x = i%DIM;
int y = i / DIM;
if ((x > 300) && (x < 600) && (y > 310 && (y < 601)))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; y++) {
for (int x = 400; x < 500; x++) {
temp[x + y*DIM] = MIN_TEMP;
}
}
cudaMemcpy(data.dev_constSrc, temp, bitmap.image_size(), cudaMemcpyHostToDevice);
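 // Everything written to temp so far is the fixed heater pattern (dev_constSrc);
 // the extra warm block below only seeds the initial state copied into dev_inSrc.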
for (int y = 800; y < DIM; y++) {
for (int x = 0;x < 200; x++) {
temp[x + y*DIM] = MAX_TEMP;
}
}
cudaMemcpy(data.dev_inSrc, temp, bitmap.image_size(), cudaMemcpyHostToDevice);
free(temp);
bitmap.anim_and_exit((void(*)(void*, int))anim_gpu, (void(*)(void*))anim_exit);
return 0;
} |
4080fef2d85fc40d0608f80a195b777810e1e52a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/nested/NestedTensorBinaryOps.h>
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#define BLOCK_DIM 256
namespace at {
namespace native {
// only for nested [B, *, D], dense [B, 1, D]
template <typename T, typename func_t>
__global__ void op_dense_esuhm(
const T* input,
const T* dense,
T* output,
int64_t embedding_dim,
const int64_t* offsets,
const func_t& f)
{
// each batch is handled by a block
const int64_t batch_idx = blockIdx.x;
const int64_t grain_size = blockDim.x;
const int64_t tid = threadIdx.x;
const int64_t range = offsets[batch_idx + 1] - offsets[batch_idx];
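  // range = number of scalar elements in this nested component, i.e. its own seq_len * embedding_dim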
// each thread handles (embedding_dim // grain_size + (embedding_dim % grain_size <= tid)) elems
// of the dense embedding
for (int64_t idx = tid; idx < embedding_dim; idx += grain_size) {
const T dense_elem = dense[batch_idx * embedding_dim + idx];
for (int64_t nested_idx = idx; nested_idx < range; nested_idx += embedding_dim) {
output[offsets[batch_idx] + nested_idx] = f(input[offsets[batch_idx] + nested_idx], dense_elem);
}
}
}
template <typename T, typename func_t>
void nested_op_dense_kernelLauncher(
const T* input, // [sum(*) x embedding_dim]
const T* dense, // [batch_size x embedding_dim]
T* output, // [sum(*) x embedding_dim]
int64_t batch_size,
int64_t embedding_dim,
const int64_t* input_offsets, // [batch_size]
func_t f)
{
dim3 grid;
grid.x = batch_size;
const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( op_dense_esuhm), dim3(grid), dim3(BLOCK_DIM), 0, stream,
input,
dense,
output,
embedding_dim,
input_offsets,
f);
}
template <typename scalar_t, typename func_t>
void _nested_op_dense_esuhm_kernel(Tensor& result, const Tensor& self, const Tensor& other, func_t f) {
auto self_ptr = get_nested_tensor_impl(self);
auto result_ptr = get_nested_tensor_impl(result);
const auto self_buffer = self_ptr->get_buffer();
const auto offsets = self_ptr->get_storage_offsets();
const auto batch_size = other.size(0);
const auto embedding_size = other.size(2);
auto result_buffer = result_ptr->get_buffer();
auto result_offsets = at::cat({at::tensor(offsets), at::tensor(self_ptr->numel())});
result_offsets = result_offsets.to(kCUDA);
const scalar_t* self_data_ptr = self_buffer.data_ptr<scalar_t>();
const scalar_t* other_data_ptr = other.data_ptr<scalar_t>();
scalar_t* result_data_ptr = result_buffer.data_ptr<scalar_t>();
int64_t* result_offsets_ptr = result_offsets.data_ptr<int64_t>();
nested_op_dense_kernelLauncher(
self_data_ptr,
other_data_ptr,
result_data_ptr,
batch_size,
embedding_size,
result_offsets_ptr,
f);
}
void _nested_op_dense_esuhm_cuda(Tensor& result, const Tensor& self, const Tensor& other, const NESTED_DENSE_OP& op) {
AT_DISPATCH_ALL_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "_nested_op_dense_esuhm", [&]() {
switch (op) {
case NESTED_DENSE_OP::ADD :
_nested_op_dense_esuhm_kernel<scalar_t>(result, self, other, [] __host__ __device__ (scalar_t a, scalar_t b) -> scalar_t { return a + b; });
break;
case NESTED_DENSE_OP::MUL :
_nested_op_dense_esuhm_kernel<scalar_t>(result, self, other, [] __host__ __device__ (scalar_t a, scalar_t b) -> scalar_t { return a * b; });
break;
}
});
}
REGISTER_CUDA_DISPATCH(nested_dense_elementwise_stub, &_nested_op_dense_esuhm_cuda);
} // namespace native
} // namespace at
| 4080fef2d85fc40d0608f80a195b777810e1e52a.cu | #include <ATen/native/nested/NestedTensorBinaryOps.h>
#include <type_traits>
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/native/nested/NestedTensorUtils.h>
#define BLOCK_DIM 256
namespace at {
namespace native {
// only for nested [B, *, D], dense [B, 1, D]
template <typename T, typename func_t>
__global__ void op_dense_esuhm(
const T* input,
const T* dense,
T* output,
int64_t embedding_dim,
const int64_t* offsets,
const func_t& f)
{
// each batch is handled by a block
const int64_t batch_idx = blockIdx.x;
const int64_t grain_size = blockDim.x;
const int64_t tid = threadIdx.x;
const int64_t range = offsets[batch_idx + 1] - offsets[batch_idx];
// each thread handles (embedding_dim // grain_size + (embedding_dim % grain_size <= tid)) elems
// of the dense embedding
for (int64_t idx = tid; idx < embedding_dim; idx += grain_size) {
const T dense_elem = dense[batch_idx * embedding_dim + idx];
for (int64_t nested_idx = idx; nested_idx < range; nested_idx += embedding_dim) {
output[offsets[batch_idx] + nested_idx] = f(input[offsets[batch_idx] + nested_idx], dense_elem);
}
}
}
template <typename T, typename func_t>
void nested_op_dense_kernelLauncher(
const T* input, // [sum(*) x embedding_dim]
const T* dense, // [batch_size x embedding_dim]
T* output, // [sum(*) x embedding_dim]
int64_t batch_size,
int64_t embedding_dim,
const int64_t* input_offsets, // [batch_size]
func_t f)
{
dim3 grid;
grid.x = batch_size;
const auto stream = at::cuda::getCurrentCUDAStream();
op_dense_esuhm<<<grid, BLOCK_DIM, 0, stream>>>(
input,
dense,
output,
embedding_dim,
input_offsets,
f);
}
template <typename scalar_t, typename func_t>
void _nested_op_dense_esuhm_kernel(Tensor& result, const Tensor& self, const Tensor& other, func_t f) {
auto self_ptr = get_nested_tensor_impl(self);
auto result_ptr = get_nested_tensor_impl(result);
const auto self_buffer = self_ptr->get_buffer();
const auto offsets = self_ptr->get_storage_offsets();
const auto batch_size = other.size(0);
const auto embedding_size = other.size(2);
auto result_buffer = result_ptr->get_buffer();
auto result_offsets = at::cat({at::tensor(offsets), at::tensor(self_ptr->numel())});
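  // Appending the total element count as a sentinel makes offsets[b + 1] - offsets[b]
  // well-defined for the last batch entry as well.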
result_offsets = result_offsets.to(kCUDA);
const scalar_t* self_data_ptr = self_buffer.data_ptr<scalar_t>();
const scalar_t* other_data_ptr = other.data_ptr<scalar_t>();
scalar_t* result_data_ptr = result_buffer.data_ptr<scalar_t>();
int64_t* result_offsets_ptr = result_offsets.data_ptr<int64_t>();
nested_op_dense_kernelLauncher(
self_data_ptr,
other_data_ptr,
result_data_ptr,
batch_size,
embedding_size,
result_offsets_ptr,
f);
}
void _nested_op_dense_esuhm_cuda(Tensor& result, const Tensor& self, const Tensor& other, const NESTED_DENSE_OP& op) {
AT_DISPATCH_ALL_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "_nested_op_dense_esuhm", [&]() {
switch (op) {
case NESTED_DENSE_OP::ADD :
_nested_op_dense_esuhm_kernel<scalar_t>(result, self, other, [] __host__ __device__ (scalar_t a, scalar_t b) -> scalar_t { return a + b; });
break;
case NESTED_DENSE_OP::MUL :
_nested_op_dense_esuhm_kernel<scalar_t>(result, self, other, [] __host__ __device__ (scalar_t a, scalar_t b) -> scalar_t { return a * b; });
break;
}
});
}
REGISTER_CUDA_DISPATCH(nested_dense_elementwise_stub, &_nested_op_dense_esuhm_cuda);
} // namespace native
} // namespace at
|
554401558a5c13284afccba1e62473473ab302d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Gaussian Blur Computation
*
* Maximo, Andre -- Sep, 2011
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <cfloat>
#include <hipfft.h>
#include <util/error.h>
#include <util/symbol.h>
//#include <util/chronos.h>
#include <util/dvector.h>
#include <util/timer.h>
#include <util/recfilter.h>
//#include "pgm.h"
#include "gauss_dir_cpu.h"
using namespace gpufilter;
#define WS 32 ///< Warp Size
#define NW 8 ///< Number of warps per block (with all warps computing)
#define NB 8 ///< Number of blocks per SM
#define WS1 32 ///< for gauss1
#define NW1 8 ///< for gauss1
#define NB1 8 ///< for gauss1
#define WS2 16 ///< for gauss2
#define NW2 8 ///< for gauss2
#define NB2 16 ///< for gauss2
#define WS3 32 ///< for gauss3
#define NW3 16 ///< for gauss3
#define NB3 3 ///< for gauss3
#define MNB 2 ///< Maximum number of blocks per SM
#define MNT 1024 ///< Maximum number of threads per block (with MNB)
unsigned RUN_TIMES=1024;
int res_out;
#define DEFAULT_RES_OUT 1 // res_out: 0 verbose; 1 perf; 2 error
#define KERNEL_RADIUS 33
//#define KERNEL_RADIUS 47
//#define COMPUTE_IN_CPU // compute in cpu debug flag: cpu can be very slow..
//#define RUN_GAUSS_0
//#define RUN_GAUSS_1
//#define RUN_GAUSS_2
//#define RUN_GAUSS_3
//#define RUN_GAUSS_FFT
//#define RUN_GAUSS_REC
// get cactus with Diego Nehab
//#define USE_CACTUS // use cactus debug flag: read/write file names below
#define CACTUS_IN "cactus-1024.pgm"
#define CACTUS_GPU "cactus-1024-direct-gpu.pgm"
#define CACTUS_CPU "cactus-1024-direct-cpu.pgm"
#define CACTUS_FFT_GPU "cactus-1024-fft-gpu.pgm"
#define CACTUS_RECURSIVE_GPU "cactus-1024-recursive-gpu.pgm"
/// Naming conventions are: c_ constant; t_ texture; g_ global memory;
/// s_ shared memory; d_ device pointer; a_ cuda-array; p_ template
/// parameter; f_ surface.
__constant__ int c_width, c_height;
__constant__ Vector< float, KERNEL_RADIUS * 2 + 1 > c_kernel;
__constant__ int c_fft_width;
texture< float, 2, hipReadModeElementType > t_in_img;
surface< void, 2 > f_in_img;
surface< void, 2 > f_out_img;
/// Memory types are: TMEM texture memory; SMEM shared memory; GMEM
/// global memory
///------------------------- AUXILIARY FUNCTIONS ----------------------------
__device__ inline // auxiliary reflect function in device
int d_reflect( const int& i, const int& n ) {
if( i < 0 ) return (-i-1);
else if( i >= n ) return (2*n-i-1);
else return i;
}
__device__ inline // auxiliary clamp function in device
int d_clamp( const int& i, const int& n ) {
if( i < 0 ) return 0;
else if( i >= n ) return n-1;
else return i;
}
///----------------- GAUSS-0 - TWO-PASS CONVOLUTION IN GMEM -----------------
template< int p_radius >
__global__ __launch_bounds__( WS*NW, NB )
void gauss0_rows( float *g_out, const float *g_in ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row = by*NW+ty;
if( row >= c_height or col >= c_width ) return;
g_in += row*c_width;
g_out += row*c_width + col;
float s = 0.f;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += g_in[ d_clamp(col+k, c_width) ] * c_kernel[ k + p_radius ];
}
*g_out = s;
}
template< int p_radius >
__global__ __launch_bounds__( WS*NW, NB )
void gauss0_cols( float *g_out, const float *g_in ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row = by*NW+ty;
if( row >= c_height or col >= c_width ) return;
g_in += col;
g_out += row*c_width + col;
float s = 0.f;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += g_in[ d_clamp(row+k, c_height)*c_width ] * c_kernel[ k + p_radius ];
}
*g_out = s;
}
///----------------- GAUSS-1 - TWO-PASS CONVOLUTION IN TMEM -----------------
template< int p_radius >
__global__ __launch_bounds__( WS1*NW1, NB1 )
void gauss1_rows( void ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS1+tx, row = by*NW1+ty;
if( row >= c_height or col >= c_width ) return;
float tu = col + .5f, tv = row + .5f, s = 0.f;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += tex2D( t_in_img, tu+k, tv ) * c_kernel[ k + p_radius ];
}
surf2Dwrite( s, f_out_img, col*4, row, hipBoundaryModeTrap ); // trap kills kernel if outside boundary
}
template< int p_radius >
__global__ __launch_bounds__( WS1*NW1, NB1 )
void gauss1_cols( float *g_out ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS1+tx, row = by*NW1+ty;
if( row >= c_height or col >= c_width ) return;
g_out += row*c_width + col;
float tu = col + .5f, tv = row + .5f;
float s = 0.f;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += tex2D( t_in_img, tu, tv+k ) * c_kernel[ k + p_radius ];
}
*g_out = s;
}
///----------------- GAUSS-2 - TWO-PASS CONVOLUTION IN SMEM -----------------
template< int p_radius >
__global__ __launch_bounds__( MNT, MNB )
void gauss2_rows( void ) {
const int tx = threadIdx.x, bx = blockIdx.x, by = blockIdx.y;
float tu = bx*MNT+tx + .5f, tv = by + .5f;
float s = 0.f;
volatile __shared__ float s_row[ MNT + p_radius*2 ];
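 // Shared-memory row tile with a p_radius halo on each side; the two branches below fill the halos.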
s_row[ p_radius + tx ] = tex2D( t_in_img, tu, tv );
if( tx < p_radius ) s_row[ tx ] = tex2D( t_in_img, tu - p_radius, tv );
else if( tx < 2*p_radius ) s_row[ MNT + tx ] = tex2D( t_in_img, tu - p_radius + MNT, tv );
__syncthreads();
if( bx*MNT+tx >= c_width ) return;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += s_row[ p_radius + tx + k ] * c_kernel[ k + p_radius ];
}
surf2Dwrite( s, f_out_img, (bx*MNT+tx)*4, by, hipBoundaryModeTrap );
}
template< int p_radius >
__global__ __launch_bounds__( WS2*NW2, NB2 )
void gauss2_cols( float *g_out ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
float tu = bx*WS2+tx + .5f, tv = by*NW2+ty + .5f;
float s = 0.f;
volatile __shared__ float s_cols[ WS2 ][ NW2 + p_radius*2 + 1 ];
s_cols[ tx ][ p_radius + ty ] = tex2D( t_in_img, tu, tv );
if( p_radius <= NW2/2 ) {
if( ty < p_radius ) s_cols[ tx ][ ty ] = tex2D( t_in_img, tu, tv - p_radius );
else if( ty < 2*p_radius ) s_cols[ tx ][ NW2 + ty ] = tex2D( t_in_img, tu, tv - p_radius + NW2 );
} else if( p_radius <= NW2 ) {
if( ty < p_radius ) {
s_cols[ tx ][ ty ] = tex2D( t_in_img, tu, tv - p_radius );
s_cols[ tx ][ p_radius + NW2 + ty ] = tex2D( t_in_img, tu, tv + NW2 );
}
} else {
for (int i = 0; i < (p_radius+NW2-1)/NW2; ++i) {
int wy = i*NW2+ty;
if( wy < p_radius ) {
s_cols[ tx ][ wy ] = tex2D( t_in_img, tu, tv - p_radius + i*NW2 );
s_cols[ tx ][ p_radius + NW2 + wy ] = tex2D( t_in_img, tu, tv + NW2 + i*NW2 );
}
}
}
__syncthreads();
if( bx*WS2+tx >= c_width or by*NW2+ty >= c_height ) return;
g_out += (by*NW2+ty)*c_width + bx*WS2+tx;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += s_cols[tx][ p_radius + ty + k ] * c_kernel[ k + p_radius ];
}
*g_out = s;
}
///----------------- GAUSS-3 - ONE-PASS CONVOLUTION -----------------
template< int p_radius >
__device__
void load_convolve_rows( volatile float *s_in, const int& tx, const float& tu, const float& tv ) {
// load middle data
s_in[ p_radius + tx ] = tex2D( t_in_img, tu, tv );
// load left and right data
if( p_radius <= WS3/2 ) {
if( tx < p_radius ) s_in[ tx ] = tex2D( t_in_img, tu - p_radius, tv );
else if( tx < p_radius*2 ) s_in[ WS3 + tx ] = tex2D( t_in_img, tu - p_radius + WS3, tv );
} else if( p_radius <= WS3 ) {
if( tx < p_radius ) {
s_in[ tx ] = tex2D( t_in_img, tu - p_radius, tv );
s_in[ p_radius + WS3 + tx ] = tex2D( t_in_img, tu + WS3, tv );
}
} else {
for (int i = 0; i < (p_radius+WS3-1)/WS3; ++i) {
int wx = i*WS3+tx;
if( wx < p_radius ) {
s_in[ wx ] = tex2D( t_in_img, tu - p_radius + i*WS3, tv );
s_in[ p_radius + WS3 + wx ] = tex2D( t_in_img, tu + WS3 + i*WS3, tv );
}
}
}
// convolve row
float s = 0.f;
for (int k = -p_radius; k <= p_radius; ++k) {
s += s_in[ p_radius + tx + k ] * c_kernel[ k + p_radius ];
}
s_in[ p_radius + tx ] = s;
}
template< int p_radius >
__global__ __launch_bounds__( WS3*NW3, NB3 )
void gauss3( float *g_out ) {
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS3+tx, row = by*NW3+ty;
if( row >= c_height or col >= c_width ) bx = -1;
float tu = col + .5f, tv = row + .5f;
volatile __shared__ float s_inblock[ NW3 + p_radius*2 ][ WS3 + p_radius*2 ];
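 // 2D shared tile with halos on all four sides: rows are convolved while being loaded
 // (load_convolve_rows), then the column convolution below reads the row results from the tile.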
// load middle data
load_convolve_rows< p_radius >( &s_inblock[ p_radius + ty ][0], tx, tu, tv );
// load upper and lower data
if( p_radius <= NW3/2 ) {
if( ty < p_radius ) load_convolve_rows< p_radius >( &s_inblock[ ty ][0], tx, tu, tv - p_radius );
else if( ty < p_radius*2 ) load_convolve_rows< p_radius >( &s_inblock[ NW3 + ty ][0], tx, tu, tv - p_radius + NW3 );
} else if( p_radius <= NW3 ) {
if( ty < p_radius ) {
load_convolve_rows< p_radius >( &s_inblock[ ty ][0], tx, tu, tv - p_radius );
load_convolve_rows< p_radius >( &s_inblock[ p_radius + NW3 + ty ][0], tx, tu, tv + NW3 );
}
} else {
for (int i = 0; i < (p_radius+NW3-1)/NW3; ++i) {
int wy = i*NW3+ty;
if( wy < p_radius ) {
load_convolve_rows< p_radius >( &s_inblock[ wy ][0], tx, tu, tv - p_radius + i*NW3 );
load_convolve_rows< p_radius >( &s_inblock[ p_radius + NW3 + wy ][0], tx, tu, tv + NW3 + i*NW3 );
}
}
}
__syncthreads();
if( bx == -1 ) return;
g_out += row*c_width + col;
// convolve cols
float s = 0.f;
for (int k = -p_radius; k <= p_radius; ++k) {
s += s_inblock[ p_radius + ty + k ][ p_radius + tx ] * c_kernel[ k + p_radius ];
}
*g_out = s;
}
///----------------- GAUSS-FFT - CONVOLUTION THROUGH FFT -----------------
__global__ __launch_bounds__( WS*NW, NB )
void apply_gauss_hat_kernel( float2 *img, float cte ) {
const int y = blockDim.y * blockIdx.y + threadIdx.y;
const int x = blockDim.x * blockIdx.x + threadIdx.x;
if( x >= c_fft_width or y >= c_height ) return;
float wx = float(x)/c_fft_width/2.f, wy;
if( y < c_height/2 ) wy = float(y)/c_height;
else wy = float(y-c_height)/c_height;
float g = exp( -cte * (wx*wx + wy*wy)/2.f );
float2 val = img[y*c_fft_width+x];
val.x *= g;
val.y *= g;
img[y*c_fft_width+x] = val;
}
template< int radius >
void apply_gauss_hat( float2 *img_spectrum, const dim3& dim_grid, const dim3& dim_block ) {
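 // The Fourier transform of a Gaussian is a Gaussian: multiplying the spectrum by
 // exp(-(2*pi*sigma)^2 * |w|^2 / 2), with w in normalized frequency units, is equivalent
 // to convolving the image with a spatial Gaussian of standard deviation sigma.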
float sigma = (float)radius/2;
float cte = sigma*2*M_PI;
cte *= cte;
hipLaunchKernelGGL(( apply_gauss_hat_kernel), dim3(dim_grid), dim3(dim_block) , 0, 0, img_spectrum, cte );
}
///---------------- MAIN ----------------
int main( int argc, char** argv ) {
int width = 4096, height = 2048;
if( argc > 2 && ( sscanf( argv[1], "%d", &width ) != 1 || sscanf( argv[2], "%d", &height ) != 1 || width < 1 || height < 1 ) ) { fprintf( stderr, "[error] Invalid argument: %s %s\n[usage] %s [width=%d] [height=%d] [output=0-no|1-perf|2-err]\n", argv[1], argv[2], argv[0], width, height ); return 1; }
else if( argc == 2 || argc > 4 ) { fprintf( stderr, "[usage] %s [width=%d] [height=%d] [output=0-no|1-perf|2-err]\n", argv[0], width, height ); return 1; }
 res_out = ( argc == 4 ) ? argv[3][0]-48 : DEFAULT_RES_OUT;
if( res_out != 0 && res_out != 1 && res_out != 2 ) { fprintf( stderr, "[error] Invalid output type %d\n\tValids are: 0 - no output; 1 - performance; 2 - error\n", res_out ); return 1; }
int ne = width * height; // number of elements
float dt = 0.f;
std::vector< float > kernel;
int radius = KERNEL_RADIUS;
compute_gauss( kernel, radius );
std::vector< float > h_in_img;
#ifdef USE_CACTUS
int maxval;
if( !res_out ) {
printf( "[gauss] Reading %s ... ", CACTUS_IN ); fflush(stdin);
if( load_pgm( h_in_img, width, height, maxval, CACTUS_IN ) ) printf( "done! (image: %d x %d)\n", width, height );
else { fprintf( stderr, "!!! error!" ); return 1; }
ne = width * height;
} else {
h_in_img.resize( ne );
maxval = 255;
srand( 1234 );
for (int i = 0; i < ne; ++i)
h_in_img[i] = rand() / (double)RAND_MAX;
}
#else
if( !res_out ) { printf( "[gauss] Generating random image (%dx%d) ... ", width, height ); fflush(stdin); }
h_in_img.resize( ne );
srand( 1234 );
for (int i = 0; i < ne; ++i)
h_in_img[i] = rand() / (double)RAND_MAX;
if( !res_out ) printf( "done!\n" );
#endif
if( !res_out ) { printf( "[gauss] Allocating memory in CPU ... " ); fflush(stdin); }
std::vector< float > h_ref_img( ne ), h_out_img( ne ), h_fft_img( ne );
if( !res_out ) printf( "done!\n");
#ifdef COMPUTE_IN_CPU
if( !res_out ) { printf( "[gauss] Computing in CPU ... " ); fflush(stdin); }
Chronos te; // time elapsed computation
te.reset();
gauss_cpu( &h_ref_img[0], &h_in_img[0], width, height, kernel, radius );
dt = te.elapsed();
if( !res_out ) printf( "done!\n[CPU] reference done in %gs @ %g MiP/s\n", dt, ne/(1024.*1024.*dt) );
#endif
#ifdef USE_CACTUS
if( !res_out ) {
printf( "[gauss] Writing %s ... ", CACTUS_CPU ); fflush(stdin);
if( save_pgm( h_ref_img, width, height, maxval, CACTUS_CPU ) ) printf( "done!\n" );
else { fprintf( stderr, "!!! error!" ); return 1; }
}
#endif
if( !res_out ) { printf( "[gauss] Allocating memory in GPU ... " ); fflush(stdin); }
hipChannelFormatDesc ccd = hipCreateChannelDesc<float>(); // cuda channel descriptor for texture
hipArray *a_in_img; hipMallocArray( &a_in_img, &ccd, width, height );
hipArray *a_fin_img; hipMallocArray( &a_fin_img, &ccd, width, height, hipArraySurfaceLoadStore ); // array for surface in
hipArray *a_out_img; hipMallocArray( &a_out_img, &ccd, width, height, hipArraySurfaceLoadStore ); // array for surface out
float *d_in_img = 0; hipMalloc( (void**)&d_in_img, sizeof(float)*ne );
float *d_out_img = 0; hipMalloc( (void**)&d_out_img, sizeof(float)*ne );
int fft_width = width/2+1;
Vector< float, KERNEL_RADIUS*2+1 > kernel_gpu;
for (int i=0; i<KERNEL_RADIUS*2+1; ++i)
kernel_gpu[i] = kernel[i];
copy_to_symbol(c_width, width);
copy_to_symbol(c_height, height);
copy_to_symbol(c_kernel, kernel_gpu);
copy_to_symbol(c_fft_width, fft_width);
t_in_img.addressMode[0] = hipAddressModeClamp;
t_in_img.addressMode[1] = hipAddressModeClamp;
t_in_img.filterMode = hipFilterModePoint;
t_in_img.normalized = false;
if( !a_in_img || !a_fin_img || !a_out_img || !d_in_img || !d_out_img ) { fprintf( stderr, "!!! error!\n" ); return 1; }
if( !res_out ) { printf( "done!\n[gauss] Computing in GPU ...\n" ); }
if( !res_out ) { printf( "[gauss] Info: r = %d ; b = %dx%d\n", radius, WS, NW ); fflush(stdin); }
//if( res_out ) { printf( "%d %d", width, height ); fflush(stdin); }
float me = 0.f, mre = 0.f; // maximum error and maximum relative error
//int w_ws = (width+WS-1)/WS, h_nw = (height+NW-1)/NW, w_hws = (width+HWS-1)/HWS, w_mnt = (width+MNT-1)/MNT;
hipEvent_t start_device, stop_device;
hipEventCreate(&start_device); hipEventCreate(&stop_device);
//hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
#ifdef RUN_GAUSS_0
{ // gauss-0
hipMemcpy( d_in_img, &h_in_img[0], sizeof(float)*ne, hipMemcpyHostToDevice );
hipEventRecord( start_device, 0 );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
hipLaunchKernelGGL(( gauss0_rows< KERNEL_RADIUS >)
, dim3(dim3((width+WS-1)/WS, (height+NW-1)/NW)), dim3(dim3(WS, NW)) , 0, 0,
d_out_img, d_in_img );
hipLaunchKernelGGL(( gauss0_cols< KERNEL_RADIUS >)
, dim3(dim3((width+WS-1)/WS, (height+NW-1)/NW)), dim3(dim3(WS, NW)) , 0, 0,
d_in_img, d_out_img );
}
hipEventRecord( stop_device, 0 );
hipEventSynchronize( stop_device );
dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
hipMemcpy( &h_out_img[0], d_in_img, sizeof(float)*ne, hipMemcpyDeviceToHost );
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss0 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
}
#endif
#ifdef RUN_GAUSS_1
{ // gauss-1
hipMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, hipMemcpyHostToDevice );
hipEventRecord( start_device, 0 );
hipBindSurfaceToArray( f_out_img, a_out_img );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
hipBindTextureToArray( t_in_img, a_in_img );
hipLaunchKernelGGL(( gauss1_rows< KERNEL_RADIUS >)
, dim3(dim3((width+WS1-1)/WS1, (height+NW1-1)/NW1)), dim3(dim3(WS1, NW1)) , 0, 0, );
// fortunately output surface can be used as input texture afterwards
hipBindTextureToArray( t_in_img, a_out_img );
hipLaunchKernelGGL(( gauss1_cols< KERNEL_RADIUS >)
, dim3(dim3((width+WS1-1)/WS1, (height+NW1-1)/NW1)), dim3(dim3(WS1, NW1)) , 0, 0,
d_out_img );
}
hipEventRecord( stop_device, 0 );
hipEventSynchronize( stop_device );
hipUnbindTexture( t_in_img );
dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
hipMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, hipMemcpyDeviceToHost );
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss1 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
}
#endif
hipDeviceSetCacheConfig( hipFuncCachePreferShared );
#ifdef RUN_GAUSS_2
{ // gauss-2
hipMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, hipMemcpyHostToDevice );
hipEventRecord( start_device, 0 );
hipBindSurfaceToArray( f_out_img, a_out_img );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
hipBindTextureToArray( t_in_img, a_in_img );
hipLaunchKernelGGL(( gauss2_rows< KERNEL_RADIUS >)
, dim3(dim3((width+MNT-1)/MNT, height)), dim3(dim3(MNT, 1)) , 0, 0, );
hipBindTextureToArray( t_in_img, a_out_img );
hipLaunchKernelGGL(( gauss2_cols< KERNEL_RADIUS >)
, dim3(dim3((width+WS2-1)/WS2, (height+NW2-1)/NW2)), dim3(dim3(WS2, NW2)) , 0, 0,
d_out_img );
}
hipEventRecord( stop_device, 0 );
hipEventSynchronize( stop_device );
hipUnbindTexture( t_in_img );
dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
hipMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, hipMemcpyDeviceToHost );
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss2 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
#ifdef USE_CACTUS
if( !res_out ) {
printf( "[gauss] Writing %s ... ", CACTUS_GPU ); fflush(stdin);
if( save_pgm( h_out_img, width, height, maxval, CACTUS_GPU ) ) printf( "done!\n" );
else { fprintf( stderr, "!!! error!" ); return 1; }
}
#endif
}
#endif
#ifdef RUN_GAUSS_3
{ // gauss-3
hipMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, hipMemcpyHostToDevice );
hipEventRecord( start_device, 0 );
hipBindTextureToArray( t_in_img, a_in_img );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
hipLaunchKernelGGL(( gauss3< KERNEL_RADIUS >)
, dim3(dim3((width+WS3-1)/WS3, (height+NW3-1)/NW3)), dim3(dim3(WS3, NW3)) , 0, 0,
d_out_img );
}
hipEventRecord( stop_device, 0 );
hipEventSynchronize( stop_device );
hipUnbindTexture( t_in_img );
dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
hipMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, hipMemcpyDeviceToHost );
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss3 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
}
#endif
#ifdef RUN_GAUSS_FFT
{ // gauss-fft
dvector< float > d_img( h_in_img );
int fftW = width/2+1, fftH = height;
dvector< float2 > d_img_hat( fftW * fftH );
 dim3 dim_block( WS, NW ), dim_grid( (fftW+dim_block.x-1)/dim_block.x, (fftH+dim_block.y-1)/dim_block.y );
hipfftHandle planFwd, planInv;
hipfftPlan2d(&planFwd, width, height, HIPFFT_R2C);
hipfftPlan2d(&planInv, width, height, HIPFFT_C2R);
hipEventRecord( start_device, 0 );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
hipfftExecR2C(planFwd, d_img, (hipfftComplex *)(float2 *)d_img_hat);
apply_gauss_hat< KERNEL_RADIUS >(d_img_hat, dim_grid, dim_block );
hipfftExecC2R(planInv, (hipfftComplex *)(float2 *)d_img_hat, d_img);
}
hipEventRecord( stop_device, 0 );
hipEventSynchronize( stop_device );
dt = 0.f; hipEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
hipfftDestroy(planFwd);
hipfftDestroy(planInv);
h_fft_img = to_cpu(d_img);
for (int i=0; i<ne; ++i) h_fft_img[i] /= (float)ne;
// gauss-fft calculates the exact convolution, so it might be different
// from the cpu reference
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss-fft done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
#ifdef USE_CACTUS
if( !res_out ) {
printf( "[gauss] Writing %s ... ", CACTUS_FFT_GPU ); fflush(stdin);
if( save_pgm( h_fft_img, width, height, maxval, CACTUS_FFT_GPU ) ) printf( "done!\n" );
else { fprintf( stderr, "!!! error!" ); return 1; }
}
#endif
}
#endif
#ifdef RUN_GAUSS_REC
{ // gauss-recursive
void rec_gauss(float *h_img, int width, int height, float sigma);
std::vector<float> h_img = h_in_img;
rec_gauss(&h_img[0], width, height, radius/2);
#ifdef USE_CACTUS
if( !res_out ) {
printf( "[gauss] Writing %s ... ", CACTUS_RECURSIVE_GPU ); fflush(stdin);
if( save_pgm(h_img, width, height, maxval, CACTUS_RECURSIVE_GPU) ) printf( "done!\n" );
else { fprintf( stderr, "!!! error!" ); return 1; }
}
#endif
}
#endif
//if( res_out ) printf( "\n" );
hipEventDestroy(start_device); hipEventDestroy(stop_device);
hipFreeArray( a_in_img );
hipFreeArray( a_fin_img );
hipFreeArray( a_out_img );
hipFree( d_in_img );
hipFree( d_out_img );
return 0;
}
| 554401558a5c13284afccba1e62473473ab302d1.cu | /**
* Gaussian Blur Computation
*
* Maximo, Andre -- Sep, 2011
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <cfloat>
#include <cufft.h>
#include <util/error.h>
#include <util/symbol.h>
//#include <util/chronos.h>
#include <util/dvector.h>
#include <util/timer.h>
#include <util/recfilter.h>
//#include "pgm.h"
#include "gauss_dir_cpu.h"
using namespace gpufilter;
#define WS 32 ///< Warp Size
#define NW 8 ///< Number of warps per block (with all warps computing)
#define NB 8 ///< Number of blocks per SM
#define WS1 32 ///< for gauss1
#define NW1 8 ///< for gauss1
#define NB1 8 ///< for gauss1
#define WS2 16 ///< for gauss2
#define NW2 8 ///< for gauss2
#define NB2 16 ///< for gauss2
#define WS3 32 ///< for gauss3
#define NW3 16 ///< for gauss3
#define NB3 3 ///< for gauss3
#define MNB 2 ///< Maximum number of blocks per SM
#define MNT 1024 ///< Maximum number of threads per block (with MNB)
unsigned RUN_TIMES=1024;
int res_out;
#define DEFAULT_RES_OUT 1 // res_out: 0 verbose; 1 perf; 2 error
#define KERNEL_RADIUS 33
//#define KERNEL_RADIUS 47
//#define COMPUTE_IN_CPU // compute in cpu debug flag: cpu can be very slow..
//#define RUN_GAUSS_0
//#define RUN_GAUSS_1
//#define RUN_GAUSS_2
//#define RUN_GAUSS_3
//#define RUN_GAUSS_FFT
//#define RUN_GAUSS_REC
// get cactus with Diego Nehab
//#define USE_CACTUS // use cactus debug flag: read/write file names below
#define CACTUS_IN "cactus-1024.pgm"
#define CACTUS_GPU "cactus-1024-direct-gpu.pgm"
#define CACTUS_CPU "cactus-1024-direct-cpu.pgm"
#define CACTUS_FFT_GPU "cactus-1024-fft-gpu.pgm"
#define CACTUS_RECURSIVE_GPU "cactus-1024-recursive-gpu.pgm"
/// Naming conventions are: c_ constant; t_ texture; g_ global memory;
/// s_ shared memory; d_ device pointer; a_ cuda-array; p_ template
/// parameter; f_ surface.
__constant__ int c_width, c_height;
__constant__ Vector< float, KERNEL_RADIUS * 2 + 1 > c_kernel;
__constant__ int c_fft_width;
texture< float, 2, cudaReadModeElementType > t_in_img;
surface< void, 2 > f_in_img;
surface< void, 2 > f_out_img;
/// Memory types are: TMEM texture memory; SMEM shared memory; GMEM
/// global memory
///------------------------- AUXILIARY FUNCTIONS ----------------------------
__device__ inline // auxiliary reflect function in device
int d_reflect( const int& i, const int& n ) {
if( i < 0 ) return (-i-1);
else if( i >= n ) return (2*n-i-1);
else return i;
}
__device__ inline // auxiliary clamp function in device
int d_clamp( const int& i, const int& n ) {
if( i < 0 ) return 0;
else if( i >= n ) return n-1;
else return i;
}
///----------------- GAUSS-0 - TWO-PASS CONVOLUTION IN GMEM -----------------
template< int p_radius >
__global__ __launch_bounds__( WS*NW, NB )
void gauss0_rows( float *g_out, const float *g_in ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row = by*NW+ty;
if( row >= c_height or col >= c_width ) return;
g_in += row*c_width;
g_out += row*c_width + col;
float s = 0.f;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += g_in[ d_clamp(col+k, c_width) ] * c_kernel[ k + p_radius ];
}
*g_out = s;
}
template< int p_radius >
__global__ __launch_bounds__( WS*NW, NB )
void gauss0_cols( float *g_out, const float *g_in ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS+tx, row = by*NW+ty;
if( row >= c_height or col >= c_width ) return;
g_in += col;
g_out += row*c_width + col;
float s = 0.f;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += g_in[ d_clamp(row+k, c_height)*c_width ] * c_kernel[ k + p_radius ];
}
*g_out = s;
}
///----------------- GAUSS-1 - TWO-PASS CONVOLUTION IN TMEM -----------------
template< int p_radius >
__global__ __launch_bounds__( WS1*NW1, NB1 )
void gauss1_rows( void ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS1+tx, row = by*NW1+ty;
if( row >= c_height or col >= c_width ) return;
float tu = col + .5f, tv = row + .5f, s = 0.f;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += tex2D( t_in_img, tu+k, tv ) * c_kernel[ k + p_radius ];
}
surf2Dwrite( s, f_out_img, col*4, row, cudaBoundaryModeTrap ); // trap kills kernel if outside boundary
}
template< int p_radius >
__global__ __launch_bounds__( WS1*NW1, NB1 )
void gauss1_cols( float *g_out ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS1+tx, row = by*NW1+ty;
if( row >= c_height or col >= c_width ) return;
g_out += row*c_width + col;
float tu = col + .5f, tv = row + .5f;
float s = 0.f;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += tex2D( t_in_img, tu, tv+k ) * c_kernel[ k + p_radius ];
}
*g_out = s;
}
///----------------- GAUSS-2 - TWO-PASS CONVOLUTION IN SMEM -----------------
template< int p_radius >
__global__ __launch_bounds__( MNT, MNB )
void gauss2_rows( void ) {
const int tx = threadIdx.x, bx = blockIdx.x, by = blockIdx.y;
float tu = bx*MNT+tx + .5f, tv = by + .5f;
float s = 0.f;
volatile __shared__ float s_row[ MNT + p_radius*2 ];
s_row[ p_radius + tx ] = tex2D( t_in_img, tu, tv );
if( tx < p_radius ) s_row[ tx ] = tex2D( t_in_img, tu - p_radius, tv );
else if( tx < 2*p_radius ) s_row[ MNT + tx ] = tex2D( t_in_img, tu - p_radius + MNT, tv );
__syncthreads();
if( bx*MNT+tx >= c_width ) return;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += s_row[ p_radius + tx + k ] * c_kernel[ k + p_radius ];
}
surf2Dwrite( s, f_out_img, (bx*MNT+tx)*4, by, cudaBoundaryModeTrap );
}
template< int p_radius >
__global__ __launch_bounds__( WS2*NW2, NB2 )
void gauss2_cols( float *g_out ) {
const int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
float tu = bx*WS2+tx + .5f, tv = by*NW2+ty + .5f;
float s = 0.f;
volatile __shared__ float s_cols[ WS2 ][ NW2 + p_radius*2 + 1 ];
s_cols[ tx ][ p_radius + ty ] = tex2D( t_in_img, tu, tv );
if( p_radius <= NW2/2 ) {
if( ty < p_radius ) s_cols[ tx ][ ty ] = tex2D( t_in_img, tu, tv - p_radius );
else if( ty < 2*p_radius ) s_cols[ tx ][ NW2 + ty ] = tex2D( t_in_img, tu, tv - p_radius + NW2 );
} else if( p_radius <= NW2 ) {
if( ty < p_radius ) {
s_cols[ tx ][ ty ] = tex2D( t_in_img, tu, tv - p_radius );
s_cols[ tx ][ p_radius + NW2 + ty ] = tex2D( t_in_img, tu, tv + NW2 );
}
} else {
for (int i = 0; i < (p_radius+NW2-1)/NW2; ++i) {
int wy = i*NW2+ty;
if( wy < p_radius ) {
s_cols[ tx ][ wy ] = tex2D( t_in_img, tu, tv - p_radius + i*NW2 );
s_cols[ tx ][ p_radius + NW2 + wy ] = tex2D( t_in_img, tu, tv + NW2 + i*NW2 );
}
}
}
__syncthreads();
if( bx*WS2+tx >= c_width or by*NW2+ty >= c_height ) return;
g_out += (by*NW2+ty)*c_width + bx*WS2+tx;
#pragma unroll
for (int k = -p_radius; k <= p_radius; ++k) {
s += s_cols[tx][ p_radius + ty + k ] * c_kernel[ k + p_radius ];
}
*g_out = s;
}
///----------------- GAUSS-3 - ONE-PASS CONVOLUTION -----------------
template< int p_radius >
__device__
void load_convolve_rows( volatile float *s_in, const int& tx, const float& tu, const float& tv ) {
// load middle data
s_in[ p_radius + tx ] = tex2D( t_in_img, tu, tv );
// load left and right data
if( p_radius <= WS3/2 ) {
if( tx < p_radius ) s_in[ tx ] = tex2D( t_in_img, tu - p_radius, tv );
else if( tx < p_radius*2 ) s_in[ WS3 + tx ] = tex2D( t_in_img, tu - p_radius + WS3, tv );
} else if( p_radius <= WS3 ) {
if( tx < p_radius ) {
s_in[ tx ] = tex2D( t_in_img, tu - p_radius, tv );
s_in[ p_radius + WS3 + tx ] = tex2D( t_in_img, tu + WS3, tv );
}
} else {
for (int i = 0; i < (p_radius+WS3-1)/WS3; ++i) {
int wx = i*WS3+tx;
if( wx < p_radius ) {
s_in[ wx ] = tex2D( t_in_img, tu - p_radius + i*WS3, tv );
s_in[ p_radius + WS3 + wx ] = tex2D( t_in_img, tu + WS3 + i*WS3, tv );
}
}
}
// convolve row
float s = 0.f;
for (int k = -p_radius; k <= p_radius; ++k) {
s += s_in[ p_radius + tx + k ] * c_kernel[ k + p_radius ];
}
s_in[ p_radius + tx ] = s;
}
template< int p_radius >
__global__ __launch_bounds__( WS3*NW3, NB3 )
void gauss3( float *g_out ) {
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y, col = bx*WS3+tx, row = by*NW3+ty;
if( row >= c_height or col >= c_width ) bx = -1;
float tu = col + .5f, tv = row + .5f;
volatile __shared__ float s_inblock[ NW3 + p_radius*2 ][ WS3 + p_radius*2 ];
// load middle data
load_convolve_rows< p_radius >( &s_inblock[ p_radius + ty ][0], tx, tu, tv );
// load upper and lower data
if( p_radius <= NW3/2 ) {
if( ty < p_radius ) load_convolve_rows< p_radius >( &s_inblock[ ty ][0], tx, tu, tv - p_radius );
else if( ty < p_radius*2 ) load_convolve_rows< p_radius >( &s_inblock[ NW3 + ty ][0], tx, tu, tv - p_radius + NW3 );
} else if( p_radius <= NW3 ) {
if( ty < p_radius ) {
load_convolve_rows< p_radius >( &s_inblock[ ty ][0], tx, tu, tv - p_radius );
load_convolve_rows< p_radius >( &s_inblock[ p_radius + NW3 + ty ][0], tx, tu, tv + NW3 );
}
} else {
for (int i = 0; i < (p_radius+NW3-1)/NW3; ++i) {
int wy = i*NW3+ty;
if( wy < p_radius ) {
load_convolve_rows< p_radius >( &s_inblock[ wy ][0], tx, tu, tv - p_radius + i*NW3 );
load_convolve_rows< p_radius >( &s_inblock[ p_radius + NW3 + wy ][0], tx, tu, tv + NW3 + i*NW3 );
}
}
}
__syncthreads();
if( bx == -1 ) return;
g_out += row*c_width + col;
// convolve cols
float s = 0.f;
for (int k = -p_radius; k <= p_radius; ++k) {
s += s_inblock[ p_radius + ty + k ][ p_radius + tx ] * c_kernel[ k + p_radius ];
}
*g_out = s;
}
///----------------- GAUSS-FFT - CONVOLUTION THROUGH FFT -----------------
__global__ __launch_bounds__( WS*NW, NB )
void apply_gauss_hat_kernel( float2 *img, float cte ) {
const int y = blockDim.y * blockIdx.y + threadIdx.y;
const int x = blockDim.x * blockIdx.x + threadIdx.x;
if( x >= c_fft_width or y >= c_height ) return;
float wx = float(x)/c_fft_width/2.f, wy;
if( y < c_height/2 ) wy = float(y)/c_height;
else wy = float(y-c_height)/c_height;
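 // The upper half of the y range is unwrapped to negative frequencies; only non-negative
 // wx exists because the R2C transform stores just half of the symmetric spectrum.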
float g = exp( -cte * (wx*wx + wy*wy)/2.f );
float2 val = img[y*c_fft_width+x];
val.x *= g;
val.y *= g;
img[y*c_fft_width+x] = val;
}
template< int radius >
void apply_gauss_hat( float2 *img_spectrum, const dim3& dim_grid, const dim3& dim_block ) {
float sigma = (float)radius/2;
float cte = sigma*2*M_PI;
cte *= cte;
apply_gauss_hat_kernel<<< dim_grid, dim_block >>>( img_spectrum, cte );
}
///---------------- MAIN ----------------
int main( int argc, char** argv ) {
int width = 4096, height = 2048;
if( argc > 2 && ( sscanf( argv[1], "%d", &width ) != 1 || sscanf( argv[2], "%d", &height ) != 1 || width < 1 || height < 1 ) ) { fprintf( stderr, "[error] Invalid argument: %s %s\n[usage] %s [width=%d] [height=%d] [output=0-no|1-perf|2-err]\n", argv[1], argv[2], argv[0], width, height ); return 1; }
else if( argc == 2 || argc > 4 ) { fprintf( stderr, "[usage] %s [width=%d] [height=%d] [output=0-no|1-perf|2-err]\n", argv[0], width, height ); return 1; }
 res_out = ( argc == 4 ) ? argv[3][0]-48 : DEFAULT_RES_OUT;
if( res_out != 0 && res_out != 1 && res_out != 2 ) { fprintf( stderr, "[error] Invalid output type %d\n\tValids are: 0 - no output; 1 - performance; 2 - error\n", res_out ); return 1; }
int ne = width * height; // number of elements
float dt = 0.f;
std::vector< float > kernel;
int radius = KERNEL_RADIUS;
compute_gauss( kernel, radius );
std::vector< float > h_in_img;
#ifdef USE_CACTUS
int maxval;
if( !res_out ) {
printf( "[gauss] Reading %s ... ", CACTUS_IN ); fflush(stdin);
if( load_pgm( h_in_img, width, height, maxval, CACTUS_IN ) ) printf( "done! (image: %d x %d)\n", width, height );
else { fprintf( stderr, "!!! error!" ); return 1; }
ne = width * height;
} else {
h_in_img.resize( ne );
maxval = 255;
srand( 1234 );
for (int i = 0; i < ne; ++i)
h_in_img[i] = rand() / (double)RAND_MAX;
}
#else
if( !res_out ) { printf( "[gauss] Generating random image (%dx%d) ... ", width, height ); fflush(stdin); }
h_in_img.resize( ne );
srand( 1234 );
for (int i = 0; i < ne; ++i)
h_in_img[i] = rand() / (double)RAND_MAX;
if( !res_out ) printf( "done!\n" );
#endif
if( !res_out ) { printf( "[gauss] Allocating memory in CPU ... " ); fflush(stdin); }
std::vector< float > h_ref_img( ne ), h_out_img( ne ), h_fft_img( ne );
if( !res_out ) printf( "done!\n");
#ifdef COMPUTE_IN_CPU
if( !res_out ) { printf( "[gauss] Computing in CPU ... " ); fflush(stdin); }
Chronos te; // time elapsed computation
te.reset();
gauss_cpu( &h_ref_img[0], &h_in_img[0], width, height, kernel, radius );
dt = te.elapsed();
if( !res_out ) printf( "done!\n[CPU] reference done in %gs @ %g MiP/s\n", dt, ne/(1024.*1024.*dt) );
#endif
#ifdef USE_CACTUS
if( !res_out ) {
printf( "[gauss] Writing %s ... ", CACTUS_CPU ); fflush(stdin);
if( save_pgm( h_ref_img, width, height, maxval, CACTUS_CPU ) ) printf( "done!\n" );
else { fprintf( stderr, "!!! error!" ); return 1; }
}
#endif
if( !res_out ) { printf( "[gauss] Allocating memory in GPU ... " ); fflush(stdin); }
cudaChannelFormatDesc ccd = cudaCreateChannelDesc<float>(); // cuda channel descriptor for texture
cudaArray *a_in_img; cudaMallocArray( &a_in_img, &ccd, width, height );
cudaArray *a_fin_img; cudaMallocArray( &a_fin_img, &ccd, width, height, cudaArraySurfaceLoadStore ); // array for surface in
cudaArray *a_out_img; cudaMallocArray( &a_out_img, &ccd, width, height, cudaArraySurfaceLoadStore ); // array for surface out
float *d_in_img = 0; cudaMalloc( (void**)&d_in_img, sizeof(float)*ne );
float *d_out_img = 0; cudaMalloc( (void**)&d_out_img, sizeof(float)*ne );
int fft_width = width/2+1;
Vector< float, KERNEL_RADIUS*2+1 > kernel_gpu;
for (int i=0; i<KERNEL_RADIUS*2+1; ++i)
kernel_gpu[i] = kernel[i];
copy_to_symbol(c_width, width);
copy_to_symbol(c_height, height);
copy_to_symbol(c_kernel, kernel_gpu);
copy_to_symbol(c_fft_width, fft_width);
t_in_img.addressMode[0] = cudaAddressModeClamp;
t_in_img.addressMode[1] = cudaAddressModeClamp;
t_in_img.filterMode = cudaFilterModePoint;
t_in_img.normalized = false;
if( !a_in_img || !a_fin_img || !a_out_img || !d_in_img || !d_out_img ) { fprintf( stderr, "!!! error!\n" ); return 1; }
if( !res_out ) { printf( "done!\n[gauss] Computing in GPU ...\n" ); }
if( !res_out ) { printf( "[gauss] Info: r = %d ; b = %dx%d\n", radius, WS, NW ); fflush(stdin); }
//if( res_out ) { printf( "%d %d", width, height ); fflush(stdin); }
float me = 0.f, mre = 0.f; // maximum error and maximum relative error
//int w_ws = (width+WS-1)/WS, h_nw = (height+NW-1)/NW, w_hws = (width+HWS-1)/HWS, w_mnt = (width+MNT-1)/MNT;
cudaEvent_t start_device, stop_device;
cudaEventCreate(&start_device); cudaEventCreate(&stop_device);
//cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
#ifdef RUN_GAUSS_0
{ // gauss-0
cudaMemcpy( d_in_img, &h_in_img[0], sizeof(float)*ne, cudaMemcpyHostToDevice );
cudaEventRecord( start_device, 0 );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
gauss0_rows< KERNEL_RADIUS >
<<< dim3((width+WS-1)/WS, (height+NW-1)/NW), dim3(WS, NW) >>>(
d_out_img, d_in_img );
gauss0_cols< KERNEL_RADIUS >
<<< dim3((width+WS-1)/WS, (height+NW-1)/NW), dim3(WS, NW) >>>(
d_in_img, d_out_img );
}
cudaEventRecord( stop_device, 0 );
cudaEventSynchronize( stop_device );
dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
cudaMemcpy( &h_out_img[0], d_in_img, sizeof(float)*ne, cudaMemcpyDeviceToHost );
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss0 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
}
#endif
#ifdef RUN_GAUSS_1
{ // gauss-1
cudaMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, cudaMemcpyHostToDevice );
cudaEventRecord( start_device, 0 );
cudaBindSurfaceToArray( f_out_img, a_out_img );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
cudaBindTextureToArray( t_in_img, a_in_img );
gauss1_rows< KERNEL_RADIUS >
<<< dim3((width+WS1-1)/WS1, (height+NW1-1)/NW1), dim3(WS1, NW1) >>>();
// fortunately output surface can be used as input texture afterwards
cudaBindTextureToArray( t_in_img, a_out_img );
gauss1_cols< KERNEL_RADIUS >
<<< dim3((width+WS1-1)/WS1, (height+NW1-1)/NW1), dim3(WS1, NW1) >>>(
d_out_img );
}
cudaEventRecord( stop_device, 0 );
cudaEventSynchronize( stop_device );
cudaUnbindTexture( t_in_img );
dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
cudaMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, cudaMemcpyDeviceToHost );
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss1 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
}
#endif
cudaDeviceSetCacheConfig( cudaFuncCachePreferShared );
#ifdef RUN_GAUSS_2
{ // gauss-2
cudaMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, cudaMemcpyHostToDevice );
cudaEventRecord( start_device, 0 );
cudaBindSurfaceToArray( f_out_img, a_out_img );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
cudaBindTextureToArray( t_in_img, a_in_img );
gauss2_rows< KERNEL_RADIUS >
<<< dim3((width+MNT-1)/MNT, height), dim3(MNT, 1) >>>();
cudaBindTextureToArray( t_in_img, a_out_img );
gauss2_cols< KERNEL_RADIUS >
<<< dim3((width+WS2-1)/WS2, (height+NW2-1)/NW2), dim3(WS2, NW2) >>>(
d_out_img );
}
cudaEventRecord( stop_device, 0 );
cudaEventSynchronize( stop_device );
cudaUnbindTexture( t_in_img );
dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
cudaMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, cudaMemcpyDeviceToHost );
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss2 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
#ifdef USE_CACTUS
if( !res_out ) {
printf( "[gauss] Writing %s ... ", CACTUS_GPU ); fflush(stdin);
if( save_pgm( h_out_img, width, height, maxval, CACTUS_GPU ) ) printf( "done!\n" );
else { fprintf( stderr, "!!! error!" ); return 1; }
}
#endif
}
#endif
#ifdef RUN_GAUSS_3
{ // gauss-3
cudaMemcpyToArray( a_in_img, 0, 0, &h_in_img[0], sizeof(float)*ne, cudaMemcpyHostToDevice );
cudaEventRecord( start_device, 0 );
cudaBindTextureToArray( t_in_img, a_in_img );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
gauss3< KERNEL_RADIUS >
<<< dim3((width+WS3-1)/WS3, (height+NW3-1)/NW3), dim3(WS3, NW3) >>>(
d_out_img );
}
cudaEventRecord( stop_device, 0 );
cudaEventSynchronize( stop_device );
cudaUnbindTexture( t_in_img );
dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
cudaMemcpy( &h_out_img[0], d_out_img, sizeof(float)*ne, cudaMemcpyDeviceToHost );
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss3 done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
}
#endif
#ifdef RUN_GAUSS_FFT
{ // gauss-fft
dvector< float > d_img( h_in_img );
int fftW = width/2+1, fftH = height;
dvector< float2 > d_img_hat( fftW * fftH );
dim3 dim_block( WS, NW ), dim_grid( (fftW+1)/dim_block.x, (fftH+1)/dim_block.y );
cufftHandle planFwd, planInv;
cufftPlan2d(&planFwd, width, height, CUFFT_R2C);
cufftPlan2d(&planInv, width, height, CUFFT_C2R);
cudaEventRecord( start_device, 0 );
for (int i = 0; i <= res_out*(RUN_TIMES-1); ++i) {
cufftExecR2C(planFwd, d_img, (cufftComplex *)(float2 *)d_img_hat);
apply_gauss_hat< KERNEL_RADIUS >(d_img_hat, dim_grid, dim_block );
cufftExecC2R(planInv, (cufftComplex *)(float2 *)d_img_hat, d_img);
}
cudaEventRecord( stop_device, 0 );
cudaEventSynchronize( stop_device );
dt = 0.f; cudaEventElapsedTime(&dt, start_device, stop_device); dt /= 1000.f;
if( res_out == 1 ) dt /= float(RUN_TIMES);
cufftDestroy(planFwd);
cufftDestroy(planInv);
h_fft_img = to_cpu(d_img);
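// cuFFT transforms are unnormalized, so the R2C/C2R round trip gains a
// factor of width*height (= ne); undo it here.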
for (int i=0; i<ne; ++i) h_fft_img[i] /= (float)ne;
// gauss-fft calculates the exact convolution, so it might be different
// from the cpu reference
#ifdef COMPUTE_IN_CPU
check_cpu_reference(&h_ref_img[0], &h_out_img[0], ne, me, mre);
#endif
if( res_out == 1 )
printf( " %g", ne/(1024.*1024.*dt) );
else if( res_out == 2 )
printf( " %g %g", me, mre );
else
printf( "[GPU] gauss-fft done with max_err = %g in %gs @ %g MiP/s\n", me, dt, ne/(1024.*1024.*dt) );
#ifdef USE_CACTUS
if( !res_out ) {
printf( "[gauss] Writing %s ... ", CACTUS_FFT_GPU ); fflush(stdin);
if( save_pgm( h_fft_img, width, height, maxval, CACTUS_FFT_GPU ) ) printf( "done!\n" );
else { fprintf( stderr, "!!! error!" ); return 1; }
}
#endif
}
#endif
#ifdef RUN_GAUSS_REC
{ // gauss-recursive
void rec_gauss(float *h_img, int width, int height, float sigma);
std::vector<float> h_img = h_in_img;
rec_gauss(&h_img[0], width, height, radius/2);
#ifdef USE_CACTUS
if( !res_out ) {
printf( "[gauss] Writing %s ... ", CACTUS_RECURSIVE_GPU ); fflush(stdin);
if( save_pgm(h_img, width, height, maxval, CACTUS_RECURSIVE_GPU) ) printf( "done!\n" );
else { fprintf( stderr, "!!! error!" ); return 1; }
}
#endif
}
#endif
//if( res_out ) printf( "\n" );
cudaEventDestroy(start_device); cudaEventDestroy(stop_device);
cudaFreeArray( a_in_img );
cudaFreeArray( a_fin_img );
cudaFreeArray( a_out_img );
cudaFree( d_in_img );
cudaFree( d_out_img );
return 0;
}
|
1997d262d6da9db86877ce3e56a278c0b382b6f6.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <algorithm>
#include <sys/stat.h>
#include <unistd.h>
#include "settings.cuh"
#include "dq_network.cuh"
/* GLM */
#include <glm/glm.hpp>
using glm::vec3;
using glm::mat3;
using glm::vec4;
using glm::mat4;
inline bool file_exists (const std::string& name) {
struct stat buffer;
return (stat (name.c_str(), &buffer) == 0);
}
// Translate all vertices into the coordinate system of the current point
void convert_vertices_to_point_coord_system(
std::vector<float>& converted_vertices,
vec3& pos,
std::vector<float>& vertices
){
for (int i = 0; i < vertices.size(); i+=3){
converted_vertices[i ] = vertices[i ] - pos.x;
converted_vertices[i+1] = vertices[i+1] - pos.y;
converted_vertices[i+2] = vertices[i+2] - pos.z;
}
}
// Read the scene data file and populate the list of scene data
void load_vertices(std::vector<float>& vertices){
// Open the file and read line by line
std::string line;
std::ifstream save_file ("../Radiance_Map_Data/vertices.txt");
if (save_file.is_open()){
int line_idx = 0;
while ( std::getline (save_file, line)){
// For each space
size_t pos = 0;
std::string token;
while ((pos = line.find(" ")) != std::string::npos){
// Add the coordinate value to the vertex list
token = line.substr(0, pos);
vertices.push_back(std::stof(token));
// Delete the part that we have read
line.erase(0, pos + 1);
}
// Add the final float in
vertices.push_back(std::stof(line));
}
}
else{
printf("Scene Data file could not be opened.\n");
}
}
// Read the radiance_map data file and populate a vector of vectors
void load_radiance_map_data(std::vector<std::vector<float>>& radiance_map_data, int& action_count){
// Open the file and read line by line
std::string line;
std::ifstream save_file ("../Radiance_Map_Data/radiance_map_data.txt");
if (save_file.is_open()){
int line_idx = 0;
while ( std::getline (save_file, line)){
if (line_idx == 0){
action_count = std::stoi(line);
}
else{
// vector of data for the current line read in
std::vector<float> data_line;
// For each space
size_t pos = 0;
std::string token;
while ((pos = line.find(" ")) != std::string::npos){
// Add the data to the list (Note first 3 numbers are the position)
token = line.substr(0, pos);
data_line.push_back(std::stof(token));
// Delete the part that we have read
line.erase(0, pos + 1);
}
// Add the final float in
data_line.push_back(std::stof(line));
// Add data line into the vector
radiance_map_data.push_back(data_line);
}
line_idx++;
}
}
else{
printf("Radiance Map Data file could not be opened.\n");
}
}
int main (int argc, char** argv) {
//////////////////////////////////////////////////////////////
/* Read in and shuffle the radiance map data */
//////////////////////////////////////////////////////////////
std::vector<std::vector<float>> radiance_map_data;
int action_count;
load_radiance_map_data(radiance_map_data, action_count);
std::random_shuffle(radiance_map_data.begin(), radiance_map_data.end());
std::cout << "Read " << radiance_map_data.size() << " lines of radiance_map data." << std::endl;
std::cout << "Action count: " << action_count << std::endl;
//////////////////////////////////////////////////////////////
/* Read in the scene data */
//////////////////////////////////////////////////////////////
std::vector<float> vertices;
load_vertices(vertices);
std::cout << "Read " << vertices.size()/3 << " vertices." << std::endl;
//////////////////////////////////////////////////////////////
/* Select test and training data */
//////////////////////////////////////////////////////////////
std::vector<std::vector<float>> training_data;
std::vector<std::vector<float>> test_data;
for(int i = 0; i < radiance_map_data.size(); i++){
// 3 for position, plus the number of actions on each line
std::vector<float> data_line(3 + action_count);
std::copy( radiance_map_data[i].begin(), radiance_map_data[i].end(), data_line.begin());
double rv = ((double) rand() / (RAND_MAX));
// Training
if( rv < 0.8 ){
training_data.push_back(data_line);
}
// Test
else{
test_data.push_back(data_line);
}
}
std::cout << "Training data set size: " << training_data.size() << std::endl;
std::cout << "Test data set size: " << test_data.size() << std::endl;
//////////////////////////////////////////////////////////////
/* Initialise the DNN */
//////////////////////////////////////////////////////////////
auto dyparams = dynet::extract_dynet_params(argc, argv);
dynet::initialize(dyparams);
dynet::ParameterCollection model;
dynet::AdamTrainer trainer(model);
DQNetwork dnn = DQNetwork();
dnn.initialize(model, vertices.size(), action_count);
//////////////////////////////////////////////////////////////
/* Load in the Parameter Values */
//////////////////////////////////////////////////////////////
std::string fname = "../Radiance_Map_Data/radiance_map_model.model";
if (LOAD_MODEL && file_exists(fname)){
dynet::TextFileLoader loader(fname);
loader.populate(model);
}
//////////////////////////////////////////////////////////////
/* Train and test the DNN */
//////////////////////////////////////////////////////////////
unsigned int num_batches = (training_data.size() + BATCH_SIZE - 1) / BATCH_SIZE;
for (int i = 0; i < EPOCHS; i++){
/* Train */
float loss = 0.f;
for (int b = 0; b < num_batches; b++){
// Initialise the computation graph
dynet::ComputationGraph graph;
// Batch start id and current batch size
unsigned int sidx = b*BATCH_SIZE;
unsigned int batch_size = std::min((unsigned int) training_data.size() - sidx, (unsigned int) BATCH_SIZE);
// Get the input data and targets for the current batch
std::vector<dynet::Expression> current_targets(batch_size);
std::vector<dynet::Expression> current_batch(batch_size);
for( unsigned int k = 0; k < batch_size; k++){
unsigned int data_idx = sidx+k;
// Get the outputs
std::vector<float> batch_targets(training_data[data_idx].begin()+3, training_data[data_idx].end());
// Get the inputs
std::vector<float> converted_vertices(vertices.size());
vec3 pos(
training_data[data_idx][0],
training_data[data_idx][1],
training_data[data_idx][2]
);
// Get the list of vertices in the converted coordinate space
convert_vertices_to_point_coord_system(
converted_vertices,
pos,
vertices
);
current_targets[k] = dynet::input(graph, {(unsigned int)action_count}, batch_targets);
current_batch[k] = dynet::input(graph, {(unsigned int)converted_vertices.size()}, converted_vertices);
}
dynet::Expression targets_batch = reshape(concatenate_cols(current_targets), dynet::Dim({(unsigned int) action_count}, batch_size));
dynet::Expression input_batch = reshape(concatenate_cols(current_batch), dynet::Dim({(unsigned int)vertices.size()}, batch_size));
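// concatenate_cols lays the per-example expressions side by side; reshaping
// with a batched Dim turns them into a single minibatched expression, so one
// forward pass through the network evaluates the whole batch.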
// Forward pass through the network
dynet::Expression output_batch = dnn.network_inference(graph, input_batch, true);
// Define the loss
dynet::Expression loss_expr = dynet::sum_batches(dynet::squared_distance(targets_batch, output_batch));
loss += dynet::as_scalar(graph.forward(loss_expr));
graph.backward(loss_expr);
trainer.update();
}
float error = 0.f;
/* Test */
for (int t = 0; t < test_data.size(); t++){
// Initialise the computational graph
dynet::ComputationGraph graph;
// Get the input expression and ground truth
std::vector<float> targets(test_data[t].begin()+3, test_data[t].end());
// Get the inputs
std::vector<float> converted_vertices(vertices.size());
vec3 pos(
test_data[t][0],
test_data[t][1],
test_data[t][2]
);
// Get the list of vertices in the converted coordinate space
convert_vertices_to_point_coord_system(
converted_vertices,
pos,
vertices
);
dynet::Expression input = dynet::input(graph, {(unsigned int)vertices.size()}, converted_vertices);
dynet::Expression ground_truth = dynet::input(graph, {(unsigned int) action_count}, targets);
// Get the predictions
dynet::Expression prediction = dnn.network_inference(graph, input, false);
// Calculate the difference between prediction and ground truth
dynet::Expression diff = dynet::squared_distance(ground_truth, prediction);
error += dynet::as_scalar(graph.forward(diff));
}
std::cout << "---------------- " << i+1 << " ----------------" << std::endl;
std::cout << " Loss: " << loss << std::endl;
std::cout << " Error: " << error << std::endl;
std::cout << "-------------------------------------" << std::endl;
std::cout << std::endl;
}
//////////////////////////////////////////////////////////////
/* Save the Model */
//////////////////////////////////////////////////////////////
if (SAVE_MODEL){
dynet::TextFileSaver saver("../Radiance_Map_Data/radiance_map_model.model");
saver.save(model);
}
return 0;
} | 1997d262d6da9db86877ce3e56a278c0b382b6f6.cu | #include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <algorithm>
#include <sys/stat.h>
#include <unistd.h>
#include "settings.cuh"
#include "dq_network.cuh"
/* GLM */
#include <glm/glm.hpp>
using glm::vec3;
using glm::mat3;
using glm::vec4;
using glm::mat4;
inline bool file_exists (const std::string& name) {
struct stat buffer;
return (stat (name.c_str(), &buffer) == 0);
}
// Translate all vertices into the coordinate system of the current point
void convert_vertices_to_point_coord_system(
std::vector<float>& converted_vertices,
vec3& pos,
std::vector<float>& vertices
){
for (int i = 0; i < vertices.size(); i+=3){
converted_vertices[i ] = vertices[i ] - pos.x;
converted_vertices[i+1] = vertices[i+1] - pos.y;
converted_vertices[i+2] = vertices[i+2] - pos.z;
}
}
// Read the scene data file and populate the list of scene data
void load_vertices(std::vector<float>& vertices){
// Open the file and read line by line
std::string line;
std::ifstream save_file ("../Radiance_Map_Data/vertices.txt");
if (save_file.is_open()){
int line_idx = 0;
while ( std::getline (save_file, line)){
// For each space
size_t pos = 0;
std::string token;
while ((pos = line.find(" ")) != std::string::npos){
// Add the coordinate value to the vertex list
token = line.substr(0, pos);
vertices.push_back(std::stof(token));
// Delete the part that we have read
line.erase(0, pos + 1);
}
// Add the final float in
vertices.push_back(std::stof(line));
}
}
else{
printf("Scene Data file could not be opened.\n");
}
}
// Read the radiance_map data file and populate a vector of vectors
void load_radiance_map_data(std::vector<std::vector<float>>& radiance_map_data, int& action_count){
// Open the file and read line by line
std::string line;
std::ifstream save_file ("../Radiance_Map_Data/radiance_map_data.txt");
if (save_file.is_open()){
int line_idx = 0;
while ( std::getline (save_file, line)){
if (line_idx == 0){
action_count = std::stoi(line);
}
else{
// vector of data for the current line read in
std::vector<float> data_line;
// For each space
size_t pos = 0;
std::string token;
while ((pos = line.find(" ")) != std::string::npos){
// Add the data to the list (Note first 3 numbers are the position)
token = line.substr(0, pos);
data_line.push_back(std::stof(token));
// Delete the part that we have read
line.erase(0, pos + 1);
}
// Add the final float in
data_line.push_back(std::stof(line));
// Add data line into the vector
radiance_map_data.push_back(data_line);
}
line_idx++;
}
}
else{
printf("Radiance Map Data file could not be opened.\n");
}
}
int main (int argc, char** argv) {
//////////////////////////////////////////////////////////////
/* Read in and shuffle the radiance map data */
//////////////////////////////////////////////////////////////
std::vector<std::vector<float>> radiance_map_data;
int action_count;
load_radiance_map_data(radiance_map_data, action_count);
std::random_shuffle(radiance_map_data.begin(), radiance_map_data.end());
std::cout << "Read " << radiance_map_data.size() << " lines of radiance_map data." << std::endl;
std::cout << "Action count: " << action_count << std::endl;
//////////////////////////////////////////////////////////////
/* Read in the scene data */
//////////////////////////////////////////////////////////////
std::vector<float> vertices;
load_vertices(vertices);
std::cout << "Read " << vertices.size()/3 << " vertices." << std::endl;
//////////////////////////////////////////////////////////////
/* Select test and training data */
//////////////////////////////////////////////////////////////
std::vector<std::vector<float>> training_data;
std::vector<std::vector<float>> test_data;
for(int i = 0; i < radiance_map_data.size(); i++){
// 3 for position, plus the number of actions on each line
std::vector<float> data_line(3 + action_count);
std::copy( radiance_map_data[i].begin(), radiance_map_data[i].end(), data_line.begin());
double rv = ((double) rand() / (RAND_MAX));
// Training
if( rv < 0.8 ){
training_data.push_back(data_line);
}
// Test
else{
test_data.push_back(data_line);
}
}
std::cout << "Training data set size: " << training_data.size() << std::endl;
std::cout << "Test data set size: " << test_data.size() << std::endl;
//////////////////////////////////////////////////////////////
/* Initialise the DNN */
//////////////////////////////////////////////////////////////
auto dyparams = dynet::extract_dynet_params(argc, argv);
dynet::initialize(dyparams);
dynet::ParameterCollection model;
dynet::AdamTrainer trainer(model);
DQNetwork dnn = DQNetwork();
dnn.initialize(model, vertices.size(), action_count);
//////////////////////////////////////////////////////////////
/* Load in the Parameter Values */
//////////////////////////////////////////////////////////////
std::string fname = "../Radiance_Map_Data/radiance_map_model.model";
if (LOAD_MODEL && file_exists(fname)){
dynet::TextFileLoader loader(fname);
loader.populate(model);
}
//////////////////////////////////////////////////////////////
/* Train and test the DNN */
//////////////////////////////////////////////////////////////
unsigned int num_batches = (training_data.size() + BATCH_SIZE - 1) / BATCH_SIZE;
for (int i = 0; i < EPOCHS; i++){
/* Train */
float loss = 0.f;
for (int b = 0; b < num_batches; b++){
// Initialise the computation graph
dynet::ComputationGraph graph;
// Batch start id and current batch size
unsigned int sidx = b*BATCH_SIZE;
unsigned int batch_size = std::min((unsigned int) training_data.size() - sidx, (unsigned int) BATCH_SIZE);
// Get the input data and targets for the current batch
std::vector<dynet::Expression> current_targets(batch_size);
std::vector<dynet::Expression> current_batch(batch_size);
for( unsigned int k = 0; k < batch_size; k++){
unsigned int data_idx = sidx+k;
// Get the outputs
std::vector<float> batch_targets(training_data[data_idx].begin()+3, training_data[data_idx].end());
// Get the inputs
std::vector<float> converted_vertices(vertices.size());
vec3 pos(
training_data[data_idx][0],
training_data[data_idx][1],
training_data[data_idx][2]
);
// Get the list of vertices in the converted coordinate space
convert_vertices_to_point_coord_system(
converted_vertices,
pos,
vertices
);
current_targets[k] = dynet::input(graph, {(unsigned int)action_count}, batch_targets);
current_batch[k] = dynet::input(graph, {(unsigned int)converted_vertices.size()}, converted_vertices);
}
dynet::Expression targets_batch = reshape(concatenate_cols(current_targets), dynet::Dim({(unsigned int) action_count}, batch_size));
dynet::Expression input_batch = reshape(concatenate_cols(current_batch), dynet::Dim({(unsigned int)vertices.size()}, batch_size));
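// concatenate_cols lays the per-example expressions side by side; reshaping
// with a batched Dim turns them into a single minibatched expression, so one
// forward pass through the network evaluates the whole batch.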
// Forward pass through the network
dynet::Expression output_batch = dnn.network_inference(graph, input_batch, true);
// Define the loss
dynet::Expression loss_expr = dynet::sum_batches(dynet::squared_distance(targets_batch, output_batch));
loss += dynet::as_scalar(graph.forward(loss_expr));
graph.backward(loss_expr);
trainer.update();
}
float error = 0.f;
/* Test */
for (int t = 0; t < test_data.size(); t++){
// Initialise the computational graph
dynet::ComputationGraph graph;
// Get the input expression and ground truth
std::vector<float> targets(test_data[t].begin()+3, test_data[t].end());
// Get the inputs
std::vector<float> converted_vertices(vertices.size());
vec3 pos(
test_data[t][0],
test_data[t][1],
test_data[t][2]
);
// Get the list of vertices in the converted coordinate space
convert_vertices_to_point_coord_system(
converted_vertices,
pos,
vertices
);
dynet::Expression input = dynet::input(graph, {(unsigned int)vertices.size()}, converted_vertices);
dynet::Expression ground_truth = dynet::input(graph, {(unsigned int) action_count}, targets);
// Get the predictions
dynet::Expression prediction = dnn.network_inference(graph, input, false);
// Calculate the difference between prediction and ground truth
dynet::Expression diff = dynet::squared_distance(ground_truth, prediction);
error += dynet::as_scalar(graph.forward(diff));
}
std::cout << "---------------- " << i+1 << " ----------------" << std::endl;
std::cout << " Loss: " << loss << std::endl;
std::cout << " Error: " << error << std::endl;
std::cout << "-------------------------------------" << std::endl;
std::cout << std::endl;
}
//////////////////////////////////////////////////////////////
/* Save the Model */
//////////////////////////////////////////////////////////////
if (SAVE_MODEL){
dynet::TextFileSaver saver("../Radiance_Map_Data/radiance_map_model.model");
saver.save(model);
}
return 0;
} |
be8b14c7571d9419d33a0f23142761892b64a75c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void mandelKernel(int* d_data, int width, float stepX, float stepY, float lowerX, float lowerY, int count) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int thisX = blockIdx.x * blockDim.x + threadIdx.x;
int thisY = blockIdx.y * blockDim.y + threadIdx.y;
float c_x = lowerX + thisX * stepX;
float c_y = lowerY + thisY * stepY;
float z_x = c_x;
float z_y = c_y;
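// Escape-time iteration: repeatedly apply z <- z^2 + c and stop once
// |z|^2 > 4 (the orbit is then guaranteed to diverge) or the iteration
// budget is exhausted; (new_x, new_y) below holds z^2.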
int iter;
for (iter = 0; iter < count; ++iter){
if (z_x * z_x + z_y * z_y > 4.f) break;
float new_x = z_x * z_x - z_y * z_y;
float new_y = 2.f * z_x * z_y;
z_x = c_x + new_x;
z_y = c_y + new_y;
}
int idx = thisX + thisY * width;
d_data[idx] = iter;
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
int N = resX * resY;
int size = N * sizeof(int);
int *data;
data = (int*) malloc(size);
int *d_data;
hipMalloc(&d_data, size);
dim3 threadsPerBlock(25, 25);
dim3 numBlocks(resX / threadsPerBlock.x, resY / threadsPerBlock.y);
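// NOTE: this grid assumes resX and resY are multiples of 25 (the block edge);
// the kernel has no bounds check, so any trailing pixels would be skipped.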
hipLaunchKernelGGL(( mandelKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_data, resX, stepX, stepY, lowerX, lowerY, maxIterations);
hipMemcpy(data, d_data, size, hipMemcpyDeviceToHost);
memcpy(img, data, size);
hipFree(d_data);
free(data);
}
| be8b14c7571d9419d33a0f23142761892b64a75c.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void mandelKernel(int* d_data, int width, float stepX, float stepY, float lowerX, float lowerY, int count) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int thisX = blockIdx.x * blockDim.x + threadIdx.x;
int thisY = blockIdx.y * blockDim.y + threadIdx.y;
float c_x = lowerX + thisX * stepX;
float c_y = lowerY + thisY * stepY;
float z_x = c_x;
float z_y = c_y;
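// Escape-time iteration: repeatedly apply z <- z^2 + c and stop once
// |z|^2 > 4 (the orbit is then guaranteed to diverge) or the iteration
// budget is exhausted; (new_x, new_y) below holds z^2.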
int iter;
for (iter = 0; iter < count; ++iter){
if (z_x * z_x + z_y * z_y > 4.f) break;
float new_x = z_x * z_x - z_y * z_y;
float new_y = 2.f * z_x * z_y;
z_x = c_x + new_x;
z_y = c_y + new_y;
}
int idx = thisX + thisY * width;
d_data[idx] = iter;
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
int N = resX * resY;
int size = N * sizeof(int);
int *data;
data = (int*) malloc(size);
int *d_data;
cudaMalloc(&d_data, size);
dim3 threadsPerBlock(25, 25);
dim3 numBlocks(resX / threadsPerBlock.x, resY / threadsPerBlock.y);
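// NOTE: this grid assumes resX and resY are multiples of 25 (the block edge);
// the kernel has no bounds check, so any trailing pixels would be skipped.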
mandelKernel<<<numBlocks, threadsPerBlock>>>(d_data, resX, stepX, stepY, lowerX, lowerY, maxIterations);
cudaMemcpy(data, d_data, size, cudaMemcpyDeviceToHost);
memcpy(img, data, size);
cudaFree(d_data);
free(data);
}
|
e5c99be9e215984f6b25ec56de8cf5ed13598844.hip | // !!! This is a file automatically generated by hipify!!!
#include "SurfaceConverter.h"
#include "seba-video/debug_def.h"
namespace seba
{
namespace
{
// We just implement that one case we use: extracting the green channel from a planar RGB image. We're *never* going to need the other functionalities anyway.
sebaSurfaceFormat_t GetConvertedSurfaceFormat(sebaSurfaceFormat_t surfaceFormat, sebaSurfaceConverter_t surfaceConverterType, sebaSelectChannel_t *staticSurfaceConverterParameters)
{
INSIST(surfaceConverterType == SEBA_SELECT_CHANNEL);
INSIST(((sebaSelectChannel_t *)staticSurfaceConverterParameters)->channel == SEBA_CHANNEL_G);
switch (surfaceFormat)
{
case SEBA_RGB8:
return SEBA_I8;
case SEBA_RGB12:
return SEBA_I12;
}
INSIST(false);
return SEBA_I8;
}
}
SurfaceConverter::SurfaceConverter(
sebaSurfaceConverter_t surfaceConverterType,
void *staticSurfaceConverterParameters,
unsigned maxWidth,
unsigned maxHeight,
const DeviceSurfaceBuffer &srcBuffer
) : Component(GetConvertedSurfaceFormat((sebaSurfaceFormat_t)srcBuffer.GetInfo().surfaceFmt, surfaceConverterType, (sebaSelectChannel_t *)staticSurfaceConverterParameters), Planar, maxWidth, maxHeight),
m_srcBuffer(&srcBuffer)
{
INSIST(srcBuffer.GetStorageFlavor() == Planar);
INSIST((sebaSurfaceFormat_t)srcBuffer.GetInfo().surfaceFmt == SEBA_RGB8 || (sebaSurfaceFormat_t)srcBuffer.GetInfo().surfaceFmt == SEBA_RGB12);
}
SurfaceConverter::~SurfaceConverter()
{
}
void SurfaceConverter::Transform(
void *surfaceConverterParameters,
unsigned width,
unsigned height
)
{
INSIST(surfaceConverterParameters == nullptr);
auto &info = m_deviceSurfaceBuffer.GetInfo();
INSIST(width <= info.maxWidth);
INSIST(height <= info.maxHeight);
info.width = width;
info.height = height;
info.pitch = width * SurfacePixelByteSize((sebaSurfaceFormat_t)info.surfaceFmt);
unsigned channelSize = height * info.pitch;
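// In the planar layout the full R plane precedes the G and B planes, so the
// G plane starts channelSize bytes into the source buffer.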
hipMemcpy(
m_deviceSurfaceBuffer.GetDevicePtr(),
m_srcBuffer->GetDevicePtr() + channelSize, // Skip the Red channel ...
channelSize,
hipMemcpyDeviceToDevice
);
}
} | e5c99be9e215984f6b25ec56de8cf5ed13598844.cu | #include "SurfaceConverter.h"
#include "seba-video/debug_def.h"
namespace seba
{
namespace
{
// We just implement that one case we use: extracting the green channel from a planar RGB image. We're *never* going to need the other functionalities anyway.
sebaSurfaceFormat_t GetConvertedSurfaceFormat(sebaSurfaceFormat_t surfaceFormat, sebaSurfaceConverter_t surfaceConverterType, sebaSelectChannel_t *staticSurfaceConverterParameters)
{
INSIST(surfaceConverterType == SEBA_SELECT_CHANNEL);
INSIST(((sebaSelectChannel_t *)staticSurfaceConverterParameters)->channel == SEBA_CHANNEL_G);
switch (surfaceFormat)
{
case SEBA_RGB8:
return SEBA_I8;
case SEBA_RGB12:
return SEBA_I12;
}
INSIST(false);
return SEBA_I8;
}
}
SurfaceConverter::SurfaceConverter(
sebaSurfaceConverter_t surfaceConverterType,
void *staticSurfaceConverterParameters,
unsigned maxWidth,
unsigned maxHeight,
const DeviceSurfaceBuffer &srcBuffer
) : Component(GetConvertedSurfaceFormat((sebaSurfaceFormat_t)srcBuffer.GetInfo().surfaceFmt, surfaceConverterType, (sebaSelectChannel_t *)staticSurfaceConverterParameters), Planar, maxWidth, maxHeight),
m_srcBuffer(&srcBuffer)
{
INSIST(srcBuffer.GetStorageFlavor() == Planar);
INSIST((sebaSurfaceFormat_t)srcBuffer.GetInfo().surfaceFmt == SEBA_RGB8 || (sebaSurfaceFormat_t)srcBuffer.GetInfo().surfaceFmt == SEBA_RGB12);
}
SurfaceConverter::~SurfaceConverter()
{
}
void SurfaceConverter::Transform(
void *surfaceConverterParameters,
unsigned width,
unsigned height
)
{
INSIST(surfaceConverterParameters == nullptr);
auto &info = m_deviceSurfaceBuffer.GetInfo();
INSIST(width <= info.maxWidth);
INSIST(height <= info.maxHeight);
info.width = width;
info.height = height;
info.pitch = width * SurfacePixelByteSize((sebaSurfaceFormat_t)info.surfaceFmt);
unsigned channelSize = height * info.pitch;
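// In the planar layout the full R plane precedes the G and B planes, so the
// G plane starts channelSize bytes into the source buffer.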
cudaMemcpy(
m_deviceSurfaceBuffer.GetDevicePtr(),
m_srcBuffer->GetDevicePtr() + channelSize, // Skip the Red channel ...
channelSize,
cudaMemcpyDeviceToDevice
);
}
} |
8ecaa33bf516c1974189a86ac87a664f4d4d2e0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride_x, int stride_y, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
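// Decompose the flat output index into column j, row i, channel k and batch b.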
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride_y + l;
int cur_w = w_offset + j*stride_x + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
} | 8ecaa33bf516c1974189a86ac87a664f4d4d2e0e.cu | #include "includes.h"
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride_x, int stride_y, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
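// Decompose the flat output index into column j, row i, channel k and batch b.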
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride_y + l;
int cur_w = w_offset + j*stride_x + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
} |
e65d4b9cc927be2ab2fac9a88b8a09d2016654ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* MDT_OneStation_client.cu
*
* Source code for the multi-threaded client program running on the
* CPU, hosting the GPU processing node.
*
* Copyright (c) 2021 Nitish Ragoomundun
* <lrugratz gmail com>
* @ .
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include "MDT_OneStation_client.h"
int main (int argc, char **argv)
{
/*** BEGIN Variables for telescope ***/
int Nelements, Npols, Nchannels, Ntaps, Nspectra;
int SperElement; // number of complex samples per element per packet
unsigned int NumPerPkt; // number of 32-bit numbers in each packet
unsigned long int sampleIdx; // sample index in the grand scheme
/* Number of analogue inputs for each SNAP board in 1 station */
/* Elements of array are in snapIdx order */
unsigned short int InputsPerSNAP[4] = INPUTSPERSNAP;
/*** END Variables for telescope ***/
/*** BEGIN Variables for array lengths in memory ***/
/* Size of window for PFB */
long NBytes_Window;
/* Size of stacks in RAM */
long NBytes_Stacks;
/* Size of data copied from host to GPU at input */
long NBytes_HtoD;
/* Size of input array to F-engine */
long NBytes_FInput;
/* Size of data to shift in device memory between successive PFB's */
long NBytes_DtoD;
/* Total number of samples in FInput array */
long len_FInput;
/* Size of output array from F-engine */
long NBytes_FOutput;
/* Size of input array to the GPU X-engine */
long NBytes_XInput;
/* Size of cuBLAS pointer arrays */
long NBytes_cuBLASptr_Xin;
long NBytes_cuBLASptr_Xout;
/* Size of output array copied from GPU to host at output */
long NBytes_DtoH;
/* Number of samples comprised in DtoH */
long len_XOutput;
/*** END Variables for array lengths in memory ***/
/*** BEGIN Host variables ***/
/* Stack for incoming signal data */
hipComplex *h_Stacks;
/* Pointer arrays for use with cuBLAS X-engine */
hipComplex **h_XInputPtr, **h_XOutputPtr;
/* cuBLAS scalar coefficients */
hipComplex cublas_alpha, cublas_beta;
/* Output array for GPU FX correlator (pinned) */
hipComplex *h_XOutput;
long int i, j, k, z;
unsigned short int p, q;
unsigned long int Nsamples[2]; // samples written to array per element
unsigned long int stride, stride_sample;
unsigned int stride_buf;
unsigned long int Samples_Nspectra; // number of samples in Nspectra
unsigned long int Samples_begins; // (Ntaps - 1) spectra
unsigned long int stride_Nspectra, stride_begins;
unsigned long int stride_cublas;
unsigned short int stackIdx, curr_stack;
unsigned short int begins = 1;
/* Variables for output file */
FILE *outfile;
char outfilename[OUTFILENAMELEN];
struct tm *now;
time_t now_abs;
/*** END Host variables ***/
/*** BEGIN Device array variables ***/
/* Array for window */
float *d_Window;
/* Input array for signal data */
hipComplex *d_FInput;
/* Output array from GPU PFB */
hipfftComplex *d_FOutput;
/* Input array for X-engine */
hipComplex *d_XInput;
/* Pointer arrays for use with cuBLAS */
hipComplex **d_XInputPtr, **d_XOutputPtr;
/* Output array for X-engine */
hipComplex *d_XOutput;
/*** END Device array variables ***/
/*** BEGIN CUDA control variables ***/
/* Variables for device properties */
int cu_devIdx;
hipDeviceProp_t cu_devProp;
int cu_RetVal;
char *cu_DeviceName;
int cufft_version;
int cu_MaxThreadsPerBlk;
/* GPU thread gridding parameters for device function PpS_Batch() */
int PpS_Batch_thdx, PpS_Batch_thdy, PpS_Batch_thdz; // block dim
int PpS_Batch_blkx, PpS_Batch_blky, PpS_Batch_blkz; // grid dim
/* GPU thread gridding parameters for device function ReorderFOutput() */
int ReorderFOutput_thdx, ReorderFOutput_thdy, ReorderFOutput_thdz; // block dim
int ReorderFOutput_blkx, ReorderFOutput_blky, ReorderFOutput_blkz; // grid dim
/* CUDA Streams */
hipStream_t cu_Stream;
/* cuFFT plan handle */
hipfftHandle cu_FFT_Plan;
/* cuBLAS control */
hipblasHandle_t cu_BLAS_XEngine;
hipblasStatus_t cu_BLAS_Stat;
/*** END CUDA control variables ***/
/*** BEGIN MPI variables ***/
int mpi_NumThreads, mpi_threadIdx;
char hostname[MPI_MAX_PROCESSOR_NAME];
int mpi_len;
int mpi_version, mpi_subversion;
int mpi_tag;
MPI_Status mpi_stat[4];
MPI_Request mpi_req[4];
int mpi_retval;
int green_light = 1;
/*** END MPI variables ***/
/*** BEGIN Variables for networking ***/
int listen_sock;
struct addrinfo hints, *servinfo, *addrinfo_ptr;
int retval;
int numbytes;
struct sockaddr_storage remote_addr;
socklen_t addr_len = sizeof(remote_addr);
char str_addr[INET6_ADDRSTRLEN];
/* Buffers to store packets from network socket */
unsigned int *udp_buf, *mpi_buf;
/*** END Variables for networking ***/
/*
* Telescope mode: Standby
* Preliminary settings and system initialisation
*
*/
/* Initialise MPI threads */
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_threadIdx);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_NumThreads);
/* Retrieve device properties */
cu_RetVal = hipGetDevice(&cu_devIdx);
if ( (cu_RetVal = cudaErrCheck()) != 0 ) exit(cu_RetVal);
cu_RetVal = hipGetDeviceProperties(&cu_devProp, cu_devIdx);
if ( (cu_RetVal = cudaErrCheck()) != 0 ) exit(cu_RetVal);
cu_DeviceName = cu_devProp.name; // Name of device
cu_MaxThreadsPerBlk = cu_devProp.maxThreadsPerBlock; // Maximum number of threads per block
/*** BEGIN Master thread parses command line arguments ***/
if (mpi_threadIdx == MPIMASTERIDX)
{
/* Check if required number of MPI threads is met */
if (mpi_NumThreads != REQNUMTHREADS)
{
printf("This program must run with %d threads!\n", REQNUMTHREADS);
MPI_Abort(MPI_COMM_WORLD, 1);
exit(1);
}
/* Check number of command line arguments */
if (argc <= 6) // argc <= (number of expected arguments)
{
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 2);
exit(2);
}
/* Validate values input to program */
Nelements = atoi(argv[1]);
if (Nelements < MINELEMENTS || Nelements > MAXELEMENTS)
{
printf("%s: Invalid number of elements!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 3);
exit(3);
}
/* Number of polarisations for each element */
Npols = atoi(argv[2]);
if (Npols < 1 || Npols > 2)
{
printf("%s: Invalid number of polarisations!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 4);
exit(4);
}
/* Number of frequency channels in each spectrum */
Nchannels = atoi(argv[3]);
if (Nchannels < MINCHANNELS || Nchannels > MAXCHANNELS)
{
printf("%s: Invalid number of frequency channels!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 5);
exit(5);
}
/* Number of taps in PFB */
Ntaps = atoi(argv[4]);
if (Ntaps < MINTAPS || Ntaps > MAXTAPS)
{
printf("%s: Invalid number of taps for PFB!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 6);
exit(6);
}
/* Number of spectra for 1 integration time */
Nspectra = atoi(argv[5]);
if (Nspectra < MINSPECTRA || Nspectra > MAXSPECTRA)
{
printf("%s: Invalid number of spectra!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 7);
exit(7);
}
/* Number of complex samples per element in 1 packet */
SperElement = atoi(argv[6]);
if (SperElement <= 0)
{
printf("%s: Invalid SperElement!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 8);
exit(8);
}
}
/*** END Master thread parses command line arguments ***/
/*** SYNC MPI Threads **/
MPI_Barrier(MPI_COMM_WORLD);
/*** Create CUDA stream for all MPI threads ***/
if (mpi_threadIdx != MPIMASTERIDX)
hipStreamCreate(&cu_Stream);
/*** BEGIN Pipeline characteristics according to input arguments ***/
/* All MPI threads take command line parameters */
Nelements = atoi(argv[1]);
Npols = atoi(argv[2]);
Nchannels = atoi(argv[3]);
Ntaps = atoi(argv[4]);
Nspectra = atoi(argv[5]);
SperElement = atoi(argv[6]);
/* Number of 32-bit numbers in 1 UDP packet
* Includes the header
* This is an upper limit, assuming that the maximum number
* of inputs is at the first SNAP board
*/
NumPerPkt = (2 + (InputsPerSNAP[0] * SperElement)) * 2;
/* Number of complex samples per element in 1 stack */
Samples_Nspectra = Nspectra * Nchannels;
/* Number of samples, per element, to accumulate in the beginning of
* acquisition due to the required first (Ntaps - 1) x Nchannels samples
*/
Samples_begins = (Ntaps - 1) * Nchannels;
/* Strides due to the above */
stride_Nspectra = Samples_Nspectra * Npols*Nelements;
stride_begins = Samples_begins * Npols*Nelements;
len_FInput = stride_begins + stride_Nspectra;
/*** BEGIN Host array sizes ***/
/* Size of window for PFB */
NBytes_Window = sizeof(float) * Ntaps * Nchannels;
/* Size of stack */
NBytes_Stacks = sizeof(hipComplex) * 2 * Nspectra * Npols*Nelements * Nchannels;
/* Size of data copied from host to device at input */
NBytes_HtoD = sizeof(hipComplex) * Nspectra * Npols*Nelements * Nchannels;
// input
/* Size of output array copied from device to host at output */
NBytes_DtoH = sizeof(hipComplex) * Npols*Nelements * Npols*Nelements * Nchannels;
// output
/* Number of samples comprised in DtoH */
len_XOutput = Npols*Nelements * Npols*Nelements * Nchannels;
/*** END Host array sizes ***/
/*** BEGIN Device array sizes ***/
/* Size of input array to F-engine */
NBytes_FInput = sizeof(hipComplex) * (Nspectra + Ntaps - 1) * Npols*Nelements * Nchannels;
/* Size of data to shift in device memory between successive PFB's */
NBytes_DtoD = sizeof(hipComplex) * (Ntaps - 1) * Npols*Nelements * Nchannels;
/* Size of output array from F-engine */
NBytes_FOutput = sizeof(hipfftComplex) * Nspectra * Npols*Nelements * Nchannels;
/* Size of input array to the GPU X-engine */
NBytes_XInput = sizeof(hipComplex) * Nspectra * Npols*Nelements * Nchannels;
/* Size of cuBLAS pointer arrays */
NBytes_cuBLASptr_Xin = sizeof(hipComplex *) * Nchannels;
NBytes_cuBLASptr_Xout = sizeof(hipComplex *) * Nchannels;
/*** END Device array sizes ***/
/*** END Pipeline characteristics according to input arguments ***/
/*** BEGIN Master MPI thread verifies if packet size is within system limit and print info ***/
if (mpi_threadIdx == MPIMASTERIDX)
{
/* Ensure that requested packet size according to SperElement does not
* exceed the 8.5 KiB limit for the network jumbo frame.
*/
if (NumPerPkt*4 > 8704)
{
printf("%s: Requested Nelements and SperElement cause packet size to exceed 8.5KiB limit!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 9);
exit(9);
}
/* If everything OK, print info and carry on */
MPI_Get_processor_name(hostname, &mpi_len);
MPI_Get_version(&mpi_version, &mpi_subversion);
hipfftGetVersion(&cufft_version);
printf("\n");
printf("**************************************************\n");
printf(" Mode: Stand-by \n");
printf("**************************************************\n\n");
printf("Thread %d : Current machine: %s\n", mpi_threadIdx, hostname);
printf("Thread %d : Total number of MPI threads: %d\n", mpi_threadIdx, mpi_NumThreads);
printf("Thread %d : Version of MPI: %d.%d\n", mpi_threadIdx, mpi_version, mpi_subversion);
printf("Thread %d : Using %s (ID: %d) as device.\n", mpi_threadIdx, cu_DeviceName, cu_devIdx);
printf("Thread %d : GPU F-engine is powered by cuFFT version %d.\n", mpi_threadIdx, cufft_version);
printf("\n");
}
/*** END Master MPI thread verifies if packet size is within system limit and print info ***/
/*** SYNC MPI Threads **/
MPI_Barrier(MPI_COMM_WORLD);
switch(mpi_threadIdx)
{
/*** BEGIN Thread 0 ***/
case 0:
{
/* Create buffers to pass packets around */
udp_buf = (unsigned int *)malloc(NumPerPkt * sizeof(unsigned int));
mpi_buf = (unsigned int *)malloc(NumPerPkt * sizeof(unsigned int));
/*** BEGIN Setting up UDP socket for listen ***/
printf("Thread %d: Setting up socket ...\n", mpi_threadIdx);
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_UNSPEC; // force IPv4
hints.ai_socktype = SOCK_DGRAM;
hints.ai_flags = AI_PASSIVE; // use my IP
if ((retval = getaddrinfo(NULL, MYPORT, &hints, &servinfo)) != 0)
{
fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(retval));
MPI_Abort(MPI_COMM_WORLD, 10);
exit(10);
}
// loop through all the results and bind to the first we can
for(addrinfo_ptr=servinfo; addrinfo_ptr!=NULL; addrinfo_ptr=addrinfo_ptr->ai_next)
{
if ((listen_sock = socket(addrinfo_ptr->ai_family, addrinfo_ptr->ai_socktype, addrinfo_ptr->ai_protocol)) == -1)
{
perror("socket");
continue;
}
if (bind(listen_sock, addrinfo_ptr->ai_addr, addrinfo_ptr->ai_addrlen) == -1)
{
close(listen_sock);
perror("bind");
continue;
}
break;
}
if (addrinfo_ptr == NULL)
{
fprintf(stderr, "listener: failed to bind socket\n");
MPI_Abort(MPI_COMM_WORLD, 11);
exit(11);
}
/*** END Setting up UDP socket for listen ***/
}
break;
/*** END Thread 0 ***/
/*** BEGIN Thread 1 ***/
case 1:
{
/* Create buffers to pass packets around */
udp_buf = (unsigned int *)malloc(NumPerPkt * sizeof(unsigned int));
mpi_buf = (unsigned int *)malloc(NumPerPkt * sizeof(unsigned int));
/*** BEGIN Host: Allocate arrays ***/
/* Stack for incoming signal data (page-locked) */
hipHostMalloc((void **)&h_Stacks, NBytes_Stacks, hipHostMallocDefault);
/*** END Host: Allocate arrays ***/
/*** BEGIN Device: Allocate arrays ***/
/* Input array to F-engine */
hipMalloc((void **)&d_FInput, NBytes_FInput);
/*** END Device: Allocate arrays ***/
printf("Thread %d: Done with allocating arrays.\n", mpi_threadIdx);
}
break;
/*** END Thread 1 ***/
/*** BEGIN Thread 2 ***/
case 2:
{
/*** BEGIN Host: Allocate arrays ***/
/* Pointer arrays for use with cuBLAS X-engine */
h_XInputPtr = (hipComplex **)malloc(NBytes_cuBLASptr_Xin);
h_XOutputPtr = (hipComplex **)malloc(NBytes_cuBLASptr_Xout);
/*** END Host: Allocate arrays ***/
/*** BEGIN Device: Allocate arrays ***/
/* Array for window */
hipMalloc((void **)&d_Window, NBytes_Window);
/* Input array to F-engine */
hipMalloc((void **)&d_FInput, NBytes_FInput);
/* Output array from F-engine */
hipMalloc((void **)&d_FOutput, NBytes_FOutput);
/* Input array for X-engine */
hipMalloc((void **)&d_XInput, NBytes_XInput);
/* Pointer arrays for use with cuBLAS */
hipMalloc((void ***)&d_XInputPtr, NBytes_cuBLASptr_Xin);
hipMalloc((void ***)&d_XOutputPtr, NBytes_cuBLASptr_Xout);
/* Output array for X-engine */
hipMalloc((void **)&d_XOutput, NBytes_DtoH);
/*** END Device: Allocate arrays ***/
printf("Thread %d: Done with allocating arrays.\n", mpi_threadIdx);
/*** BEGIN Device: Initialise window coefficients for PFB ***/
printf("\nThread %d: Initialising window coefficients for PFB ...\n", mpi_threadIdx);
printf("Thread %d: Grid configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, cu_MaxThreadsPerBlk, 1, 1);
printf("Thread %d: Block configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, (Ntaps*Nchannels)/cu_MaxThreadsPerBlk + (((Ntaps*Nchannels)%cu_MaxThreadsPerBlk != 0) ? 1 : 0), 1, 1);
hipLaunchKernelGGL(( PFB_Window_ExBlackman), dim3(cu_MaxThreadsPerBlk), dim3((Ntaps*Nchannels)/cu_MaxThreadsPerBlk + (((Ntaps*Nchannels)%cu_MaxThreadsPerBlk != 0) ? 1 : 0)), 0, cu_Stream , Nchannels, Ntaps, d_Window);
hipStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 12);
exit(12);
}
printf("Thread %d: Done.\n", mpi_threadIdx);
/*** END Device: Initialise window coefficients for PFB ***/
/*** BEGIN Device: PpS_Batch() grid characteristics ***/
printf("\nThread %d: Computing GPU grid structure for the PpS_Batch() kernel ...\n", mpi_threadIdx);
PpS_Batch_thdx = cu_MaxThreadsPerBlk;
PpS_Batch_thdy = 1;
PpS_Batch_thdz = 1;
PpS_Batch_blkx = Nchannels/PpS_Batch_thdx + ((Nchannels%PpS_Batch_thdx != 0) ? 1 : 0);
PpS_Batch_blky = Nspectra;
PpS_Batch_blkz = Npols*Nelements;
printf("Thread %d: Grid configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, PpS_Batch_blkx, PpS_Batch_blky, PpS_Batch_blkz);
printf("Thread %d: Block configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, PpS_Batch_thdx, PpS_Batch_thdy, PpS_Batch_thdz);
/*** END Device: PpS_Batch() grid characteristics ***/
/*** BEGIN Device: prepare cuFFT plan ***/
printf("\nThread %d: Constructing cuFFT plan prior to processing ...\n", mpi_threadIdx);
if ((cu_RetVal = hipfftPlan1d(&cu_FFT_Plan, Nchannels, HIPFFT_C2C, Nspectra*Npols*Nelements)) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error (%d): failed to create FFT plan for F-engine.\n", cu_RetVal);
MPI_Abort(MPI_COMM_WORLD, 13);
exit(13);
}
printf("\nThread %d: Setting up stream for cuFFT ...\n", mpi_threadIdx);
hipfftSetStream(cu_FFT_Plan, cu_Stream);
/*** END Device: prepare cuFFT plan ***/
/*** BEGIN Device: ReorderFOutput() grid characteristics ***/
printf("\nThread %d: Computing GPU grid structure for the ReorderFOutput() kernel ...\n", mpi_threadIdx);
ReorderFOutput_thdx = 32;
ReorderFOutput_thdy = 32;
ReorderFOutput_thdz = 1;
ReorderFOutput_blkx = Nspectra;
ReorderFOutput_blky = Nchannels/ReorderFOutput_thdy + ((Nchannels%ReorderFOutput_thdy != 0) ? 1 : 0);
ReorderFOutput_blkz = (Npols*Nelements)/ReorderFOutput_thdx + (((Npols*Nelements)%ReorderFOutput_thdx != 0) ? 1 : 0);
printf("Thread %d: Grid configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, ReorderFOutput_blkx, ReorderFOutput_blky, ReorderFOutput_blkz);
printf("Thread %d: Block configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, ReorderFOutput_thdx, ReorderFOutput_thdy, ReorderFOutput_thdz);
/*** END Device: ReorderFOutput() grid characteristics ***/
/*** BEGIN Device: Initialise cuBLAS ***/
printf("\nThread %d: Initialising pointer arrays for use with cuBLAS ...\n", mpi_threadIdx);
for (i=0 ; i<Nchannels ; i++)
{
h_XInputPtr[i] = d_XInput + i*Npols*Nelements*Nspectra;
h_XOutputPtr[i] = d_XOutput + i*Npols*Nelements*Npols*Nelements;
}
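// One pointer per frequency channel: each selects that channel's block of
// input samples and its output correlation matrix, presumably consumed by a
// batched cuBLAS GEMM call in the X-engine.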
/* Copy pointer arrays to device */
printf("Copying cuBLAS pointer arrays to device ...\n");
hipMemcpy((void **)d_XInputPtr, (void **)h_XInputPtr, NBytes_cuBLASptr_Xin, hipMemcpyHostToDevice);
hipDeviceSynchronize();
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 14);
exit(14);
}
hipMemcpy((void **)d_XOutputPtr, (void **)h_XOutputPtr, NBytes_cuBLASptr_Xout, hipMemcpyHostToDevice);
hipDeviceSynchronize();
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 15);
exit(15);
}
free(h_XInputPtr);
free(h_XOutputPtr);
/* Initialise scalar coefficients */
cublas_alpha.x = 1.0f / Nspectra;
cublas_alpha.y = 0.0f;
cublas_beta.x = 0.0f;
cublas_beta.y = 0.0f;
/* Initialise cuBLAS and set stream */
printf("\nThread %d: Initialising cuBLAS X-engine and associating stream ...\n", mpi_threadIdx);
cu_BLAS_Stat = hipblasCreate(&cu_BLAS_XEngine);
hipDeviceSynchronize();
if (cu_BLAS_Stat != HIPBLAS_STATUS_SUCCESS)
{
printf("\nThread %d: Failed to initialise cuBLAS (cublasStat = %d)!\n", mpi_threadIdx, (int)cu_BLAS_Stat);
MPI_Abort(MPI_COMM_WORLD, 16);
exit(16);
}
cu_BLAS_Stat = hipblasSetStream(cu_BLAS_XEngine, cu_Stream);
if (cu_BLAS_Stat != HIPBLAS_STATUS_SUCCESS)
{
printf("\nThread %d: Failed to set cuBLAS stream (cublasStat = %d)!\n", mpi_threadIdx, (int)cu_BLAS_Stat);
MPI_Abort(MPI_COMM_WORLD, 17);
exit(17);
}
/*** END Device: Initialise cuBLAS ***/
printf("Thread %d: Done.\n", mpi_threadIdx);
}
break;
/*** END Thread 2 ***/
/*** BEGIN Thread 3 ***/
case 3:
{
/*** BEGIN Host: Allocate arrays ***/
hipHostMalloc((void **)&h_XOutput, NBytes_DtoH, hipHostMallocDefault);
/*** END Host: Allocate arrays ***/
/*** BEGIN Device: Allocate arrays ***/
/* Input array to F-engine */
hipMalloc((void **)&d_FInput, NBytes_FInput);
/* Output array for X-engine */
hipMalloc((void **)&d_XOutput, NBytes_DtoH);
/*** END Device: Allocate arrays ***/
printf("Thread %d: Done with allocating arrays.\n", mpi_threadIdx);
}
break;
/*** END Thread 3 ***/
}
/*** SYNC MPI Threads **/
MPI_Barrier(MPI_COMM_WORLD);
if (mpi_threadIdx == MPIMASTERIDX)
printf("\nThread %d: Finished system initialisation.\n\n", mpi_threadIdx);
/*
* Telescope mode: Observing
*
*/
switch(mpi_threadIdx)
{
/*** BEGIN Thread 0 ***/
case 0:
{
printf("\n");
printf("**************************************************\n");
printf(" Mode: Observing \n");
printf("**************************************************\n\n");
printf("Thread %d: Listening for connections on UDP socket ...\n", mpi_threadIdx);
/*** First Listen and transfer to thread 1 ***/
if ((numbytes = recvfrom(listen_sock, udp_buf, NumPerPkt * sizeof(unsigned int), 0, (struct sockaddr *)&remote_addr, &addr_len)) == -1)
{
perror("recvfrom");
MPI_Abort(MPI_COMM_WORLD, 18);
exit(18);
}
/* Transfer packet to MPI buffer */
memcpy((void *)mpi_buf, (const void *)udp_buf, NumPerPkt * sizeof(unsigned int));
/* Send packet to MPI thread 1 */
mpi_tag = 0;
MPI_Isend((const void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 1, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buffer, count, datatype, dest, tag, comm, &request */
/*** BEGIN LOOP Listen and transfer packet ***/
while (1)
{
/* Listen */
if ((numbytes = recvfrom(listen_sock, udp_buf, NumPerPkt * sizeof(unsigned int), 0, (struct sockaddr *)&remote_addr, &addr_len)) == -1)
{
perror("recvfrom");
MPI_Abort(MPI_COMM_WORLD, 19);
exit(19);
}
/* Block if the previous Isend did not finish */
MPI_Wait(&mpi_req[0], &mpi_stat[0]);
/* Transfer packet to MPI buffer */
memcpy((void *)mpi_buf, (const void *)udp_buf, NumPerPkt * sizeof(unsigned int));
/* Send packet to MPI thread 1 */
// mpi_tag = 0
MPI_Isend((const void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 1, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buffer, count, datatype, dest, tag, comm, &request */
}
/*** END LOOP Listen and transfer packet ***/
}
break;
/*** END Thread 0 ***/
/*** BEGIN Thread 1 ***/
case 1:
{
Nsamples[0] = 0;
Nsamples[1] = 0;
/*** BEGIN First (Ntaps - 1) x Nchannels samples ***/
/* First receive packet from MPI thread 0 */
mpi_tag = 0;
MPI_Irecv((void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 0, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buf, count, dtype, src, tag, comm, &stat */
do
{
/* Wait for non-blocking recv to conclude */
MPI_Wait(&mpi_req[0], &mpi_stat[0]);
/* Transfer packet to MPI buffer */
memcpy((void *)udp_buf, (const void *)mpi_buf, NumPerPkt * sizeof(unsigned int));
/* Non-blocking Recv to wait for thread 0 to pass packet */
// mpi_tag = 0
MPI_Irecv((void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 0, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buf, count, dtype, src, tag, comm, &request */
/*** BEGIN Depacketise ***/
// Data array is using the following layout
// Stack -> Spectrum -> Element -> Pol -> Channel
for (i=0 ; i<SperElement ; i++)
{
/* Calculate index for the current samples */
sampleIdx = (udp_buf[2] * SperElement) + i; // element sample index in the grand scheme
// spectrumIdx = sampleIdx / Nchannels
// channelIdx = sampleIdx % Nchannels
// stackIdx = (spectrumIdx / Nspectra) % 2
/* Cater for first (Ntaps - 1) x Nchannels samples at the beginning */
if (sampleIdx < Samples_begins)
stackIdx = 1;
else
{
sampleIdx -= Samples_begins;
stackIdx = 0;
}
/* Calculate the stride where to write data in array */
// Stride due to stack:
// stackIdx * Nspectra * Npols*Nelements * Nchannels
// Spectrum stride:
// (sampleIdx / Nchannels) * Npols*Nelements * Nchannels
stride = (stackIdx*Nspectra + (sampleIdx / Nchannels)) * Npols*Nelements * Nchannels;
// Element stride due to stationIdx
stride += (udp_buf[0] * INPUTSPERSTATION) * Nchannels;
// Element stride due to snapIdx
for (k=0 ; k<udp_buf[1] ; k++)
stride += (InputsPerSNAP[k] * Nchannels);
// Channel stride
stride += (sampleIdx % Nchannels);
/* Loop over elements in this packet */
for (j=0 ; j<InputsPerSNAP[udp_buf[1]] ; j++)
{
/* Stride for current sample in MPI buffer */
stride_buf = 4 + 2*(i*InputsPerSNAP[udp_buf[1]] + j);
/* Stride for current sample in data array */
stride_sample = stride + j*Nchannels;
// Convert back to signed integer, then to float, scaling by 2^-19 (19 fractional bits)
// Re
if (udp_buf[stride_buf] >= 2147483648)
h_Stacks[stride_sample].x = -1.9073486328125e-06 * (int)(4294967295 - udp_buf[stride_buf] + 1);
else
h_Stacks[stride_sample].x = 1.9073486328125e-06 * (int)(udp_buf[stride_buf]);
// Im
if (udp_buf[stride_buf + 1] >= 2147483648)
h_Stacks[stride_sample].y = -1.9073486328125e-06 * (int)(4294967295 - udp_buf[stride_buf + 1] + 1);
else
h_Stacks[stride_sample].y = 1.9073486328125e-06 * (int)(udp_buf[stride_buf + 1]);
}
Nsamples[stackIdx]++;
}
}
while (Nsamples[1] < Samples_begins);
/* Transfer the first (Ntaps - 1) x Nchannels samples to data array on device */
hipMemcpyAsync((void *)d_FInput, (void *)&h_Stacks[stride_Nspectra], NBytes_DtoD, hipMemcpyHostToDevice, cu_Stream);
/* Update stack status */
Nsamples[1] = 0;
/*** END Depacketise ***/
/*** END First (Ntaps - 1) x Nchannels samples ***/
/*** BEGIN LOOP Service subsequent packets from MPI thread 0 ***/
curr_stack = 0; // current data stack being prepared to send to GPU
while (1)
{
/* Wait for non-blocking recv to conclude */
MPI_Wait(&mpi_req[0], &mpi_stat[0]);
/* Transfer packet to MPI buffer */
memcpy((void *)udp_buf, (const void *)mpi_buf, NumPerPkt * sizeof(unsigned int));
/* Non-blocking Recv to wait for thread 0 to pass packet */
mpi_tag = 0;
MPI_Irecv((void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 0, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buf, count, dtype, src, tag, comm, &request */
/*** BEGIN Depacketise ***/
// Data array is using the following layout
// Stack -> Spectrum -> Element -> Pol -> Channel
for (i=0 ; i<SperElement ; i++)
{
/* Calculate indices for the current sample */
sampleIdx = (udp_buf[2] * SperElement) - Samples_begins + i; // element sample index in the grand scheme
// spectrumIdx = sampleIdx / Nchannels
// channelIdx = sampleIdx % Nchannels
// stackIdx = (spectrumIdx / Nspectra) % 2
/* Calculate which part of the stack sample should be placed */
stackIdx = (sampleIdx / Samples_Nspectra) % 2; // either 0 or 1
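// The two stacks ping-pong: packets fill one stack while the other is
// asynchronously copied to the GPU.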
/* Calculate the stride where to write data in array */
// Stride due to stack:
// stackIdx * Nspectra * Npols*Nelements * Nchannels
// Spectrum stride:
// ((sampleIdx / Nchannels) % Nspectra) * Npols*Nelements * Nchannels
stride = (stackIdx*Nspectra + ((sampleIdx / Nchannels) % Nspectra)) * Npols*Nelements * Nchannels;
// Element stride due to stationIdx
stride += (udp_buf[0] * INPUTSPERSTATION) * Nchannels;
// Element stride due to snapIdx
for (k=0 ; k<udp_buf[1] ; k++)
stride += (InputsPerSNAP[k] * Nchannels);
// Channel stride
stride += (sampleIdx % Nchannels);
/* Loop over elements in this packet */
for (j=0 ; j<InputsPerSNAP[udp_buf[1]] ; j++)
{
/* Stride for current sample in MPI buffer */
stride_buf = 4 + 2*(i*InputsPerSNAP[udp_buf[1]] + j);
/* Stride for current sample in data array */
stride_sample = stride + j*Nchannels;
// Convert back to signed integer, then to float, scaling by 2^-19 (19 fractional bits)
// Re
if (udp_buf[stride_buf] >= 2147483648)
h_Stacks[stride_sample].x = -1.9073486328125e-06 * (int)(4294967295 - udp_buf[stride_buf] + 1);
else
h_Stacks[stride_sample].x = 1.9073486328125e-06 * (int)(udp_buf[stride_buf]);
// Im
if (udp_buf[stride_buf + 1] >= 2147483648)
h_Stacks[stride_sample].y = -1.9073486328125e-06 * (int)(4294967295 - udp_buf[stride_buf + 1] + 1);
else
h_Stacks[stride_sample].y = 1.9073486328125e-06 * (int)(udp_buf[stride_buf + 1]);
}
Nsamples[stackIdx]++;
}
/*** END Depacketise ***/
/*** BEGIN Transfer data to device if current stack full ***/
if (Nsamples[curr_stack] == Samples_Nspectra)
{
if (begins == 1)
{
/* Sync with device to see if last hipMemcpyAsync finished */
hipStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 20);
exit(20);
}
/* Transfer content of current stack to data array */
hipMemcpyAsync((void *)&d_FInput[stride_begins], (void *)&h_Stacks[curr_stack * stride_Nspectra], NBytes_HtoD, hipMemcpyHostToDevice, cu_Stream);
begins = 0;
}
else
{
/* Sync with device to see if last hipMemcpyAsync finished */
hipStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 21);
exit(21);
}
/* Share d_FInput with MPI thread 2 to trigger processing */
mpi_tag = 1;
MPI_Isend((const void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 2, mpi_tag, MPI_COMM_WORLD, &mpi_req[1]);
/* &buffer, count, datatype, dest, tag, comm, &request */
/* Receive device pointers from MPI thread 3 to be able to carry on */
mpi_tag = 3;
MPI_Recv((void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 3, mpi_tag, MPI_COMM_WORLD, &mpi_stat[1]);
/* &buf, count, dtype, src, tag, comm, &stat */
/* If lights are green, transfer content of current stack to data array */
hipMemcpyAsync((void *)&d_FInput[stride_begins], (void *)&h_Stacks[curr_stack * stride_Nspectra], NBytes_HtoD, hipMemcpyHostToDevice, cu_Stream);
}
/* Update stack status */
Nsamples[curr_stack] = 0;
/* Swap current stack being serviced for GPU */
curr_stack = (curr_stack == 0);
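// (curr_stack == 0) evaluates to 1 when curr_stack is 0 and to 0
// otherwise, toggling between the two ping-pong stacks: 0 -> 1 -> 0 ...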
}
/*** END Transfer data to device if current stack full ***/
}
/*** END LOOP Service subsequent packets from MPI thread 0 ***/
}
break;
/*** END Thread 1 ***/
/*** BEGIN Thread 2 ***/
case 2:
{
while (1)
{
/* Receive d_FInput pointers to begin processing */
mpi_tag = 1;
MPI_Recv((void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 1, mpi_tag, MPI_COMM_WORLD, &mpi_stat[0]);
/* &buf, count, dtype, src, tag, comm, &stat */
/*** BEGIN Device: F-engine ***/
/*** Compute polyphase structure ***/
hipLaunchKernelGGL(( PpS_Batch), dim3(dim3(PpS_Batch_blkx,PpS_Batch_blky,PpS_Batch_blkz)), dim3(dim3(PpS_Batch_thdx,PpS_Batch_thdy,PpS_Batch_thdz)), 0, cu_Stream , Nchannels, Ntaps, d_Window, d_FInput, d_FOutput);
/* Sync with device to see if PpS_Batch finished */
hipStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 22);
exit(22);
}
/* Share input array pointers with MPI thread 3 for (Ntaps - 1) shift */
if (begins != 1)
MPI_Wait(&mpi_req[1], &mpi_stat[1]); // Wait for non-blocking send to conclude
mpi_tag = 2;
MPI_Isend((const void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 3, mpi_tag, MPI_COMM_WORLD, &mpi_req[1]);
/* &buffer, count, datatype, dest, tag, comm, &request */
/*** In-place Fast Fourier Transform ***/
hipfftExecC2C(cu_FFT_Plan, d_FOutput, d_FOutput, HIPFFT_FORWARD);
/*** END Device: F-engine ***/
/*** BEGIN Device: X-engine ***/
/* Re-order array for input to cuBLAS function */
hipLaunchKernelGGL(( ReorderFOutput), dim3(dim3(ReorderFOutput_blkx,ReorderFOutput_blky,ReorderFOutput_blkz)), dim3(dim3(ReorderFOutput_thdx,ReorderFOutput_thdy,ReorderFOutput_thdz)), 0, cu_Stream , Nelements, Npols, Nchannels, d_FOutput, d_XInput);
/* Block until MPI thread 3 gives green light to overwrite device output array */
if (begins != 1)
{
mpi_tag = 5;
MPI_Recv((void *)&green_light, 1, MPI_INT, 3, mpi_tag, MPI_COMM_WORLD, &mpi_stat[2]);
/* &buf, count, dtype, src, tag, comm, &stat */
}
/*** Cross-Correlation engine using cuBLAS ***/
cu_BLAS_Stat = HIPBLAS_STATUS_EXECUTION_FAILED;
cu_BLAS_Stat = hipblasCgemmBatched(cu_BLAS_XEngine, HIPBLAS_OP_N, HIPBLAS_OP_C, Npols*Nelements, Npols*Nelements, Nspectra, &cublas_alpha, (const hipComplex **)d_XInputPtr, Npols*Nelements, (const hipComplex **)d_XInputPtr, Npols*Nelements, &cublas_beta, d_XOutputPtr, Npols*Nelements, Nchannels);
hipStreamSynchronize(cu_Stream);
if (cu_BLAS_Stat != HIPBLAS_STATUS_SUCCESS)
{
printf("\nThread %d: cuBLAS X-engine failed (cublasStat = %d)!\n", mpi_threadIdx, (int)cu_BLAS_Stat);
MPI_Abort(MPI_COMM_WORLD, 23);
exit(23);
}
/*** END Device: X-engine ***/
/* Signal MPI thread 3 that processing is done and it can carry on
* with writing output to file.
*/
if (begins != 1)
MPI_Wait(&mpi_req[3], &mpi_stat[3]); // Wait for non-blocking send to conclude
else
begins = 0;
mpi_tag = 4;
MPI_Isend((const void *)d_XOutput, len_XOutput, MPI_UNSIGNED_LONG, 3, mpi_tag, MPI_COMM_WORLD, &mpi_req[3]);
/* &buffer, count, datatype, dest, tag, comm, &request */
}
}
break;
/*** END Thread 2 ***/
/*** BEGIN Thread 3 ***/
case 3:
{
/*** BEGIN LOOP Copy data/output and write to file ***/
while (1)
{
/* Receive d_FInput pointers from MPI thread 2 */
mpi_tag = 2;
MPI_Recv((void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 2, mpi_tag, MPI_COMM_WORLD, &mpi_stat[0]);
/* &buf, count, dtype, src, tag, comm, &stat */
/* Copy last (Ntaps - 1) spectra to the beginning of input array to PFB */
hipMemcpyAsync((void *)d_FInput, (void *)&d_FInput[stride_Nspectra], NBytes_DtoD, hipMemcpyDeviceToDevice, cu_Stream);
/* Sync with device to see if copying completed */
hipStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 24);
exit(24);
}
/* If hipMemcpyDeviceToDevice finished, share d_FInput device
* pointers with MPI thread 1 */
if (begins != 1)
MPI_Wait(&mpi_req[1], &mpi_stat[1]); // Wait for non-blocking send to conclude
mpi_tag = 3;
MPI_Isend((const void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 1, mpi_tag, MPI_COMM_WORLD, &mpi_req[1]);
/* &buffer, count, datatype, dest, tag, comm, &request */
/* Receive d_XOutput device pointers to retrieve results */
mpi_tag = 4;
MPI_Recv((void *)d_XOutput, len_XOutput, MPI_UNSIGNED_LONG, 2, mpi_tag, MPI_COMM_WORLD, &mpi_stat[2]);
/* &buf, count, dtype, src, tag, comm, &stat */
/* Copy results from device to host memory */
hipMemcpyAsync((void *)h_XOutput, (void *)d_XOutput, NBytes_DtoH, hipMemcpyDeviceToHost, cu_Stream);
/*** BEGIN Write output to file ***/
/* Take time stamp */
now_abs = time(0);
now = localtime(&now_abs);
/* Construct output file name */
strftime(outfilename, OUTFILENAMELEN*sizeof(char), "MDT_%Y%m%d_%H%M%S.csv", now);
if ((outfile = fopen(outfilename, "w")) == NULL)
{
printf("Cannot open or create file %s!\n", outfilename);
MPI_Abort(MPI_COMM_WORLD, 25);
exit(25);
}
/* Sync with device so that asynchronous copy to host completes */
hipStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 26);
exit(26);
}
/* Signal MPI thread 2 that it can overwrite the device output array */
if (begins != 1)
MPI_Wait(&mpi_req[3], &mpi_stat[3]); // Wait for non-blocking send to conclude
else
begins = 0;
mpi_tag = 5;
MPI_Isend((const void *)&green_light, 1, MPI_INT, 2, mpi_tag, MPI_COMM_WORLD, &mpi_req[3]);
/* &buffer, count, datatype, dest, tag, comm, &request */
printf("Writing output to file %s ...\n", outfilename);
for (j=0 ; j<Nelements ; j++) // element row index
for (p=0 ; p<Npols ; p++) // polarisation of element j
for (k=j ; k<Nelements ; k++) // element column index
for (q=p ; q<Npols ; q++) // polarisation of element k
{
/* CUFFT puts the positive frequency spectrum at the first
Nchannels/2 positions in the array, and the negative
frequencies follow. Therefore, the lowest B/2 frequencies
of the band are at the end of the array */
z = 0;
for (i=Nchannels/2 ; i<Nchannels ; i++)
{
stride_cublas = i*Npols*Nelements*Npols*Nelements + (k*Npols + q)*Npols*Nelements + (j*Npols + p); // column-major array
fprintf(outfile, "%ld,%d,%ld,%d,%ld,%.6f,%.6f\n", j, p, k, q, z, h_XOutput[stride_cublas].x, h_XOutput[stride_cublas].y);
// elementIdx_k, polIdx_q, elementIdx_j, polIdx_p, channelIdx, Re_GPU, Im_GPU
z++;
}
for (i=0 ; i<Nchannels/2 ; i++)
{
stride_cublas = i*Npols*Nelements*Npols*Nelements + (k*Npols + q)*Npols*Nelements + (j*Npols + p); // column-major array
fprintf(outfile, "%ld,%d,%ld,%d,%ld,%.6f,%.6f\n", j, p, k, q, z, h_XOutput[stride_cublas].x, h_XOutput[stride_cublas].y);
// elementIdx_k, polIdx_q, elementIdx_j, polIdx_p, channelIdx, Re_GPU, Im_GPU
z++;
}
}
/* Close file */
fclose(outfile);
/*** END Write output to file ***/
}
/*** END LOOP Copy data/output and write to file ***/
}
break;
/*** END Thread 3 ***/
}
/* Close all MPI instances */
mpi_retval = MPI_Finalize();
printf("\n");
return(mpi_retval);
}
| e65d4b9cc927be2ab2fac9a88b8a09d2016654ec.cu | /*
* MDT_OneStation_client.cu
*
* Source code for the multi-threaded client program running on the
* CPU, hosting the GPU processing node.
*
* Copyright (c) 2021 Nitish Ragoomundun
* <lrugratz gmail com>
* @ .
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include "MDT_OneStation_client.h"
int main (int argc, char **argv)
{
/*** BEGIN Variables for telescope ***/
int Nelements, Npols, Nchannels, Ntaps, Nspectra;
int SperElement; // number of complex samples per element per packet
unsigned int NumPerPkt; // number of 32-bit numbers in each packet
unsigned long int sampleIdx; // sample index in the grand scheme
/* Number of analogue inputs for each SNAP board in 1 station */
/* Elements of array are in snapIdx order */
unsigned short int InputsPerSNAP[4] = INPUTSPERSNAP;
/*** END Variables for telescope ***/
/*** BEGIN Variables for array lengths in memory ***/
/* Size of window for PFB */
long NBytes_Window;
/* Size of stacks in RAM */
long NBytes_Stacks;
/* Size of data copied from host to GPU at input */
long NBytes_HtoD;
/* Size of input array to F-engine */
long NBytes_FInput;
/* Size of data to shift in device memory between successive PFB's */
long NBytes_DtoD;
/* Total number of samples in FInput array */
long len_FInput;
/* Size of output array from F-engine */
long NBytes_FOutput;
/* Size of input array to the GPU X-engine */
long NBytes_XInput;
/* Size of cuBLAS pointer arrays */
long NBytes_cuBLASptr_Xin;
long NBytes_cuBLASptr_Xout;
/* Size of output array copied from GPU to host at output */
long NBytes_DtoH;
/* Number of samples comprised in DtoH */
long len_XOutput;
/*** END Variables for array lengths in memory ***/
/*** BEGIN Host variables ***/
/* Stack for incoming signal data */
cuComplex *h_Stacks;
/* Pointer arrays for use with cuBLAS X-engine */
cuComplex **h_XInputPtr, **h_XOutputPtr;
/* cuBLAS scalar coefficients */
cuComplex cublas_alpha, cublas_beta;
/* Output array for GPU FX correlator (pinned) */
cuComplex *h_XOutput;
long int i, j, k, z;
unsigned short int p, q;
unsigned long int Nsamples[2]; // samples written to array per element
unsigned long int stride, stride_sample;
unsigned int stride_buf;
unsigned long int Samples_Nspectra; // number of samples in Nspectra
unsigned long int Samples_begins; // (Ntaps - 1) spectra
unsigned long int stride_Nspectra, stride_begins;
unsigned long int stride_cublas;
unsigned short int stackIdx, curr_stack;
unsigned short int begins = 1;
/* Variables for output file */
FILE *outfile;
char outfilename[OUTFILENAMELEN];
struct tm *now;
time_t now_abs;
/*** END Host variables ***/
/*** BEGIN Device array variables ***/
/* Array for window */
float *d_Window;
/* Input array for signal data */
cuComplex *d_FInput;
/* Output array from GPU PFB */
cufftComplex *d_FOutput;
/* Input array for X-engine */
cuComplex *d_XInput;
/* Pointer arrays for use with cuBLAS */
cuComplex **d_XInputPtr, **d_XOutputPtr;
/* Output array for X-engine */
cuComplex *d_XOutput;
/*** END Device array variables ***/
/*** BEGIN CUDA control variables ***/
/* Variables for device properties */
int cu_devIdx;
cudaDeviceProp cu_devProp;
int cu_RetVal;
char *cu_DeviceName;
int cufft_version;
int cu_MaxThreadsPerBlk;
/* GPU thread gridding parameters for device function PpS_Batch() */
int PpS_Batch_thdx, PpS_Batch_thdy, PpS_Batch_thdz; // block dim
int PpS_Batch_blkx, PpS_Batch_blky, PpS_Batch_blkz; // grid dim
/* GPU thread gridding parameters for device function ReorderFOutput() */
int ReorderFOutput_thdx, ReorderFOutput_thdy, ReorderFOutput_thdz; // block dim
int ReorderFOutput_blkx, ReorderFOutput_blky, ReorderFOutput_blkz; // block dim
/* CUDA Streams */
cudaStream_t cu_Stream;
/* cuFFT plan handle */
cufftHandle cu_FFT_Plan;
/* cuBLAS control */
cublasHandle_t cu_BLAS_XEngine;
cublasStatus_t cu_BLAS_Stat;
/*** END CUDA control variables ***/
/*** BEGIN MPI variables ***/
int mpi_NumThreads, mpi_threadIdx;
char hostname[MPI_MAX_PROCESSOR_NAME];
int mpi_len;
int mpi_version, mpi_subversion;
int mpi_tag;
MPI_Status mpi_stat[4];
MPI_Request mpi_req[4];
int mpi_retval;
int green_light = 1;
/*** END MPI variables ***/
/*** BEGIN Variables for networking ***/
int listen_sock;
struct addrinfo hints, *servinfo, *addrinfo_ptr;
int retval;
int numbytes;
struct sockaddr_storage remote_addr;
socklen_t addr_len = sizeof(remote_addr);
char str_addr[INET6_ADDRSTRLEN];
/* Buffers to store packets from network socket */
unsigned int *udp_buf, *mpi_buf;
/*** END Variables for networking ***/
/*
* Telescope mode: Standby
* Preliminary settings and system initialisation
*
*/
/* Initialise MPI threads */
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_threadIdx);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_NumThreads);
/* Retrieve device properties */
cu_RetVal = cudaGetDevice(&cu_devIdx);
if ( (cu_RetVal = cudaErrCheck()) != 0 ) exit(cu_RetVal);
cu_RetVal = cudaGetDeviceProperties(&cu_devProp, cu_devIdx);
if ( (cu_RetVal = cudaErrCheck()) != 0 ) exit(cu_RetVal);
cu_DeviceName = cu_devProp.name; // Name of device
cu_MaxThreadsPerBlk = cu_devProp.maxThreadsPerBlock; // Maximum number of threads per block
/*** BEGIN Master thread parses command line arguments ***/
if (mpi_threadIdx == MPIMASTERIDX)
{
/* Check if required number of MPI threads is met */
if (mpi_NumThreads != REQNUMTHREADS)
{
printf("This program must run with %d threads!\n", REQNUMTHREADS);
MPI_Abort(MPI_COMM_WORLD, 1);
exit(1);
}
/* Check number of command line arguments */
if (argc <= 6) // argc <= (number of expected arguments)
{
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 2);
exit(2);
}
/* Validate values input to program */
Nelements = atoi(argv[1]);
if (Nelements < MINELEMENTS || Nelements > MAXELEMENTS)
{
printf("%s: Invalid number of elements!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 3);
exit(3);
}
/* Number of polarisations for each element */
Npols = atoi(argv[2]);
if (Npols < 1 || Npols > 2)
{
printf("%s: Invalid number of polarisations!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 4);
exit(4);
}
/* Number of frequency channels in each spectrum */
Nchannels = atoi(argv[3]);
if (Nchannels < MINCHANNELS || Nchannels > MAXCHANNELS)
{
printf("%s: Invalid number of frequency channels!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 5);
exit(5);
}
/* Number of taps in PFB */
Ntaps = atoi(argv[4]);
if (Ntaps < MINTAPS || Ntaps > MAXTAPS)
{
printf("%s: Invalid number of taps for PFB!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 6);
exit(6);
}
/* Number of spectra for 1 integration time */
Nspectra = atoi(argv[5]);
if (Nspectra < MINSPECTRA || Nspectra > MAXSPECTRA)
{
printf("%s: Invalid number of spectra!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 7);
exit(7);
}
/* Number of complex samples per element in 1 packet */
SperElement = atoi(argv[6]);
if (SperElement <= 0)
{
printf("%s: Invalid SperElement!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 8);
exit(8);
}
}
/*** END Master thread parses command line arguments ***/
/*** SYNC MPI Threads **/
MPI_Barrier(MPI_COMM_WORLD);
/*** Create CUDA stream for all MPI threads ***/
if (mpi_threadIdx != MPIMASTERIDX)
cudaStreamCreate(&cu_Stream);
/*** BEGIN Pipeline characteristics according to input arguments ***/
/* All MPI threads take command line parameters */
Nelements = atoi(argv[1]);
Npols = atoi(argv[2]);
Nchannels = atoi(argv[3]);
Ntaps = atoi(argv[4]);
Nspectra = atoi(argv[5]);
SperElement = atoi(argv[6]);
/* Number of 32-bit numbers in 1 UDP packet
* Includes the header
* This is an upper limit, assuming that the maximum number
* of inputs is at the first SNAP board
*/
NumPerPkt = (2 + (InputsPerSNAP[0] * SperElement)) * 2;
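/* For illustration (assumed values): with InputsPerSNAP[0] = 16 and
 * SperElement = 64, NumPerPkt = (2 + 16*64) * 2 = 2052 32-bit words,
 * i.e. 8208 bytes, just under the 8704-byte (8.5 KiB) jumbo-frame
 * limit enforced later by the master thread. */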
/* Number of complex samples per element in 1 stack */
Samples_Nspectra = Nspectra * Nchannels;
/* Number of samples, per element, to accumulate in the beginning of
* acquisition due to the required first (Ntaps - 1) x Nchannels samples
*/
Samples_begins = (Ntaps - 1) * Nchannels;
/* Strides due to the above */
stride_Nspectra = Samples_Nspectra * Npols*Nelements;
stride_begins = Samples_begins * Npols*Nelements;
len_FInput = stride_begins + stride_Nspectra;
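/* Illustrative sizing (assumed values): with Nchannels = 1024,
 * Ntaps = 8, Nspectra = 128, Npols = 2, Nelements = 4, the PFB input
 * must hold Nspectra + Ntaps - 1 spectra: Samples_begins = 7*1024
 * overlap samples per input plus Samples_Nspectra = 128*1024 fresh
 * samples, so len_FInput = (7 + 128) * 1024 * 2*4 complex samples. */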
/*** BEGIN Host array sizes ***/
/* Size of window for PFB */
NBytes_Window = sizeof(float) * Ntaps * Nchannels;
/* Size of stack */
NBytes_Stacks = sizeof(cuComplex) * 2 * Nspectra * Npols*Nelements * Nchannels;
/* Size of data copied from host to device at input */
NBytes_HtoD = sizeof(cuComplex) * Nspectra * Npols*Nelements * Nchannels;
// input
/* Size of output array copied from device to host at output */
NBytes_DtoH = sizeof(cuComplex) * Npols*Nelements * Npols*Nelements * Nchannels;
// output
/* Number of samples comprised in DtoH */
len_XOutput = Npols*Nelements * Npols*Nelements * Nchannels;
/*** END Host array sizes ***/
/*** BEGIN Device array sizes ***/
/* Size of input array to F-engine */
NBytes_FInput = sizeof(cuComplex) * (Nspectra + Ntaps - 1) * Npols*Nelements * Nchannels;
/* Size of data to shift in device memory between successive PFB's */
NBytes_DtoD = sizeof(cuComplex) * (Ntaps - 1) * Npols*Nelements * Nchannels;
/* Size of output array from F-engine */
NBytes_FOutput = sizeof(cufftComplex) * Nspectra * Npols*Nelements * Nchannels;
/* Size of input array to the GPU X-engine */
NBytes_XInput = sizeof(cuComplex) * Nspectra * Npols*Nelements * Nchannels;
/* Size of cuBLAS pointer arrays */
NBytes_cuBLASptr_Xin = sizeof(cuComplex *) * Nchannels;
NBytes_cuBLASptr_Xout = sizeof(cuComplex *) * Nchannels;
/*** END Device array sizes ***/
/*** END Pipeline characteristics according to input arguments ***/
/*** BEGIN Master MPI thread verifies if packet size is within system limit and prints info ***/
if (mpi_threadIdx == MPIMASTERIDX)
{
/* Ensure that requested packet size according to SperElement does not
* exceed the 8.5 KiB limit for the network jumbo frame.
*/
if (NumPerPkt*4 > 8704)
{
printf("%s: Requested Nelements and SperElement cause packet size to exceed 8.5KiB limit!\n", argv[0]);
print_help(argv[0]);
MPI_Abort(MPI_COMM_WORLD, 9);
exit(9);
}
/* If everything OK, print info and carry on */
MPI_Get_processor_name(hostname, &mpi_len);
MPI_Get_version(&mpi_version, &mpi_subversion);
cufftGetVersion(&cufft_version);
printf("\n");
printf("**************************************************\n");
printf(" Mode: Stand-by \n");
printf("**************************************************\n\n");
printf("Thread %d : Current machine: %s\n", mpi_threadIdx, hostname);
printf("Thread %d : Total number of MPI threads: %d\n", mpi_threadIdx, mpi_NumThreads);
printf("Thread %d : Version of MPI: %d.%d\n", mpi_threadIdx, mpi_version, mpi_subversion);
printf("Thread %d : Using %s (ID: %d) as device.\n", mpi_threadIdx, cu_DeviceName, cu_devIdx);
printf("Thread %d : GPU F-engine is powered by cuFFT version %d.\n", mpi_threadIdx, cufft_version);
printf("\n");
}
/*** END Master MPI thread verifies if packet size is within system limit and prints info ***/
/*** SYNC MPI Threads **/
MPI_Barrier(MPI_COMM_WORLD);
switch(mpi_threadIdx)
{
/*** BEGIN Thread 0 ***/
case 0:
{
/* Create buffers to pass packets around */
udp_buf = (unsigned int *)malloc(NumPerPkt * sizeof(unsigned int));
mpi_buf = (unsigned int *)malloc(NumPerPkt * sizeof(unsigned int));
/*** BEGIN Setting up UDP socket for listen ***/
printf("Thread %d: Setting up socket ...\n", mpi_threadIdx);
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_UNSPEC; // allow either IPv4 or IPv6
hints.ai_socktype = SOCK_DGRAM;
hints.ai_flags = AI_PASSIVE; // use my IP
if ((retval = getaddrinfo(NULL, MYPORT, &hints, &servinfo)) != 0)
{
fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(retval));
MPI_Abort(MPI_COMM_WORLD, 10);
exit(10);
}
// loop through all the results and bind to the first we can
for(addrinfo_ptr=servinfo; addrinfo_ptr!=NULL; addrinfo_ptr=addrinfo_ptr->ai_next)
{
if ((listen_sock = socket(addrinfo_ptr->ai_family, addrinfo_ptr->ai_socktype, addrinfo_ptr->ai_protocol)) == -1)
{
perror("socket");
continue;
}
if (bind(listen_sock, addrinfo_ptr->ai_addr, addrinfo_ptr->ai_addrlen) == -1)
{
close(listen_sock);
perror("bind");
continue;
}
break;
}
if (addrinfo_ptr == NULL)
{
fprintf(stderr, "listener: failed to bind socket\n");
MPI_Abort(MPI_COMM_WORLD, 11);
exit(11);
}
/*** END Setting up UDP socket for listen ***/
}
break;
/*** END Thread 0 ***/
/*** BEGIN Thread 1 ***/
case 1:
{
/* Create buffers to pass packets around */
udp_buf = (unsigned int *)malloc(NumPerPkt * sizeof(unsigned int));
mpi_buf = (unsigned int *)malloc(NumPerPkt * sizeof(unsigned int));
/*** BEGIN Host: Allocate arrays ***/
/* Stack for incoming signal data (page-locked) */
cudaHostAlloc((void **)&h_Stacks, NBytes_Stacks, cudaHostAllocDefault);
/*** END Host: Allocate arrays ***/
/*** BEGIN Device: Allocate arrays ***/
/* Input array to F-engine */
cudaMalloc((void **)&d_FInput, NBytes_FInput);
/*** END Device: Allocate arrays ***/
printf("Thread %d: Done with allocating arrays.\n", mpi_threadIdx);
}
break;
/*** END Thread 1 ***/
/*** BEGIN Thread 2 ***/
case 2:
{
/*** BEGIN Host: Allocate arrays ***/
/* Pointer arrays for use with cuBLAS X-engine */
h_XInputPtr = (cuComplex **)malloc(NBytes_cuBLASptr_Xin);
h_XOutputPtr = (cuComplex **)malloc(NBytes_cuBLASptr_Xout);
/*** END Host: Allocate arrays ***/
/*** BEGIN Device: Allocate arrays ***/
/* Array for window */
cudaMalloc((void **)&d_Window, NBytes_Window);
/* Input array to F-engine */
cudaMalloc((void **)&d_FInput, NBytes_FInput);
/* Output array from F-engine */
cudaMalloc((void **)&d_FOutput, NBytes_FOutput);
/* Input array for X-engine */
cudaMalloc((void **)&d_XInput, NBytes_XInput);
/* Pointer arrays for use with cuBLAS */
cudaMalloc((void ***)&d_XInputPtr, NBytes_cuBLASptr_Xin);
cudaMalloc((void ***)&d_XOutputPtr, NBytes_cuBLASptr_Xout);
/* Output array for X-engine */
cudaMalloc((void **)&d_XOutput, NBytes_DtoH);
/*** END Device: Allocate arrays ***/
printf("Thread %d: Done with allocating arrays.\n", mpi_threadIdx);
/*** BEGIN Device: Initialise window coefficients for PFB ***/
printf("\nThread %d: Initialising window coefficients for PFB ...\n", mpi_threadIdx);
printf("Thread %d: Grid configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, cu_MaxThreadsPerBlk, 1, 1);
printf("Thread %d: Block configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, (Ntaps*Nchannels)/cu_MaxThreadsPerBlk + (((Ntaps*Nchannels)%cu_MaxThreadsPerBlk != 0) ? 1 : 0), 1, 1);
PFB_Window_ExBlackman<<< cu_MaxThreadsPerBlk, (Ntaps*Nchannels)/cu_MaxThreadsPerBlk + (((Ntaps*Nchannels)%cu_MaxThreadsPerBlk != 0) ? 1 : 0), 0, cu_Stream >>>(Nchannels, Ntaps, d_Window);
cudaStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 12);
exit(12);
}
printf("Thread %d: Done.\n", mpi_threadIdx);
/*** END Device: Initialise window coefficients for PFB ***/
/*** BEGIN Device: PpS_Batch() grid characteristics ***/
printf("\nThread %d: Computing GPU grid structure for the PpS_Batch() kernel ...\n", mpi_threadIdx);
PpS_Batch_thdx = cu_MaxThreadsPerBlk;
PpS_Batch_thdy = 1;
PpS_Batch_thdz = 1;
PpS_Batch_blkx = Nchannels/PpS_Batch_thdx + ((Nchannels%PpS_Batch_thdx != 0) ? 1 : 0);
PpS_Batch_blky = Nspectra;
PpS_Batch_blkz = Npols*Nelements;
printf("Thread %d: Grid configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, PpS_Batch_blkx, PpS_Batch_blky, PpS_Batch_blkz);
printf("Thread %d: Block configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, PpS_Batch_thdx, PpS_Batch_thdy, PpS_Batch_thdz);
/*** END Device: PpS_Batch() grid characteristics ***/
/*** BEGIN Device: prepare cuFFT plan ***/
printf("\nThread %d: Constructing cuFFT plan prior to processing ...\n", mpi_threadIdx);
if ((cu_RetVal = cufftPlan1d(&cu_FFT_Plan, Nchannels, CUFFT_C2C, Nspectra*Npols*Nelements)) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error (%d): failed to create FFT plan for F-engine.\n", cu_RetVal);
MPI_Abort(MPI_COMM_WORLD, 13);
exit(13);
}
printf("\nThread %d: Setting up stream for cuFFT ...\n", mpi_threadIdx);
cufftSetStream(cu_FFT_Plan, cu_Stream);
/*** END Device: prepare cuFFT plan ***/
/*** BEGIN Device: ReorderFOutput() grid characteristics ***/
printf("\nThread %d: Computing GPU grid structure for the ReorderFOutput() kernel ...\n", mpi_threadIdx);
ReorderFOutput_thdx = 32;
ReorderFOutput_thdy = 32;
ReorderFOutput_thdz = 1;
ReorderFOutput_blkx = Nspectra;
ReorderFOutput_blky = Nchannels/ReorderFOutput_thdy + ((Nchannels%ReorderFOutput_thdy != 0) ? 1 : 0);
ReorderFOutput_blkz = (Npols*Nelements)/ReorderFOutput_thdx + (((Npols*Nelements)%ReorderFOutput_thdx != 0) ? 1 : 0);
printf("Thread %d: Grid configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, ReorderFOutput_blkx, ReorderFOutput_blky, ReorderFOutput_blkz);
printf("Thread %d: Block configuration = (%6d,%6d,%6d)\n", mpi_threadIdx, ReorderFOutput_thdx, ReorderFOutput_thdy, ReorderFOutput_thdz);
/*** END Device: ReorderFOutput() grid characteristics ***/
/*** BEGIN Device: Initialise cuBLAS ***/
printf("\nThread %d: Initialising pointer arrays for use with cuBLAS ...\n", mpi_threadIdx);
for (i=0 ; i<Nchannels ; i++)
{
h_XInputPtr[i] = d_XInput + i*Npols*Nelements*Nspectra;
h_XOutputPtr[i] = d_XOutput + i*Npols*Nelements*Npols*Nelements;
}
/* Copy pointer arrays to device */
printf("Copying cuBLAS pointer arrays to device ...\n");
cudaMemcpy((void **)d_XInputPtr, (void **)h_XInputPtr, NBytes_cuBLASptr_Xin, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 14);
exit(14);
}
cudaMemcpy((void **)d_XOutputPtr, (void **)h_XOutputPtr, NBytes_cuBLASptr_Xout, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 15);
exit(15);
}
free(h_XInputPtr);
free(h_XOutputPtr);
/* Initialise scalar coefficients */
cublas_alpha.x = 1.0f / Nspectra;
cublas_alpha.y = 0.0f;
cublas_beta.x = 0.0f;
cublas_beta.y = 0.0f;
/* Initialise cuBLAS and set stream */
printf("\nThread %d: Initialising cuBLAS X-engine and associating stream ...\n", mpi_threadIdx);
cu_BLAS_Stat = cublasCreate(&cu_BLAS_XEngine);
cudaDeviceSynchronize();
if (cu_BLAS_Stat != CUBLAS_STATUS_SUCCESS)
{
printf("\nThread %d: Failed to initialise cuBLAS (cublasStat = %d)!\n", mpi_threadIdx, (int)cu_BLAS_Stat);
MPI_Abort(MPI_COMM_WORLD, 16);
exit(16);
}
cu_BLAS_Stat = cublasSetStream(cu_BLAS_XEngine, cu_Stream);
if (cu_BLAS_Stat != CUBLAS_STATUS_SUCCESS)
{
printf("\nThread %d: Failed to set cuBLAS stream (cublasStat = %d)!\n", mpi_threadIdx, (int)cu_BLAS_Stat);
MPI_Abort(MPI_COMM_WORLD, 17);
exit(17);
}
/*** END Device: Initialise cuBLAS ***/
printf("Thread %d: Done.\n", mpi_threadIdx);
}
break;
/*** END Thread 2 ***/
/*** BEGIN Thread 3 ***/
case 3:
{
/*** BEGIN Host: Allocate arrays ***/
cudaHostAlloc((void **)&h_XOutput, NBytes_DtoH, cudaHostAllocDefault);
/*** END Host: Allocate arrays ***/
/*** BEGIN Device: Allocate arrays ***/
/* Input array to F-engine */
cudaMalloc((void **)&d_FInput, NBytes_FInput);
/* Output array for X-engine */
cudaMalloc((void **)&d_XOutput, NBytes_DtoH);
/*** END Device: Allocate arrays ***/
printf("Thread %d: Done with allocating arrays.\n", mpi_threadIdx);
}
break;
/*** END Thread 3 ***/
}
/*** SYNC MPI Threads **/
MPI_Barrier(MPI_COMM_WORLD);
if (mpi_threadIdx == MPIMASTERIDX)
printf("\nThread %d: Finished system initialisation.\n\n", mpi_threadIdx);
/*
* Telescope mode: Observing
*
*/
switch(mpi_threadIdx)
{
/*** BEGIN Thread 0 ***/
case 0:
{
printf("\n");
printf("**************************************************\n");
printf(" Mode: Observing \n");
printf("**************************************************\n\n");
printf("Thread %d: Listening for connections on UDP socket ...\n", mpi_threadIdx);
/*** First Listen and transfer to thread 1 ***/
if ((numbytes = recvfrom(listen_sock, udp_buf, NumPerPkt * sizeof(unsigned int), 0, (struct sockaddr *)&remote_addr, &addr_len)) == -1)
{
perror("recvfrom");
MPI_Abort(MPI_COMM_WORLD, 18);
exit(18);
}
/* Transfer packet to MPI buffer */
memcpy((void *)mpi_buf, (const void *)udp_buf, NumPerPkt * sizeof(unsigned int));
/* Send packet to MPI thread 1 */
mpi_tag = 0;
MPI_Isend((const void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 1, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buffer, count, datatype, dest, tag, comm, &request */
/*** BEGIN LOOP Listen and transfer packet ***/
while (1)
{
/* Listen */
if ((numbytes = recvfrom(listen_sock, udp_buf, NumPerPkt * sizeof(unsigned int), 0, (struct sockaddr *)&remote_addr, &addr_len)) == -1)
{
perror("recvfrom");
MPI_Abort(MPI_COMM_WORLD, 19);
exit(19);
}
/* Block if the previous Isend did not finish */
MPI_Wait(&mpi_req[0], &mpi_stat[0]);
/* Transfer packet to MPI buffer */
memcpy((void *)mpi_buf, (const void *)udp_buf, NumPerPkt * sizeof(unsigned int));
/* Send packet to MPI thread 1 */
// mpi_tag = 0
MPI_Isend((const void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 1, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buffer, count, datatype, dest, tag, comm, &request */
}
/*** END LOOP Listen and transfer packet ***/
}
break;
/*** END Thread 0 ***/
/*** BEGIN Thread 1 ***/
case 1:
{
Nsamples[0] = 0;
Nsamples[1] = 0;
/*** BEGIN First (Ntaps - 1) x Nchannels samples ***/
/* First receive packet from MPI thread 0 */
mpi_tag = 0;
MPI_Irecv((void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 0, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buf, count, dtype, src, tag, comm, &stat */
do
{
/* Wait for non-blocking recv to conclude */
MPI_Wait(&mpi_req[0], &mpi_stat[0]);
/* Transfer packet to MPI buffer */
memcpy((void *)udp_buf, (const void *)mpi_buf, NumPerPkt * sizeof(unsigned int));
/* Non-blocking Recv to wait for thread 0 to pass packet */
// mpi_tag = 0
MPI_Irecv((void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 0, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buf, count, dtype, src, tag, comm, &request */
/*** BEGIN Depacketise ***/
// The data array uses the following layout:
// Stack -> Spectrum -> Element -> Pol -> Channel
for (i=0 ; i<SperElement ; i++)
{
/* Calculate index for the current sample */
sampleIdx = (udp_buf[2] * SperElement) + i; // element sample index in the grand scheme
// spectrumIdx = sampleIdx / Nchannels
// channelIdx = sampleIdx % Nchannels
// stackIdx = (spectrumIdx / Nspectra) % 2
/* Cater for first (Ntaps - 1) x Nchannels samples at the beginning */
if (sampleIdx < Samples_begins)
stackIdx = 1;
else
{
sampleIdx -= Samples_begins;
stackIdx = 0;
}
/* Calculate the stride where to write data in array */
// Stride due to stack:
// stackIdx * Nspectra * Npols*Nelements * Nchannels
// Spectrum stride:
// (sampleIdx / Nchannels) * Npols*Nelements * Nchannels
stride = (stackIdx*Nspectra + (sampleIdx / Nchannels)) * Npols*Nelements * Nchannels;
// Element stride due to stationIdx
stride += (udp_buf[0] * INPUTSPERSTATION) * Nchannels;
// Element stride due to snapIdx
for (k=0 ; k<udp_buf[1] ; k++)
stride += (InputsPerSNAP[k] * Nchannels);
// Channel stride
stride += (sampleIdx % Nchannels);
/* Loop over elements in this packet */
for (j=0 ; j<InputsPerSNAP[udp_buf[1]] ; j++)
{
/* Stride for current sample in MPI buffer */
stride_buf = 4 + 2*(i*InputsPerSNAP[udp_buf[1]] + j);
/* Stride for current sample in data array */
stride_sample = stride + j*Nchannels;
// Convert back to a signed integer, then to float, scaling by 2^-19
// Re
if (udp_buf[stride_buf] >= 2147483648)
h_Stacks[stride_sample].x = -1.9073486328125e-06 * (int)(4294967295 - udp_buf[stride_buf] + 1);
else
h_Stacks[stride_sample].x = 1.9073486328125e-06 * (int)(udp_buf[stride_buf]);
// Im
if (udp_buf[stride_buf + 1] >= 2147483648)
h_Stacks[stride_sample].y = -1.9073486328125e-06 * (int)(4294967295 - udp_buf[stride_buf + 1] + 1);
else
h_Stacks[stride_sample].y = 1.9073486328125e-06 * (int)(udp_buf[stride_buf + 1]);
}
Nsamples[stackIdx]++;
}
}
while (Nsamples[1] < Samples_begins);
/* Transfer the first (Ntaps - 1) x Nchannels samples to data array on device */
cudaMemcpyAsync((void *)d_FInput, (void *)&h_Stacks[stride_Nspectra], NBytes_DtoD, cudaMemcpyHostToDevice, cu_Stream);
/* Update stack status */
Nsamples[1] = 0;
/*** END Depacketise ***/
/*** END First (Ntaps - 1) x Nchannels samples ***/
/*** BEGIN LOOP Service subsequent packets from MPI thread 0 ***/
curr_stack = 0; // current data stack being prepared to send to GPU
while (1)
{
/* Wait for non-blocking recv to conclude */
MPI_Wait(&mpi_req[0], &mpi_stat[0]);
/* Transfer packet to MPI buffer */
memcpy((void *)udp_buf, (const void *)mpi_buf, NumPerPkt * sizeof(unsigned int));
/* Non-blocking Recv to wait for thread 0 to pass packet */
mpi_tag = 0;
MPI_Irecv((void *)mpi_buf, NumPerPkt, MPI_UINT32_T, 0, mpi_tag, MPI_COMM_WORLD, &mpi_req[0]);
/* &buf, count, dtype, src, tag, comm, &request */
/*** BEGIN Depacketise ***/
// The data array uses the following layout:
// Stack -> Spectrum -> Element -> Pol -> Channel
for (i=0 ; i<SperElement ; i++)
{
/* Calculate indices for the current sample */
sampleIdx = (udp_buf[2] * SperElement) - Samples_begins + i; // element sample index in the grand scheme
// spectrumIdx = sampleIdx / Nchannels
// channelIdx = sampleIdx % Nchannels
// stackIdx = (spectrumIdx / Nspectra) % 2
/* Calculate which part of the stack sample should be placed */
stackIdx = (sampleIdx / Samples_Nspectra) % 2; // either 0 or 1
/* Calculate the stride where to write data in array */
// Stride due to stack:
// stackIdx * Nspectra * Npols*Nelements * Nchannels
// Spectrum stride:
// ((sampleIdx / Nchannels) % Nspectra) * Npols*Nelements * Nchannels
stride = (stackIdx*Nspectra + ((sampleIdx / Nchannels) % Nspectra)) * Npols*Nelements * Nchannels;
// Element stride due to stationIdx
stride += (udp_buf[0] * INPUTSPERSTATION) * Nchannels;
// Element stride due to snapIdx
for (k=0 ; k<udp_buf[1] ; k++)
stride += (InputsPerSNAP[k] * Nchannels);
// Channel stride
stride += (sampleIdx % Nchannels);
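// Worked example (illustrative values): with Nchannels = 1024 and
// Nspectra = 128, sampleIdx = 200000 gives channelIdx = 200000 % 1024
// = 320 and spectrumIdx = 200000 / 1024 = 195, so the sample lands in
// stack (195 / 128) % 2 = 1, at spectrum 195 % 128 = 67 inside that
// stack; the element strides then select the station/SNAP block.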
/* Loop over elements in this packet */
for (j=0 ; j<InputsPerSNAP[udp_buf[1]] ; j++)
{
/* Stride for current sample in MPI buffer */
stride_buf = 4 + 2*(i*InputsPerSNAP[udp_buf[1]] + j);
/* Stride for current sample in data array */
stride_sample = stride + j*Nchannels;
// Convert back to a signed integer, then to float, scaling by 2^-19
// Re
if (udp_buf[stride_buf] >= 2147483648)
h_Stacks[stride_sample].x = -1.9073486328125e-06 * (int)(4294967295 - udp_buf[stride_buf] + 1);
else
h_Stacks[stride_sample].x = 1.9073486328125e-06 * (int)(udp_buf[stride_buf]);
// Im
if (udp_buf[stride_buf + 1] >= 2147483648)
h_Stacks[stride_sample].y = -1.9073486328125e-06 * (int)(4294967295 - udp_buf[stride_buf + 1] + 1);
else
h_Stacks[stride_sample].y = 1.9073486328125e-06 * (int)(udp_buf[stride_buf + 1]);
}
Nsamples[stackIdx]++;
}
/*** END Depacketise ***/
/*** BEGIN Transfer data to device if current stack full ***/
if (Nsamples[curr_stack] == Samples_Nspectra)
{
if (begins == 1)
{
/* Sync with device to see if last cudaMemcpyAsync finished */
cudaStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 20);
exit(20);
}
/* Transfer content of current stack to data array */
cudaMemcpyAsync((void *)&d_FInput[stride_begins], (void *)&h_Stacks[curr_stack * stride_Nspectra], NBytes_HtoD, cudaMemcpyHostToDevice, cu_Stream);
begins = 0;
}
else
{
/* Sync with device to see if last cudaMemcpyAsync finished */
cudaStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 21);
exit(21);
}
/* Share d_FInput with MPI thread 2 to trigger processing */
mpi_tag = 1;
MPI_Isend((const void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 2, mpi_tag, MPI_COMM_WORLD, &mpi_req[1]);
/* &buffer, count, datatype, dest, tag, comm, &request */
/* Receive device pointers from MPI thread 3 to be able to carry on */
mpi_tag = 3;
MPI_Recv((void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 3, mpi_tag, MPI_COMM_WORLD, &mpi_stat[1]);
/* &buf, count, dtype, src, tag, comm, &stat */
/* If lights are green, transfer content of current stack to data array */
cudaMemcpyAsync((void *)&d_FInput[stride_begins], (void *)&h_Stacks[curr_stack * stride_Nspectra], NBytes_HtoD, cudaMemcpyHostToDevice, cu_Stream);
}
/* Update stack status */
Nsamples[curr_stack] = 0;
/* Swap current stack being serviced for GPU */
curr_stack = (curr_stack == 0);
}
/*** END Transfer data to device if current stack full ***/
}
/*** END LOOP Service subsequent packets from MPI thread 0 ***/
}
break;
/*** END Thread 1 ***/
/*** BEGIN Thread 2 ***/
case 2:
{
while (1)
{
/* Receive d_FInput pointers to begin processing */
mpi_tag = 1;
MPI_Recv((void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 1, mpi_tag, MPI_COMM_WORLD, &mpi_stat[0]);
/* &buf, count, dtype, src, tag, comm, &stat */
/*** BEGIN Device: F-engine ***/
/*** Compute polyphase structure ***/
PpS_Batch<<< dim3(PpS_Batch_blkx,PpS_Batch_blky,PpS_Batch_blkz), dim3(PpS_Batch_thdx,PpS_Batch_thdy,PpS_Batch_thdz), 0, cu_Stream >>>(Nchannels, Ntaps, d_Window, d_FInput, d_FOutput);
/* Sync with device to see if PpS_Batch finished */
cudaStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 22);
exit(22);
}
/* Share input array pointers with MPI thread 3 for (Ntaps - 1) shift */
if (begins != 1)
MPI_Wait(&mpi_req[1], &mpi_stat[1]); // Wait for non-blocking send to conclude
mpi_tag = 2;
MPI_Isend((const void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 3, mpi_tag, MPI_COMM_WORLD, &mpi_req[1]);
/* &buffer, count, datatype, dest, tag, comm, &request */
/*** In-place Fast Fourier Transform ***/
cufftExecC2C(cu_FFT_Plan, d_FOutput, d_FOutput, CUFFT_FORWARD);
/*** END Device: F-engine ***/
/*** BEGIN Device: X-engine ***/
/* Re-order array for input to cuBLAS function */
ReorderFOutput<<< dim3(ReorderFOutput_blkx,ReorderFOutput_blky,ReorderFOutput_blkz), dim3(ReorderFOutput_thdx,ReorderFOutput_thdy,ReorderFOutput_thdz), 0, cu_Stream >>>(Nelements, Npols, Nchannels, d_FOutput, d_XInput);
/* Block until MPI thread 3 gives green light to overwrite device output array */
if (begins != 1)
{
mpi_tag = 5;
MPI_Recv((void *)&green_light, 1, MPI_INT, 3, mpi_tag, MPI_COMM_WORLD, &mpi_stat[2]);
/* &buf, count, dtype, src, tag, comm, &stat */
}
/*** Cross-Correlation engine using cuBLAS ***/
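/* Shape sketch of the call below: one GEMM per channel (Nchannels
 * batches), each computing
 *   R_c = alpha * X_c * X_c^H + beta * R_c
 * with X_c the (Npols*Nelements) x Nspectra matrix of channel c
 * (CUBLAS_OP_N on A, CUBLAS_OP_C on B, both pointing at d_XInputPtr).
 * alpha = 1/Nspectra averages over the integration and beta = 0
 * discards the previous contents, so R_c is the visibility matrix of
 * channel c. */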
cu_BLAS_Stat = CUBLAS_STATUS_EXECUTION_FAILED;
cu_BLAS_Stat = cublasCgemmBatched(cu_BLAS_XEngine, CUBLAS_OP_N, CUBLAS_OP_C, Npols*Nelements, Npols*Nelements, Nspectra, &cublas_alpha, (const cuComplex **)d_XInputPtr, Npols*Nelements, (const cuComplex **)d_XInputPtr, Npols*Nelements, &cublas_beta, d_XOutputPtr, Npols*Nelements, Nchannels);
cudaStreamSynchronize(cu_Stream);
if (cu_BLAS_Stat != CUBLAS_STATUS_SUCCESS)
{
printf("\nThread %d: cuBLAS X-engine failed (cublasStat = %d)!\n", mpi_threadIdx, (int)cu_BLAS_Stat);
MPI_Abort(MPI_COMM_WORLD, 23);
exit(23);
}
/*** END Device: X-engine ***/
/* Signal MPI thread 3 that processing is done and it can carry on
* with writing output to file.
*/
if (begins != 1)
MPI_Wait(&mpi_req[3], &mpi_stat[3]); // Wait for non-blocking send to conclude
else
begins = 0;
mpi_tag = 4;
MPI_Isend((const void *)d_XOutput, len_XOutput, MPI_UNSIGNED_LONG, 3, mpi_tag, MPI_COMM_WORLD, &mpi_req[3]);
/* &buffer, count, datatype, dest, tag, comm, &request */
}
}
break;
/*** END Thread 2 ***/
/*** BEGIN Thread 3 ***/
case 3:
{
/*** BEGIN LOOP Copy data/output and write to file ***/
while (1)
{
/* Receive d_FInput pointers from MPI thread 2 */
mpi_tag = 2;
MPI_Recv((void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 2, mpi_tag, MPI_COMM_WORLD, &mpi_stat[0]);
/* &buf, count, dtype, src, tag, comm, &stat */
/* Copy last (Ntaps - 1) spectra to the beginning of input array to PFB */
cudaMemcpyAsync((void *)d_FInput, (void *)&d_FInput[stride_Nspectra], NBytes_DtoD, cudaMemcpyDeviceToDevice, cu_Stream);
/* Sync with device to see if copying completed */
cudaStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 24);
exit(24);
}
/* If cudaMemcpyDeviceToDevice finished, share d_FInput device
* pointers with MPI thread 1 */
if (begins != 1)
MPI_Wait(&mpi_req[1], &mpi_stat[1]); // Wait for non-blocking send to conclude
mpi_tag = 3;
MPI_Isend((const void *)d_FInput, len_FInput, MPI_UNSIGNED_LONG, 1, mpi_tag, MPI_COMM_WORLD, &mpi_req[1]);
/* &buffer, count, datatype, dest, tag, comm, &request */
/* Receive d_XOutput device pointers to retrieve results */
mpi_tag = 4;
MPI_Recv((void *)d_XOutput, len_XOutput, MPI_UNSIGNED_LONG, 2, mpi_tag, MPI_COMM_WORLD, &mpi_stat[2]);
/* &buf, count, dtype, src, tag, comm, &stat */
/* Copy results from device to host memory */
cudaMemcpyAsync((void *)h_XOutput, (void *)d_XOutput, NBytes_DtoH, cudaMemcpyDeviceToHost, cu_Stream);
/*** BEGIN Write output to file ***/
/* Take time stamp */
now_abs = time(0);
now = localtime(&now_abs);
/* Construct output file name */
strftime(outfilename, OUTFILENAMELEN*sizeof(char), "MDT_%Y%m%d_%H%M%S.csv", now);
if ((outfile = fopen(outfilename, "w")) == NULL)
{
printf("Cannot open or create file %s!\n", outfilename);
MPI_Abort(MPI_COMM_WORLD, 25);
exit(25);
}
/* Sync with device so that asynchronous copy to host completes */
cudaStreamSynchronize(cu_Stream);
if ((cu_RetVal = cudaErrCheck()) != 0)
{
MPI_Abort(MPI_COMM_WORLD, 26);
exit(26);
}
/* Signal MPI thread 2 that it can overwrite the device output array */
if (begins != 1)
MPI_Wait(&mpi_req[3], &mpi_stat[3]); // Wait for non-blocking send to conclude
else
begins = 0;
mpi_tag = 5;
MPI_Isend((const void *)&green_light, 1, MPI_INT, 2, mpi_tag, MPI_COMM_WORLD, &mpi_req[3]);
/* &buffer, count, datatype, dest, tag, comm, &request */
printf("Writing output to file %s ...\n", outfilename);
for (j=0 ; j<Nelements ; j++) // element row index
for (p=0 ; p<Npols ; p++) // polarisation of element j
for (k=j ; k<Nelements ; k++) // element column index
for (q=p ; q<Npols ; q++) // polarisation of element k
{
/* CUFFT puts the positive frequency spectrum at the first
Nchannels/2 positions in the array, and the negative
frequencies follow. Therefore, the lowest B/2 frequencies
of the band are at the end of the array */
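/* Example with Nchannels = 8 (illustrative): FFT bins hold frequencies
 * [0 1 2 3 -4 -3 -2 -1] in units of the channel spacing, so writing
 * bins 4..7 first and then bins 0..3 emits the channels in ascending
 * frequency order -4 -3 -2 -1 0 1 2 3 (an fftshift). */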
z = 0;
for (i=Nchannels/2 ; i<Nchannels ; i++)
{
stride_cublas = i*Npols*Nelements*Npols*Nelements + (k*Npols + q)*Npols*Nelements + (j*Npols + p); // column-major array
fprintf(outfile, "%ld,%d,%ld,%d,%ld,%.6f,%.6f\n", j, p, k, q, z, h_XOutput[stride_cublas].x, h_XOutput[stride_cublas].y);
// elementIdx_k, polIdx_q, elementIdx_j, polIdx_p, channelIdx, Re_GPU, Im_GPU
z++;
}
for (i=0 ; i<Nchannels/2 ; i++)
{
stride_cublas = i*Npols*Nelements*Npols*Nelements + (k*Npols + q)*Npols*Nelements + (j*Npols + p); // column-major array
fprintf(outfile, "%ld,%d,%ld,%d,%ld,%.6f,%.6f\n", j, p, k, q, z, h_XOutput[stride_cublas].x, h_XOutput[stride_cublas].y);
// elementIdx_k, polIdx_q, elementIdx_j, polIdx_p, channelIdx, Re_GPU, Im_GPU
z++;
}
}
/* Close file */
fclose(outfile);
/*** END Write output to file ***/
}
/*** END LOOP Copy data/output and write to file ***/
}
break;
/*** END Thread 3 ***/
}
/* Close all MPI instances */
mpi_retval = MPI_Finalize();
printf("\n");
return(mpi_retval);
}
|
621af3679e1f1a34986efe46ee6d7c9f2c808ab0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file mlkswap.cu
*
* @brief MLP swap search in GPU.
*
* @author Eyder Rios
* @date 2014-06-01
*/
#include <iostream>
#include <stdint.h>
#include <stdarg.h>
#include "log.h"
#include "utils.h"
#include "gpu.h"
//#include "mlgputask.h"
#include "mlkswap.h"
// ################################################################################# //
// ## ## //
// ## CONSTANTS & MACROS ## //
// ## ## //
// ################################################################################# //
#define tx threadIdx.x
#define by blockIdx.y
// ################################################################################# //
// ## ## //
// ## KERNEL FUNCTIONS ## //
// ## ## //
// ################################################################################# //
#ifdef MLP_GPU_ADS
//#if 0
/*
* GPU Auxiliary Data Structures (ADS) code
*/
__global__
void
kernelSwap(const MLADSData *gm_ads, MLMovePack *gm_move, int size)
{
/*!
* Shared memory variables
*/
extern
__shared__
int sm_buffer[]; // Dynamic shared memory buffer
/*!
* Shared variables
*/
__shared__
int *sm_coordx, // Clients x-coordinates
*sm_coordy, // Clients y-coordinates
*sm_move, // Thread movement id/cost
*sm_time, // ADS time
*sm_cost, // ADS cost
sm_rsize, // ADS row size
sm_scost, // Solution cost
sm_tour, // Tour/path
sm_cost0; // ADS cost element
__shared__
float sm_round; // Round value
/*!
* Local memory variables
*/
uint *gm_data; // Points to some ADS data
int dist, // Distance
cost, // Solution cost
time, // Travel time
wait, // Wait
bcost, // Best cost in chunk
bmove; // Best move in chunk
int c, // Chunk no
ctx, // Tx index for chunk
cmax; // Number of chunks
int i,j, // Movement indexes
n; // Last solution index
if(tx >= size)
return;
/*
* Dynamic shared memory buffer usage
*
* buffer
* |
* v
* +--------+--------+-----------------+
* | coordx | coordy | movid | movcost |
* +--------+--------+-----------------+
* ^ ^
* | |
* etime ecost
*/
// Only thread 0 initializes shared variables
if(tx == 0) {
sm_rsize = gm_ads->s.rowElems;
sm_scost = gm_ads->s.solCost;
sm_tour = gm_ads->s.tour;
sm_round = gm_ads->s.round * 0.5F;
//sm_coordx = sm_buffer + 2; -- was like this in 2016-05-08
sm_coordx = sm_buffer;
sm_coordy = sm_coordx + size;
sm_move = sm_coordy + size;
sm_time = sm_move;
sm_cost = sm_move + size;
// if(by == 0) {
// printf("size\t\t%d\n",size);
// printf("sm_buffer\t%p\n",sm_buffer);
// printf("sm_coordx\t%p\n",sm_coordx);
// printf("sm_coordy\t%p\n",sm_coordy);
// printf("sm_move\t\t%p\n",sm_move);
// printf("sm_time\t\t%p\n",sm_time);
// printf("sm_cost\t\t%p\n",sm_cost);
// }
// Row 0 from ADS: needs only column i - 1
gm_data = ADS_COST_PTR(gm_ads,sm_rsize);
sm_cost0 = gm_data[by];
}
__syncthreads();
// Number of chunks
cmax = GPU_DIVCEIL(size,blockDim.x);
/*
* Copy clients coordinates
*/
gm_data = ADS_COORD_PTR(gm_ads);
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Split coordinates
sm_coordx[ctx] = GPU_HI_USHORT(gm_data[ctx]);
sm_coordy[ctx] = GPU_LO_USHORT(gm_data[ctx]);
}
/*!
* Copy ADS.T
*/
// Points to ADS.T
gm_data = ADS_TIME_PTR(gm_ads,sm_rsize);
// Copy ADS.T data
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_time[ctx] = gm_data[ctx];
/*!
* Copy ADS.C
*/
// Points to ADS.C
gm_data = ADS_COST_PTR(gm_ads,sm_rsize);
// Row i + 1 from ADS
n = (by + 2) * sm_rsize;
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_cost[ctx] = gm_data[n + ctx]; // C[i+1]
// Row 0 from ADS: needs only column i - 1
if(tx == 0)
sm_cost0 = gm_data[by]; // C[0,i-1]
// Wait for all threads to synchronize
__syncthreads();
bmove = 0;
bcost = COST_INFTY;
/*
if(tx == 0 && by == 2) {
for(i=0;i < size;i++)
kprintf("%d\t(%d,%d)\n",i,sm_coordx[i],sm_coordy[i]);
kprintf("\n");
for(i=1;i < size;i++)
kprintf("%d-%d: %d\n",i-1,i,sm_cdist[i]);
kprintf("\n");
kprintf("T\t: ");
for(i=0;i < size;i++)
kprintf(" %d",sm_time[i]);
kprintf("\n");
kprintf("C[%d]\t: ",by + 2);
for(i=0;i < size;i++)
kprintf(" %d",sm_cost[i]);
kprintf("\n");
kprintf("C[0,%d]\t: %d\n",by,sm_cost0);
kprintf("C[%d,%d]\t: %d\n",by+1,by+1,sm_cost1);
}
*/
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Movement indexes
i = by + 1;
j = ctx;
cost = COST_INFTY;
if((j > i + 1) && (j < size - sm_tour)) {
// Last solution index
n = size - 1;
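/* The swap (i,j) is evaluated by concatenating the subsequences
 * [0,i-1] + [j,j] + [i+1,j-1] + [i,i] + [j+1,n]. Each concatenation
 * A + B below follows the MLP composition rules
 *   cost(A+B) = cost(A) + cost(B) + W(B) * (time(A) + dist(lastA, firstB))
 *   time(A+B) = time(A) + time(B) + dist(lastA, firstB)
 * with W[a,b] = b - a + (a > 0), as the inline comments note. */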
/*
* [0,i-1] + [j,j]
*/
dist = GPU_DIST_COORD(i - 1,j); // D[i-1,j]
// wait = 1; // W[j,j] = j - j + (j > 0)
// = 1
cost = sm_cost0 + // C[0,i-1]
0 + // C[j,j] = 0
1 * ( // W[j,j] = 1
sm_time[i - 1] + // T[0,i-1]
dist ); // D[i-1,j]
time = sm_time[i - 1] + // T[0,i-1]
0 + // T[j,j] = 0
dist; // D[i-1,j]
/*
* [0,i-1] + [j,j] + [i+1,j-1]
*/
dist = GPU_DIST_COORD(j,i + 1); // D[j,i+1]
wait = j - i - 1; // W[i+1,j-1] = j - 1 - i - 1 + (i+1 > 0)
// = j - i - 2 + 1 = j - i - 1
cost += sm_cost[j - 1] + // C[i+1,j-1]
wait * ( // W[i+1,j-1]
time + // T([0,i-1] + [j,j])
dist ); // D[j,i+1]
time += GPU_ADS_TIME(i + 1,j - 1) + // T[i+1,j-1]
dist; // D[j,i+1]
/*
* [0,i-1] + [j,j] + [i+1,j-1] + [i,i]
*/
dist = GPU_DIST_COORD(j - 1,i); // D[j-1,i]
// wait = 1; // W[i,i] = i - i + (i > 0)
// = 0 + 1 = 1
cost += 0 + // C[i,i] = 0
1 * ( // W[i,i] = 1
time + // T([0,i-1] + [j,j] + [i+1,j-1])
dist ); // D[j-1,i]
time += 0 + // T[i,i] = 0
dist; // D[j-1,i]
/*
* [0,i-1] + [j,j] + [i+1,j-1] + [i,i] + [j+1,n]
*/
if(j + 1 <= n) {
// Row j + 1 from ADS: needs only column n
gm_data = ADS_COST_PTR(gm_ads,sm_rsize) +
sm_rsize*(j + 1);
dist = GPU_DIST_COORD(i,j + 1); // D[i,j+1]
wait = n - j; // W[j+1,n] = n - j - 1 + (j+1 > 0)
// = n - j - 1 + 1 = n - j
cost += gm_data[n] + // C[j+1,n]
wait * ( // W[j+1,n]
time + // T([0,i-1] + [j,j] + [i+1,j-1] + [i,i])
dist ); // D[i,j+1]
}
cost = cost - sm_scost;
k4printf("GPU_SWAP(%d,%d) = %d\n",i,j,cost);
}
if(cost < bcost) {
bcost = cost;
bmove = GPU_MOVE_PACKID(i,j,MLMI_SWAP);
}
/*
if((tx==283) && (by == 171))
kprintf("WWWW Block %d tx:%d bdim:%d ID %d: GPU_SWAP(%u,%u) = %d bcost=%d bmove=%d\n",
by, tx, blockDim.x, by*blockDim.x+tx,
i,
j,
cost, bcost, bmove);
*/
}
//#undef sm_ecost0
__syncthreads();
// Chunk size
n = GPU_MIN(size,int(blockDim.x));
sm_move[tx] = bcost;
sm_move[tx + n] = bmove;
__syncthreads();
/*
* Minimum cost reduction
*/
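/* Tree reduction over the n per-thread results, halving the stride
 * (with ceiling) each step. Illustrative run for n = 5: i = 3 compares
 * pairs (0,3) and (1,4); i = 2 compares (0,2) and (1,3); the loop
 * stops at i = 1, leaving elements 0 and 1 for the final comparison
 * done by thread 0 below. */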
for(i=GPU_DIVCEIL(n,2);i > 1;i=GPU_DIVCEIL(i,2)) {
if(tx < i) {
if((tx + i < n) && (sm_move[tx] > sm_move[tx + i])) {
sm_move[tx] = sm_move[tx + i];
sm_move[tx + n] = sm_move[tx + n + i];
}
}
__syncthreads();
}
if(tx == 0) {
// The first 2 elements were not compared
if(sm_move[0] > sm_move[1]) {
sm_move[0] = sm_move[1];
sm_move[n] = sm_move[n + 1];
}
gm_move[by].w = GPU_MOVE_PACK64(sm_move[0],sm_move[n]);
k4printf("Block %d: GPU_SWAP(%u,%u) = %d\n",
by,
gm_move[by].s.i,
gm_move[by].s.j,
gm_move[by].s.cost);
}
}
__global__
void
kernelSwapTotal(const MLADSData *gm_ads, MLMovePack *gm_move, int size)
{
/*!
* Shared memory variables
*/
extern
__shared__
int sm_buffer[]; // Dynamic shared memory buffer
/*!
* Shared variables
*/
__shared__
int *sm_coordx, // Clients x-coordinates
*sm_coordy, // Clients y-coordinates
*sm_move, // Thread movement id/cost
*sm_time, // ADS time
*sm_cost, // ADS cost
sm_rsize, // ADS row size
sm_scost, // Solution cost
sm_tour, // Tour/path
sm_cost0; // ADS cost element
__shared__
float sm_round; // Round value
/*!
* Local memory variables
*/
uint *gm_data; // Points to some ADS data
int dist, // Distance
cost, // Solution cost
time, // Travel time
wait, // Wait
bcost, // Best cost in chunk
bmove; // Best move in chunk
int c, // Chunk no
ctx, // Tx index for chunk
cmax; // Number of chunks
int i,j, // Movement indexes
n; // Last solution index
if(tx >= size)
return;
// Only thread 0 initializes shared variables
if(tx == 0) {
sm_rsize = gm_ads->s.rowElems;
sm_scost = gm_ads->s.solCost;
sm_tour = gm_ads->s.tour;
sm_round = gm_ads->s.round * 0.5F;
//sm_coordx = sm_buffer + 2; -- was like this in 2016-05-08
sm_coordx = sm_buffer;
sm_coordy = sm_coordx + size;
sm_move = sm_coordy + size;
sm_time = sm_move;
sm_cost = sm_move + size;
// Row 0 from ADS: needs only column i - 1
gm_data = ADS_COST_PTR(gm_ads,sm_rsize);
sm_cost0 = gm_data[by];
}
__syncthreads();
// Number of chunks
cmax = GPU_DIVCEIL(size,blockDim.x);
/*
* Copy clients coordinates
*/
gm_data = ADS_COORD_PTR(gm_ads);
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Split coordinates
sm_coordx[ctx] = GPU_HI_USHORT(gm_data[ctx]);
sm_coordy[ctx] = GPU_LO_USHORT(gm_data[ctx]);
}
/*!
* Copy ADS.T
*/
// Points to ADS.T
gm_data = ADS_TIME_PTR(gm_ads,sm_rsize);
// Copy ADS.T data
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_time[ctx] = gm_data[ctx];
/*!
* Copy ADS.C
*/
// Points to ADS.C
gm_data = ADS_COST_PTR(gm_ads,sm_rsize);
// Row i + 1 from ADS
n = (by + 2) * sm_rsize;
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_cost[ctx] = gm_data[n + ctx]; // C[i+1]
// Row 0 from ADS: needs only column i - 1
if(tx == 0)
sm_cost0 = gm_data[by]; // C[0,i-1]
// Wait for all threads to synchronize
__syncthreads();
bmove = 0;
bcost = COST_INFTY;
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Movement indexes
i = by + 1;
j = ctx;
cost = COST_INFTY;
if((j > i + 1) && (j < size - sm_tour)) {
// Last solution index
n = size - 1;
/*
* [0,i-1] + [j,j]
*/
dist = GPU_DIST_COORD(i - 1,j); // D[i-1,j]
// wait = 1; // W[j,j] = j - j + (j > 0)
// = 1
cost = sm_cost0 + // C[0,i-1]
0 + // C[j,j] = 0
1 * ( // W[j,j] = 1
sm_time[i - 1] + // T[0,i-1]
dist ); // D[i-1,j]
time = sm_time[i - 1] + // T[0,i-1]
0 + // T[j,j] = 0
dist; // D[i-1,j]
/*
* [0,i-1] + [j,j] + [i+1,j-1]
*/
dist = GPU_DIST_COORD(j,i + 1); // D[j,i+1]
wait = j - i - 1; // W[i+1,j-1] = j - 1 - i - 1 + (i+1 > 0)
// = j - i - 2 + 1 = j - i - 1
cost += sm_cost[j - 1] + // C[i+1,j-1]
wait * ( // W[i+1,j-1]
time + // T([0,i-1] + [j,j])
dist ); // D[j,i+1]
time += GPU_ADS_TIME(i + 1,j - 1) + // T[i+1,j-1]
dist; // D[j,i+1]
/*
* [0,i-1] + [j,j] + [i+1,j-1] + [i,i]
*/
dist = GPU_DIST_COORD(j - 1,i); // D[j-1,i]
// wait = 1; // W[i,i] = i - i + (i > 0)
// = 0 + 1 = 1
cost += 0 + // C[i,i] = 0
1 * ( // W[i,i] = 1
time + // T([0,i-1] + [j,j] + [i+1,j-1])
dist ); // D[j-1,i]
time += 0 + // T[i,i] = 0
dist; // D[j-1,i]
/*
* [0,i-1] + [j,j] + [i+1,j-1] + [i,i] + [j+1,n]
*/
if(j + 1 <= n) {
// Row j + 1 from ADS: needs only column n
gm_data = ADS_COST_PTR(gm_ads,sm_rsize) +
sm_rsize*(j + 1);
dist = GPU_DIST_COORD(i,j + 1); // D[i,j+1]
wait = n - j; // W[j+1,n] = n - j - 1 + (j+1 > 0)
// = n - j - 1 + 1 = n - j
cost += gm_data[n] + // C[j+1,n]
wait * ( // W[j+1,n]
time + // T([0,i-1] + [j,j] + [i+1,j-1] + [i,i])
dist ); // D[i,j+1]
}
cost = cost - sm_scost;
k4printf("GPU_SWAP(%d,%d) = %d\n",i,j,cost);
} // if (j > i + 1)
if(cost < bcost) {
bcost = cost;
bmove = GPU_MOVE_PACKID(i,j,MLMI_SWAP);
}
} // for c=0
//#undef sm_ecost0
__syncthreads();
gm_move[by*blockDim.x+tx].w = GPU_MOVE_PACK64(bcost,bmove);
}
#else
/*
* GPU On Demand Calculation (ODC) code
*
*/
__global__
void
kernelSwap(const MLADSData *gm_ads, MLMovePack *gm_move, int size)
{
/*!
* Shared memory variables
*/
extern __shared__
int sm_buffer[]; // Dynamic shared memory buffer
/*!
* Shared variables
*/
__shared__
int *sm_soldist, // Solution distance (adjacent clients)
*sm_coordx, // Clients x-coordinates
*sm_coordy, // Clients y-coordinates
*sm_move, // Thread movement id/cost
sm_rsize, // ADS row size
sm_tour; // Tour/path
/*!
* Local variables
*/
uint *gm_data; // Points to some data
int dcost; // Cost improvement
int c, // Chunk no
ctx, // Tx index for chunk
cmax, // Number of chunks
csize; // Chunk size
int i,j,k; // Auxiliary
if(tx >= size)
return;
/*
* Dynamic shared memory buffer usage
*
* buffer
* |
* v
* +---------+--------+--------+-----------------+
* | soldist | coordx | coordy | movid | movcost |
* +---------+--------+--------+-----------------+
*/
if(tx == 0) {
sm_soldist = sm_buffer;
sm_coordx = sm_soldist + size;
sm_coordy = sm_coordx + size;
sm_move = sm_coordy + size;
sm_rsize = gm_ads->s.rowElems;
sm_tour = gm_ads->s.tour;
}
__syncthreads();
cmax = GPU_DIVCEIL(size,blockDim.x);
csize = GPU_MIN(size,int(blockDim.x));
// Get coordinates
gm_data = ADS_COORD_PTR(gm_ads);
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Split coordinates
sm_coordx[ctx] = GPU_HI_USHORT(gm_data[ctx]);
sm_coordy[ctx] = GPU_LO_USHORT(gm_data[ctx]);
}
// Get solution distances
gm_data = ADS_SOLUTION_PTR(gm_ads,sm_rsize);
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_soldist[ctx] = GPU_LO_USHORT(gm_data[ctx]);
// Initialize movement/cost
sm_move[tx] = COST_INFTY;
// Wait until all threads have arranged the data
__syncthreads();
for(c=0;c < cmax;c++) {
ctx = c*blockDim.x + tx;
i = j = 0;
dcost = COST_INFTY;
if(ctx < size - sm_tour - 3) {
// Compute movement indexes performed by thread
k = (ctx >= by);
i = k*(by + 1) + (!k)*(size - sm_tour - by - 2);
j = k*(ctx + 3) + (!k)*(size - sm_tour - ctx - 1);
dcost = (size - i) * ( int(GPU_DIST_COORD(i - 1,j)) - sm_soldist[i] );
dcost += (size - i - 1) * ( int(GPU_DIST_COORD(j,i + 1)) - sm_soldist[i + 1] );
dcost += (size - j) * ( int(GPU_DIST_COORD(j - 1,i)) - sm_soldist[j] );
// When computing PATHs and j = size - 1, there's no j+1 element,
// so there's no related cost
if(j + 1 < size)
dcost += (size - j - 1) * ( int(GPU_DIST_COORD(i,j + 1)) - sm_soldist[j+1] );
k4printf("GPU_SWAP(%d,%d) = %d\n",i,j,dcost);
}
if(dcost < sm_move[tx]) {
sm_move[tx] = dcost;
sm_move[tx + csize] = GPU_MOVE_PACKID(i,j,MLMI_SWAP);
}
}
__syncthreads();
/*
* Minimum cost reduction
*/
for(i=GPU_DIVCEIL(csize,2);i > 1;i=GPU_DIVCEIL(i,2)) {
if(tx < i) {
if((tx + i < csize) && (sm_move[tx] > sm_move[tx + i])) {
sm_move[tx] = sm_move[tx + i];
sm_move[tx + csize] = sm_move[tx + csize + i];
}
}
__syncthreads();
}
if(tx == 0) {
// Elements 0 and 1 were never compared: the loop above exits once
// i reaches 1 (GPU_DIVCEIL(2,2) == 1), so finish the reduction here
if(sm_move[0] > sm_move[1]) {
sm_move[0] = sm_move[1];
sm_move[csize] = sm_move[csize + 1];
}
gm_move[by].w = GPU_MOVE_PACK64(sm_move[0],sm_move[csize]);
k4printf("Block %d: GPU_SWAP(%u,%u) = %d\n",
by,
gm_move[by].s.i,
gm_move[by].s.j,
gm_move[by].s.cost);
}
/*
// MIN cost reduction
for(i=GPU_DIVCEIL(csize,2);i > 0;i >>= 1) {
if(tx < i) {
if(tx + i < csize) {
if(sm_move[tx] > sm_move[tx + i]) {
sm_move[tx] = sm_move[tx + i];
sm_move[tx + csize] = sm_move[tx + i + csize];
}
}
}
__syncthreads();
}
if(tx == 0) {
gm_move[by].w = GPU_MOVE_PACK64(sm_move[0],sm_move[csize]);
k4printf("Block %d: MIN SWAP(%u,%u) = %d\n",by,
gm_move[by].s.i,
gm_move[by].s.j,
gm_move[by].s.cost);
}
*/
}
#endif
// ################################################################################# //
// ## ## //
// ## KERNEL LAUNCHERS ## //
// ## ## //
// ################################################################################# //
void
MLKernelSwap::defineKernelGrid()
{
int gsize,
bsize;
#ifdef MLP_GPU_ADS
/*
* Compute dynamic shared memory size
*/
shared = 4 * GPU_BLOCK_CEIL(int,solSize);
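// 4 int regions of solSize each: coordx, coordy, and the two halves of
// the move buffer (movid/movcost), which the kernel aliases as
// sm_time/sm_cost.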
/*
* Compute grid
*/
grid.x = 1;
grid.y = solSize - problem.costTour - 3;
grid.z = 1;
#else
/*
* Compute dynamic shared memory size
*/
shared = 5 * GPU_BLOCK_CEIL(int,solSize);
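// 5 int regions of solSize each: soldist, coordx, coordy, plus the
// movid and movcost halves of the move buffer (see the ODC kernel's
// buffer-usage diagram).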
/*
* Compute grid
*/
grid.x = 1;
grid.y = ((solSize + 1) / 2) - 1;
grid.z = 1;
#endif
if(gpuOccupancyMaxPotentialBlockSizeVariableSMem(&gsize,&bsize,kernelSwap,
MLKernelSharedSize(this),solSize) == hipSuccess)
block.x = bsize;
//else
// block.x = problem.params.blockSize ? problem.params.blockSize : solSize;
block.y = 1;
block.z = 1;
/*
* Number of movements returned
*/
moveElems = grid.y;
if(isTotal)
moveElems = grid.y*block.x;
//if(problem.params.maxMerge)
// maxMerge = problem.params.maxMerge;
//else
maxMerge = moveElems;
/*
l4printf("Kernel %s\tgrid(%d,%d,%d)\tblck(%d,%d,%d)\tshared=%u (%u KB)\n",
name,
grid.x,grid.y,grid.z,
block.x,block.y,block.z,
shared,shared / 1024);
*/
}
void
MLKernelSwap::launchKernel()
{
////lprintf("Calling kernel %s\n",name);
// Update kernel calls counter
callCount++;
// Kernel in execution
flagExec = true;
/*
lprintf("Kernel %s\tgrid(%d,%d,%d)\tblck(%d,%d,%d)\tshared=%u (%u KB)\tsize=%u\n",
name,
grid.x,grid.y,grid.z,
block.x,block.y,block.z,
shared,shared / 1024,
solSize);
lprintf("adsData=%p\n",adsData);
*/
// Calls kernel
// if(!isTotal)
//std::cout << "MLKernelSwap::launchKernel - grid(x: " << grid.x << ", y: " << grid.y << ", z: " << grid.z
// << ") block(x: " << block.x << ", y: " << block.y << ", z: " << block.z << ") shared: " << shared << std::endl;
hipLaunchKernelGGL(( kernelSwap), dim3(grid),dim3(block),shared,stream, adsData,moveData,solSize);
// else
// kernelSwapTotal<<<grid,block,shared,stream>>>(adsData,moveData,solSize);
gpuDeviceSynchronize();
}
void
MLKernelSwap::applyMove(MLMove &move)
{
int i,j;
ushort t;
i = move.i;
j = move.j;
//solution->showCostCalc("BASE : ");
solution->cost += move.cost;
solution->time = sysTimer();
solution->move = move;
t = solution->clients[i];
solution->clients[i] = solution->clients[j];
solution->clients[j] = t;
solution->weights[i] = solution->dist(i - 1,i);
solution->weights[i + 1] = solution->dist(i,i + 1);
solution->weights[j] = solution->dist(j - 1,j);
if(j + 1 < solution->clientCount) {
solution->weights[j + 1] = solution->dist(j,j + 1);
}
// Requires compiling with -DMLP_COST_CHECK
#ifdef MLP_COST_CHECK
if(problem.params.checkCost) {
uint ccost = solution->costCalc();
l4printf("CHECK COST: %s,\tcost=%u, check=%u\n",name,solution->cost,ccost);
if(solution->cost != ccost) {
lprintf("%s(%u,%u): wrong=%u, right=%u\n",name ,move.i ,move.j ,solution->cost ,ccost );
solution->showCostCalc("SOLUTION: ");
EXCEPTION("INVALID COST: %s",problem.name);
}
}
#endif
}
| 621af3679e1f1a34986efe46ee6d7c9f2c808ab0.cu | /**
* @file mlkswap.cu
*
* @brief MLP swap search in GPU.
*
* @author Eyder Rios
* @date 2014-06-01
*/
#include <iostream>
#include <stdint.h>
#include <stdarg.h>
#include "log.h"
#include "utils.h"
#include "gpu.h"
//#include "mlgputask.h"
#include "mlkswap.h"
// ################################################################################# //
// ## ## //
// ## CONSTANTS & MACROS ## //
// ## ## //
// ################################################################################# //
#define tx threadIdx.x
#define by blockIdx.y
// ################################################################################# //
// ## ## //
// ## KERNEL FUNCTIONS ## //
// ## ## //
// ################################################################################# //
#ifdef MLP_GPU_ADS
//#if 0
/*
* GPU Auxiliary Data Structures (ADS) code
*/
__global__
void
kernelSwap(const MLADSData *gm_ads, MLMovePack *gm_move, int size)
{
/*!
* Shared memory variables
*/
extern
__shared__
int sm_buffer[]; // Dynamic shared memory buffer
/*!
* Shared variables
*/
__shared__
int *sm_coordx, // Clients x-coordinates
*sm_coordy, // Clients y-coordinates
*sm_move, // Thread movement id/cost
*sm_time, // ADS time
*sm_cost, // ADS cost
sm_rsize, // ADS row size
sm_scost, // Solution cost
sm_tour, // Tour/path
sm_cost0; // ADS cost element
__shared__
float sm_round; // Round value
/*!
* Local memory variables
*/
uint *gm_data; // Points to some ADS data
int dist, // Distance
cost, // Solution cost
time, // Travel time
wait, // Wait
bcost, // Best cost in chunk
bmove; // Best move in chunk
int c, // Chunk no
ctx, // Tx index for chunk
cmax; // Number of chunks
int i,j, // Movement indexes
n; // Last solution index
if(tx >= size)
return;
/*
* Dynamic shared memory buffer usage
*
* buffer
* |
* v
* +--------+--------+-----------------+
* | coordx | coordy | movid | movcost |
* +--------+--------+-----------------+
* ^ ^
* | |
* etime ecost
*/
// Only thread 0 initializes shared variables
if(tx == 0) {
sm_rsize = gm_ads->s.rowElems;
sm_scost = gm_ads->s.solCost;
sm_tour = gm_ads->s.tour;
sm_round = gm_ads->s.round * 0.5F;
//sm_coordx = sm_buffer + 2; -- was like this in 2016-05-08
sm_coordx = sm_buffer;
sm_coordy = sm_coordx + size;
sm_move = sm_coordy + size;
sm_time = sm_move;
sm_cost = sm_move + size;
// if(by == 0) {
// printf("size\t\t%d\n",size);
// printf("sm_buffer\t%p\n",sm_buffer);
// printf("sm_coordx\t%p\n",sm_coordx);
// printf("sm_coordy\t%p\n",sm_coordy);
// printf("sm_move\t\t%p\n",sm_move);
// printf("sm_time\t\t%p\n",sm_time);
// printf("sm_cost\t\t%p\n",sm_cost);
// }
// Row 0 from ADS: needs only column i - 1
gm_data = ADS_COST_PTR(gm_ads,sm_rsize);
sm_cost0 = gm_data[by];
}
__syncthreads();
// Number of chunks
cmax = GPU_DIVCEIL(size,blockDim.x);
/*
* Copy clients coordinates
*/
gm_data = ADS_COORD_PTR(gm_ads);
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Split coordinates
sm_coordx[ctx] = GPU_HI_USHORT(gm_data[ctx]);
sm_coordy[ctx] = GPU_LO_USHORT(gm_data[ctx]);
}
/*!
* Copy ADS.T
*/
// Points to ADS.T
gm_data = ADS_TIME_PTR(gm_ads,sm_rsize);
// Copy ADS.T data
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_time[ctx] = gm_data[ctx];
/*!
* Copy ADS.C
*/
// Points to ADS.C
gm_data = ADS_COST_PTR(gm_ads,sm_rsize);
// Row i + 1 from ADS
n = (by + 2) * sm_rsize;
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_cost[ctx] = gm_data[n + ctx]; // C[i+1]
// Row 0 from ADS: needs only column i - 1
if(tx == 0)
sm_cost0 = gm_data[by]; // C[0,i-1]
// Wait for all threads to synchronize
__syncthreads();
bmove = 0;
bcost = COST_INFTY;
/*
if(tx == 0 && by == 2) {
for(i=0;i < size;i++)
kprintf("%d\t(%d,%d)\n",i,sm_coordx[i],sm_coordy[i]);
kprintf("\n");
for(i=1;i < size;i++)
kprintf("%d-%d: %d\n",i-1,i,sm_cdist[i]);
kprintf("\n");
kprintf("T\t: ");
for(i=0;i < size;i++)
kprintf(" %d",sm_time[i]);
kprintf("\n");
kprintf("C[%d]\t: ",by + 2);
for(i=0;i < size;i++)
kprintf(" %d",sm_cost[i]);
kprintf("\n");
kprintf("C[0,%d]\t: %d\n",by,sm_cost0);
kprintf("C[%d,%d]\t: %d\n",by+1,by+1,sm_cost1);
}
*/
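/*
 * Cost evaluation below uses the MLP subsequence-concatenation identity:
 * for consecutive tour segments A and B,
 *
 *   C(A+B) = C(A) + W(B) * ( T(A) + D(last(A),first(B)) ) + C(B)
 *   T(A+B) = T(A) + D(last(A),first(B)) + T(B)
 *
 * where C is accumulated latency, T is duration, D is the distance between
 * the joining endpoints, and W[a,b] = b - a + (a > 0) is the segment
 * weight. Swapping clients i and j rebuilds the tour as
 * [0,i-1] + [j,j] + [i+1,j-1] + [i,i] + [j+1,n], and the loop applies the
 * identity once per appended segment.
 */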
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Movement indexes
i = by + 1;
j = ctx;
cost = COST_INFTY;
if((j > i + 1) && (j < size - sm_tour)) {
// Last solution index
n = size - 1;
/*
* [0,i-1] + [j,j]
*/
dist = GPU_DIST_COORD(i - 1,j); // D[i-1,j]
// wait = 1; // W[j,j] = j - j + (j > 0)
// = 1
cost = sm_cost0 + // C[0,i-1]
0 + // C[j,j] = 0
1 * ( // W[j,j] = 1
sm_time[i - 1] + // T[0,i-1]
dist ); // D[i-1,j]
time = sm_time[i - 1] + // T[0,i-1]
0 + // T[j,j] = 0
dist; // D[i-1,j]
/*
* [0,i-1] + [j,j] + [i+1,j-1]
*/
dist = GPU_DIST_COORD(j,i + 1); // D[j,i+1]
wait = j - i - 1; // W[i+1,j-1] = j - 1 - i - 1 + (i+1 > 0)
// = j - i - 2 + 1 = j - i - 1
cost += sm_cost[j - 1] + // C[i+1,j-1]
wait * ( // W[i+1,j-1]
time + // T([0,i-1] + [j,j])
dist ); // D[j,i+1]
time += GPU_ADS_TIME(i + 1,j - 1) + // T[i+1,j-1]
dist; // D[j,i+1]
/*
* [0,i-1] + [j,j] + [i+1,j-1] + [i,i]
*/
dist = GPU_DIST_COORD(j - 1,i); // D[j-1,i]
// wait = 1; // W[i,i] = i - i + (i > 0)
// = 0 + 1 = 1
cost += 0 + // C[i,i] = 0
1 * ( // W[i,i] = 1
time + // T([0,i-1] + [j,j] + [i+1,j-1])
dist ); // D[j-1,i]
time += 0 + // T[i,i] = 0
dist; // D[j-1,i]
/*
* [0,i-1] + [j,j] + [i+1,j-1] + [i,i] + [j+1,n]
*/
if(j + 1 <= n) {
// Row j + 1 from ADS: needs only column n
gm_data = ADS_COST_PTR(gm_ads,sm_rsize) +
sm_rsize*(j + 1);
dist = GPU_DIST_COORD(i,j + 1); // D[i,j+1]
wait = n - j; // W[j+1,n] = n - j - 1 + (j+1 > 0)
// = n - j - 1 + 1 = n - j
cost += gm_data[n] + // C[j+1,n]
wait * ( // W[j+1,n]
time + // T([0,i-1] + [j,j] + [i+1,j-1] + [i,i])
dist ); // D[i,j+1]
}
cost = cost - sm_scost;
k4printf("GPU_SWAP(%d,%d) = %d\n",i,j,cost);
}
if(cost < bcost) {
bcost = cost;
bmove = GPU_MOVE_PACKID(i,j,MLMI_SWAP);
}
/*
if((tx==283) && (by == 171))
kprintf("WWWW Block %d tx:%d bdim:%d ID %d: GPU_SWAP(%u,%u) = %d bcost=%d bmove=%d\n",
by, tx, blockDim.x, by*blockDim.x+tx,
i,
j,
cost, bcost, bmove);
*/
}
//#undef sm_ecost0
__syncthreads();
// Chunk size
n = GPU_MIN(size,int(blockDim.x));
sm_move[tx] = bcost;
sm_move[tx + n] = bmove;
__syncthreads();
/*
* Minimum cost reduction
*/
for(i=GPU_DIVCEIL(n,2);i > 1;i=GPU_DIVCEIL(i,2)) {
if(tx < i) {
if((tx + i < n) && (sm_move[tx] > sm_move[tx + i])) {
sm_move[tx] = sm_move[tx + i];
sm_move[tx + n] = sm_move[tx + n + i];
}
}
__syncthreads();
}
if(tx == 0) {
// Elements 0 and 1 were never compared: the loop above exits once
// i reaches 1 (GPU_DIVCEIL(2,2) == 1), so finish the reduction here
if(sm_move[0] > sm_move[1]) {
sm_move[0] = sm_move[1];
sm_move[n] = sm_move[n + 1];
}
gm_move[by].w = GPU_MOVE_PACK64(sm_move[0],sm_move[n]);
k4printf("Block %d: GPU_SWAP(%u,%u) = %d\n",
by,
gm_move[by].s.i,
gm_move[by].s.j,
gm_move[by].s.cost);
}
}
__global__
void
kernelSwapTotal(const MLADSData *gm_ads, MLMovePack *gm_move, int size)
{
/*!
* Shared memory variables
*/
extern
__shared__
int sm_buffer[]; // Dynamic shared memory buffer
/*!
* Shared variables
*/
__shared__
int *sm_coordx, // Clients x-coordinates
*sm_coordy, // Clients y-coordinates
*sm_move, // Thread movement id/cost
*sm_time, // ADS time
*sm_cost, // ADS cost
sm_rsize, // ADS row size
sm_scost, // Solution cost
sm_tour, // Tour/path
sm_cost0; // ADS cost element
__shared__
float sm_round; // Round value
/*!
* Local memory variables
*/
uint *gm_data; // Points to some ADS data
int dist, // Distance
cost, // Solution cost
time, // Travel time
wait, // Wait
bcost, // Best cost in chunk
bmove; // Best move in chunk
int c, // Chunk no
ctx, // Tx index for chunk
cmax; // Number of chunks
int i,j, // Movement indexes
n; // Last solution index
if(tx >= size)
return;
// Only thread 0 initializes shared variables
if(tx == 0) {
sm_rsize = gm_ads->s.rowElems;
sm_scost = gm_ads->s.solCost;
sm_tour = gm_ads->s.tour;
sm_round = gm_ads->s.round * 0.5F;
//sm_coordx = sm_buffer + 2; -- was like this in 2016-05-08
sm_coordx = sm_buffer;
sm_coordy = sm_coordx + size;
sm_move = sm_coordy + size;
sm_time = sm_move;
sm_cost = sm_move + size;
// Row 0 from ADS: needs only column i - 1
gm_data = ADS_COST_PTR(gm_ads,sm_rsize);
sm_cost0 = gm_data[by];
}
__syncthreads();
// Number of chunks
cmax = GPU_DIVCEIL(size,blockDim.x);
/*
* Copy clients coordinates
*/
gm_data = ADS_COORD_PTR(gm_ads);
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Split coordinates
sm_coordx[ctx] = GPU_HI_USHORT(gm_data[ctx]);
sm_coordy[ctx] = GPU_LO_USHORT(gm_data[ctx]);
}
/*!
* Copy ADS.T
*/
// Points to ADS.T
gm_data = ADS_TIME_PTR(gm_ads,sm_rsize);
// Copy ADS.T data
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_time[ctx] = gm_data[ctx];
/*!
* Copy ADS.C
*/
// Points to ADS.C
gm_data = ADS_COST_PTR(gm_ads,sm_rsize);
// Row i + 1 from ADS
n = (by + 2) * sm_rsize;
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_cost[ctx] = gm_data[n + ctx]; // C[i+1]
// Row 0 from ADS: needs only column i - 1
if(tx == 0)
sm_cost0 = gm_data[by]; // C[0,i-1]
// Wait for all threads to synchronize
__syncthreads();
bmove = 0;
bcost = COST_INFTY;
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Movement indexes
i = by + 1;
j = ctx;
cost = COST_INFTY;
if((j > i + 1) && (j < size - sm_tour)) {
// Last solution index
n = size - 1;
/*
* [0,i-1] + [j,j]
*/
dist = GPU_DIST_COORD(i - 1,j); // D[i-1,j]
// wait = 1; // W[j,j] = j - j + (j > 0)
// = 1
cost = sm_cost0 + // C[0,i-1]
0 + // C[j,j] = 0
1 * ( // W[j,j] = 1
sm_time[i - 1] + // T[0,i-1]
dist ); // D[i-1,j]
time = sm_time[i - 1] + // T[0,i-1]
0 + // T[j,j] = 0
dist; // D[i-1,j]
/*
* [0,i-1] + [j,j] + [i+1,j-1]
*/
dist = GPU_DIST_COORD(j,i + 1); // D[j,i+1]
wait = j - i - 1; // W[i+1,j-1] = j - 1 - i - 1 + (i+1 > 0)
// = j - i - 2 + 1 = j - i - 1
cost += sm_cost[j - 1] + // C[i+1,j-1]
wait * ( // W[i+1,j-1]
time + // T([0,i-1] + [j,j])
dist ); // D[j,i+1]
time += GPU_ADS_TIME(i + 1,j - 1) + // T[i+1,j-1]
dist; // D[j,i+1]
/*
* [0,i-1] + [j,j] + [i+1,j-1] + [i,i]
*/
dist = GPU_DIST_COORD(j - 1,i); // D[j-1,i]
// wait = 1; // W[i,i] = i - i + (i > 0)
// = 0 + 1 = 1
cost += 0 + // C[i,i] = 0
1 * ( // W[i,i] = 1
time + // T([0,i-1] + [j,j] + [i+1,j-1])
dist ); // D[j-1,i]
time += 0 + // T[i,i] = 0
dist; // D[j-1,i]
/*
* [0,i-1] + [j,j] + [i+1,j-1] + [i,i] + [j+1,n]
*/
if(j + 1 <= n) {
// Row j + 1 from ADS: needs only column n
gm_data = ADS_COST_PTR(gm_ads,sm_rsize) +
sm_rsize*(j + 1);
dist = GPU_DIST_COORD(i,j + 1); // D[i,j+1]
wait = n - j; // W[j+1,n] = n - j - 1 + (j+1 > 0)
// = n - j - 1 + 1 = n - j
cost += gm_data[n] + // C[j+1,n]
wait * ( // W[j+1,n]
time + // T([0,i-1] + [j,j] + [i+1,j-1] + [i,i])
dist ); // D[i,j+1]
}
cost = cost - sm_scost;
k4printf("GPU_SWAP(%d,%d) = %d\n",i,j,cost);
} // if (j > i + 1)
if(cost < bcost) {
bcost = cost;
bmove = GPU_MOVE_PACKID(i,j,MLMI_SWAP);
}
} // for c=0
//#undef sm_ecost0
__syncthreads();
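// Unlike kernelSwap, there is no block-level reduction here: every thread
// publishes its own best candidate, matching moveElems = grid.y * block.x
// set by defineKernelGrid() when isTotal holds.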
gm_move[by*blockDim.x+tx].w = GPU_MOVE_PACK64(bcost,bmove);
}
#else
/*
* GPU On Demand Calculation (ODC) code
*
*/
__global__
void
kernelSwap(const MLADSData *gm_ads, MLMovePack *gm_move, int size)
{
/*!
* Shared memory variables
*/
extern __shared__
int sm_buffer[]; // Dynamic shared memory buffer
/*!
* Shared variables
*/
__shared__
int *sm_soldist, // Solution distance (adjacent clients)
*sm_coordx, // Clients x-coordinates
*sm_coordy, // Clients y-coordinates
*sm_move, // Thread movement id/cost
sm_rsize, // ADS row size
sm_tour; // Tour/path
/*!
* Local variables
*/
uint *gm_data; // Points to some data
int dcost; // Cost improvement
int c, // Chunk no
ctx, // Tx index for chunk
cmax, // Number of chunks
csize; // Chunk size
int i,j,k; // Auxiliary
if(tx >= size)
return;
/*
* Dynamic shared memory buffer usage
*
* buffer
* |
* v
* +---------+--------+--------+-----------------+
* | soldist | coordx | coordy | movid | movcost |
* +---------+--------+--------+-----------------+
*/
if(tx == 0) {
sm_soldist = sm_buffer;
sm_coordx = sm_soldist + size;
sm_coordy = sm_coordx + size;
sm_move = sm_coordy + size;
sm_rsize = gm_ads->s.rowElems;
sm_tour = gm_ads->s.tour;
}
__syncthreads();
cmax = GPU_DIVCEIL(size,blockDim.x);
csize = GPU_MIN(size,int(blockDim.x));
// Get coordinates
gm_data = ADS_COORD_PTR(gm_ads);
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++) {
// Split coordinates
sm_coordx[ctx] = GPU_HI_USHORT(gm_data[ctx]);
sm_coordy[ctx] = GPU_LO_USHORT(gm_data[ctx]);
}
// Get solution distances
gm_data = ADS_SOLUTION_PTR(gm_ads,sm_rsize);
for(c=0;(c < cmax) && ((ctx = c*blockDim.x + tx) < size);c++)
sm_soldist[ctx] = GPU_LO_USHORT(gm_data[ctx]);
// Initialize movement/cost
sm_move[tx] = COST_INFTY;
// Wait until all threads have arranged the data
__syncthreads();
for(c=0;c < cmax;c++) {
ctx = c*blockDim.x + tx;
i = j = 0;
dcost = COST_INFTY;
if(ctx < size - sm_tour - 3) {
// Compute movement indexes performed by thread
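// k is 0 or 1, so the two products below act as a branchless select
// between a direct mapping (ctx >= by) and a mirrored one (ctx < by),
// giving each thread a distinct (i,j) pair without divergent branches.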
k = (ctx >= by);
i = k*(by + 1) + (!k)*(size - sm_tour - by - 2);
j = k*(ctx + 3) + (!k)*(size - sm_tour - ctx - 1);
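// In the MLP objective the arc entering position p is still traversed by
// (size - p) clients, so the swap's delta cost only re-prices the four
// affected arcs around i and j, each weighted accordingly.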
dcost = (size - i) * ( int(GPU_DIST_COORD(i - 1,j)) - sm_soldist[i] );
dcost += (size - i - 1) * ( int(GPU_DIST_COORD(j,i + 1)) - sm_soldist[i + 1] );
dcost += (size - j) * ( int(GPU_DIST_COORD(j - 1,i)) - sm_soldist[j] );
// When computing PATHs and j = size - 1, there's no j+1 element,
// so there's no related cost
if(j + 1 < size)
dcost += (size - j - 1) * ( int(GPU_DIST_COORD(i,j + 1)) - sm_soldist[j+1] );
k4printf("GPU_SWAP(%d,%d) = %d\n",i,j,dcost);
}
if(dcost < sm_move[tx]) {
sm_move[tx] = dcost;
sm_move[tx + csize] = GPU_MOVE_PACKID(i,j,MLMI_SWAP);
}
}
__syncthreads();
/*
* Minimum cost reduction
*/
for(i=GPU_DIVCEIL(csize,2);i > 1;i=GPU_DIVCEIL(i,2)) {
if(tx < i) {
if((tx + i < csize) && (sm_move[tx] > sm_move[tx + i])) {
sm_move[tx] = sm_move[tx + i];
sm_move[tx + csize] = sm_move[tx + csize + i];
}
}
__syncthreads();
}
if(tx == 0) {
// Elements 0 and 1 were never compared: the loop above exits once
// i reaches 1 (GPU_DIVCEIL(2,2) == 1), so finish the reduction here
if(sm_move[0] > sm_move[1]) {
sm_move[0] = sm_move[1];
sm_move[csize] = sm_move[csize + 1];
}
gm_move[by].w = GPU_MOVE_PACK64(sm_move[0],sm_move[csize]);
k4printf("Block %d: GPU_SWAP(%u,%u) = %d\n",
by,
gm_move[by].s.i,
gm_move[by].s.j,
gm_move[by].s.cost);
}
/*
// MIN cost reduction
for(i=GPU_DIVCEIL(csize,2);i > 0;i >>= 1) {
if(tx < i) {
if(tx + i < csize) {
if(sm_move[tx] > sm_move[tx + i]) {
sm_move[tx] = sm_move[tx + i];
sm_move[tx + csize] = sm_move[tx + i + csize];
}
}
}
__syncthreads();
}
if(tx == 0) {
gm_move[by].w = GPU_MOVE_PACK64(sm_move[0],sm_move[csize]);
k4printf("Block %d: MIN SWAP(%u,%u) = %d\n",by,
gm_move[by].s.i,
gm_move[by].s.j,
gm_move[by].s.cost);
}
*/
}
#endif
// ################################################################################# //
// ## ## //
// ## KERNEL LAUNCHERS ## //
// ## ## //
// ################################################################################# //
void
MLKernelSwap::defineKernelGrid()
{
int gsize,
bsize;
#ifdef MLP_GPU_ADS
/*
* Compute dynamic shared memory size
*/
shared = 4 * GPU_BLOCK_CEIL(int,solSize);
/*
* Compute grid
*/
grid.x = 1;
grid.y = solSize - problem.costTour - 3;
grid.z = 1;
#else
/*
* Compute dynamic shared memory size
*/
shared = 5 * GPU_BLOCK_CEIL(int,solSize);
/*
* Compute grid
*/
grid.x = 1;
grid.y = ((solSize + 1) / 2) - 1;
grid.z = 1;
#endif
if(gpuOccupancyMaxPotentialBlockSizeVariableSMem(&gsize,&bsize,kernelSwap,
MLKernelSharedSize(this),solSize) == cudaSuccess)
block.x = bsize;
//else
// block.x = problem.params.blockSize ? problem.params.blockSize : solSize;
block.y = 1;
block.z = 1;
/*
* Number of movements returned
*/
moveElems = grid.y;
if(isTotal)
moveElems = grid.y*block.x;
//if(problem.params.maxMerge)
// maxMerge = problem.params.maxMerge;
//else
maxMerge = moveElems;
/*
l4printf("Kernel %s\tgrid(%d,%d,%d)\tblck(%d,%d,%d)\tshared=%u (%u KB)\n",
name,
grid.x,grid.y,grid.z,
block.x,block.y,block.z,
shared,shared / 1024);
*/
}
void
MLKernelSwap::launchKernel()
{
////lprintf("Calling kernel %s\n",name);
// Update kernel calls counter
callCount++;
// Kernel in execution
flagExec = true;
/*
lprintf("Kernel %s\tgrid(%d,%d,%d)\tblck(%d,%d,%d)\tshared=%u (%u KB)\tsize=%u\n",
name,
grid.x,grid.y,grid.z,
block.x,block.y,block.z,
shared,shared / 1024,
solSize);
lprintf("adsData=%p\n",adsData);
*/
// Calls kernel
// if(!isTotal)
//std::cout << "MLKernelSwap::launchKernel - grid(x: " << grid.x << ", y: " << grid.y << ", z: " << grid.z
// << ") block(x: " << block.x << ", y: " << block.y << ", z: " << block.z << ") shared: " << shared << std::endl;
kernelSwap<<<grid,block,shared,stream>>>(adsData,moveData,solSize);
// else
// kernelSwapTotal<<<grid,block,shared,stream>>>(adsData,moveData,solSize);
gpuDeviceSynchronize();
}
void
MLKernelSwap::applyMove(MLMove &move)
{
int i,j;
ushort t;
i = move.i;
j = move.j;
//solution->showCostCalc("BASE : ");
solution->cost += move.cost;
solution->time = sysTimer();
solution->move = move;
t = solution->clients[i];
solution->clients[i] = solution->clients[j];
solution->clients[j] = t;
solution->weights[i] = solution->dist(i - 1,i);
solution->weights[i + 1] = solution->dist(i,i + 1);
solution->weights[j] = solution->dist(j - 1,j);
if(j + 1 < solution->clientCount) {
solution->weights[j + 1] = solution->dist(j,j + 1);
}
// Requires compiling with -DMLP_COST_CHECK
#ifdef MLP_COST_CHECK
if(problem.params.checkCost) {
uint ccost = solution->costCalc();
l4printf("CHECK COST: %s,\tcost=%u, check=%u\n",name,solution->cost,ccost);
if(solution->cost != ccost) {
lprintf("%s(%u,%u): wrong=%u, right=%u\n",name ,move.i ,move.j ,solution->cost ,ccost );
solution->showCostCalc("SOLUTION: ");
EXCEPTION("INVALID COST: %s",problem.name);
}
}
#endif
}
|
6c319b271c4371da360e9a2cd6461c13fb5cc02e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include "custom_cuda_layers.h"
namespace cg = cooperative_groups;
/*
Fused bias add, residual (elementwise) add, and normalization layer.
For FP16, this kernel does not promote to FP32 in order to utilize the 2x throughput for
__half2 instructions, and avoid the conversion overhead (1/8 of __half2 arithmetic).
For specific launch constraints, see the launch functions.
*/
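/*
Illustrative __half2 idiom used by the half kernels below (a sketch, not part
of the computation; v and r are hypothetical __half2 views): fp16 buffers are
reinterpreted as packed pairs so each instruction covers two lanes, e.g.

  __half2* v = reinterpret_cast<__half2*>(vals); // two fp16 values per slot
  v[i] = __hadd2(v[i], r[i]);                    // adds both lanes at once

which is also why those kernels are launched with row_stride == hidden_dim / 2.
*/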
#define NORM_REG (MAX_REGISTERS / 4)
__global__ void fused_bias_residual_layer_norm(float* vals,
const float* residual,
const float* gamma,
const float* beta,
float epsilon,
bool preLayerNorm,
bool training,
float* vars,
float* means,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int gid = id / WARP_SIZE;
float vals_arr[NORM_REG];
__shared__ float shr[MAX_WARP_NUM];
residual += (row * row_stride);
vals += (row * row_stride);
float sum = 0.f;
int high_index = iterations * iteration_stride + id;
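// high_index handles the remainder when row_stride is not a multiple of
// blockDim.x: threads whose extra element still falls inside the row
// process one more iteration.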
#pragma unroll
for (int i = 0; i < iterations; i++) {
vals_arr[i] = residual[i * iteration_stride + id];
sum += vals_arr[i];
}
if (high_index < row_stride) {
vals_arr[iterations] = residual[high_index];
sum += vals_arr[iterations];
iterations++;
}
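// Block-wide sum in two levels: shfl_down folds within each warp, warp
// leaders stage partials in shr[], every warp reloads and folds those
// partials, and g.shfl(sum, 0) leaves the block total in all threads.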
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];
#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
sum += g.shfl_down(sum, i);
}
sum = g.shfl(sum, 0);
float mean = sum / row_stride;
if (training)
if (threadIdx.x == 0) means[row] = mean;
float variance = 0.f;
for (int i = 0; i < iterations; i++) {
vals_arr[i] -= mean;
variance += vals_arr[i] * vals_arr[i];
}
for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }
if (g.thread_rank() == 0) shr[gid] = variance;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
variance += g.shfl_down(variance, i);
}
variance = g.shfl(variance, 0);
variance /= row_stride;
variance += epsilon;
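// Note: vars[row] ends up holding variance + epsilon, which the backward
// kernels rely on when they apply rsqrtf/sqrtf to it directly.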
if (training)
if (threadIdx.x == 0) vars[row] = variance;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr[i] = vals_arr[i] * rsqrtf(variance);
vals_arr[i] =
vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id];
vals[i * iteration_stride + id] = vals_arr[i];
}
if ((high_index) < row_stride) {
vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance);
vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index];
vals[high_index] = vals_arr[iterations];
}
}
__global__ void fused_bias_residual_layer_norm(__half* vals,
const __half* residual,
const __half* gamma,
const __half* beta,
float epsilon,
bool preLayerNorm,
bool training,
__half* vars,
__half* means,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int gid = id >> WARP_SIZE_BITS;
float2 vals_f[NORM_REG];
__shared__ float shr[MAX_WARP_NUM];
__half2* vals_cast = reinterpret_cast<__half2*>(vals);
const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);
residual_cast += (row * row_stride);
vals_cast += (row * row_stride);
float sum = 0.f;
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]);
sum += vals_f[i].x;
sum += vals_f[i].y;
}
if ((high_index) < row_stride) {
vals_f[iterations] = __half22float2(residual_cast[high_index]);
sum += vals_f[iterations].x;
sum += vals_f[iterations].y;
iterations++;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
sum += g.shfl_down(sum, i);
}
sum = g.shfl(sum, 0);
float mean = sum / (row_stride * 2);
float variance = 0.f;
for (int i = 0; i < iterations; i++) {
vals_f[i].x -= mean;
vals_f[i].y -= mean;
variance += vals_f[i].x * vals_f[i].x;
variance += vals_f[i].y * vals_f[i].y;
}
for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }
if (g.thread_rank() == 0) shr[gid] = variance;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
variance += g.shfl_down(variance, i);
}
variance = g.shfl(variance, 0);
variance /= (row_stride * 2);
variance += epsilon;
__half2 variance_h = __float2half2_rn(variance);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
if (training && threadIdx.x == 0) {
vars[row] = __float2half(variance);
means[row] = __float2half(mean);
}
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
__half2 vals_arr = __float22half2_rn(vals_f[i]);
vals_arr = vals_arr * h2rsqrt(variance_h);
vals_arr =
vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id];
vals_cast[i * iteration_stride + id] = vals_arr;
}
if ((high_index) < row_stride) {
__half2 vals_arr = __float22half2_rn(vals_f[iterations]);
vals_arr = vals_arr * h2rsqrt(variance_h);
vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index];
vals_cast[high_index] = vals_arr;
}
#endif
}
template <typename T>
void launch_bias_residual_layer_norm(T* vals,
const T* residual,
const T* gamma,
const T* beta,
float epsilon,
int batch_size,
int hidden_dim,
hipStream_t stream,
bool preLayerNorm,
bool training,
T* vars,
T* means);
template <>
void launch_bias_residual_layer_norm<float>(float* vals,
const float* residual,
const float* gamma,
const float* beta,
float epsilon,
int batch_size,
int hidden_dim,
hipStream_t stream,
bool preLayerNorm,
bool training,
float* vars,
float* means)
{
int threads = THREADS;
dim3 grid_dim(batch_size);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim(threads);
hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim);
}
template <>
void launch_bias_residual_layer_norm<__half>(__half* vals,
const __half* residual,
const __half* gamma,
const __half* beta,
float epsilon,
int batch_size,
int hidden_dim,
hipStream_t stream,
bool preLayerNorm,
bool training,
__half* vars,
__half* means)
{
int threads = 128;
dim3 grid_dim(batch_size);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim(threads);
hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim / 2);
}
__global__ void fused_bias_residual_layer_norm(float* vals,
const float* residual,
const float* gamma,
const float* beta,
float epsilon,
bool preLayerNorm,
bool training,
float* vars,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int gid = id / 32;
float vals_arr[NORM_REG];
__shared__ float shr[MAX_WARP_NUM];
residual += (row * row_stride);
vals += (row * row_stride);
float sum = 0.f;
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
vals_arr[i] = residual[i * iteration_stride + id];
sum += vals_arr[i];
}
if ((high_index) < row_stride) {
vals_arr[iterations] = residual[high_index];
sum += vals_arr[iterations];
iterations++;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];
#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
sum += g.shfl_down(sum, i);
}
sum = g.shfl(sum, 0);
float mean = sum / row_stride;
float variance = 0.f;
for (int i = 0; i < iterations; i++) {
vals_arr[i] -= mean;
variance += vals_arr[i] * vals_arr[i];
}
for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }
if (g.thread_rank() == 0) shr[gid] = variance;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
variance += g.shfl_down(variance, i);
}
variance = g.shfl(variance, 0);
variance /= row_stride;
variance += epsilon;
if (training)
if (threadIdx.x == 0) vars[row] = variance;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr[i] = vals_arr[i] * rsqrtf(variance);
vals_arr[i] =
vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id];
vals[i * iteration_stride + id] = vals_arr[i];
}
if ((high_index) < row_stride) {
vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance);
vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index];
vals[high_index] = vals_arr[iterations];
}
}
__global__ void fused_bias_residual_layer_norm(__half* vals,
const __half* residual,
const __half* gamma,
const __half* beta,
float epsilon,
bool preLayerNorm,
bool training,
__half* vars,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int gid = id >> WARP_SIZE_BITS;
float2 vals_f[NORM_REG];
__shared__ float shr[MAX_WARP_NUM];
__half2* vals_cast = reinterpret_cast<__half2*>(vals);
const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);
residual_cast += (row * row_stride);
vals_cast += (row * row_stride);
float sum = 0.f;
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]);
sum += vals_f[i].x;
sum += vals_f[i].y;
}
if ((high_index) < row_stride) {
vals_f[iterations] = __half22float2(residual_cast[high_index]);
sum += vals_f[iterations].x;
sum += vals_f[iterations].y;
iterations++;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
sum += g.shfl_down(sum, i);
}
sum = g.shfl(sum, 0);
float mean = sum / (row_stride * 2);
float variance = 0.f;
for (int i = 0; i < iterations; i++) {
vals_f[i].x -= mean;
vals_f[i].y -= mean;
variance += vals_f[i].x * vals_f[i].x;
variance += vals_f[i].y * vals_f[i].y;
}
for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }
if (g.thread_rank() == 0) shr[gid] = variance;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
variance += g.shfl_down(variance, i);
}
variance = g.shfl(variance, 0);
variance /= (row_stride * 2);
variance += epsilon;
__half2 variance_h = __float2half2_rn(variance);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
if (training && threadIdx.x == 0) vars[row] = __float2half(variance);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
__half2 vals_arr = __float22half2_rn(vals_f[i]);
vals_arr = vals_arr * h2rsqrt(variance_h);
vals_arr =
vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id];
vals_cast[i * iteration_stride + id] = vals_arr;
}
if ((high_index) < row_stride) {
__half2 vals_arr = __float22half2_rn(vals_f[iterations]);
vals_arr = vals_arr * h2rsqrt(variance_h);
vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index];
vals_cast[high_index] = vals_arr;
}
#endif
}
template <typename T>
void launch_bias_residual_layer_norm(T* vals,
const T* residual,
const T* gamma,
const T* beta,
float epsilon,
int batch_size,
int hidden_dim,
hipStream_t stream,
bool preLayerNorm,
bool training,
T* vars);
/*
To tune this launch the following restrictions must be met:
For float:
row_stride == hidden_size
threads * iterations == row_stride
threads is in [32, 64, 128, 256, 512, 1024]
For half:
row_stride == hidden_size / 2
threads * iterations == row_stride
threads is in [32, 64, 128, 256, 512, 1024]
*/
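/*
Worked example (assuming THREADS == 256 as defined in custom_cuda_layers.h):
float with hidden_size = 8192 gives row_stride = 8192, threads = 256 and
iterations = 32; half with hidden_size = 8192 gives row_stride = 4096,
threads = 128 and iterations = 32. Both satisfy threads * iterations ==
row_stride with threads drawn from the allowed set.
*/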
template <>
void launch_bias_residual_layer_norm<float>(float* vals,
const float* residual,
const float* gamma,
const float* beta,
float epsilon,
int batch_size,
int hidden_dim,
hipStream_t stream,
bool preLayerNorm,
bool training,
float* vars)
{
int threads = THREADS;
dim3 grid_dim(batch_size);
// There are some limitations on calling the functions below, so we simply enumerate the supported cases.
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim(threads);
hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim);
}
template <>
void launch_bias_residual_layer_norm<__half>(__half* vals,
const __half* residual,
const __half* gamma,
const __half* beta,
float epsilon,
int batch_size,
int hidden_dim,
hipStream_t stream,
bool preLayerNorm,
bool training,
__half* vars)
{
int threads = 128;
dim3 grid_dim(batch_size);
// There are some limitations on calling the functions below, so we simply enumerate the supported cases.
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim(threads);
hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream,
vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim / 2);
}
/* Normalize Gamma & Betta gradients
* Compute gradients using either X_hat or
* normalize input (invertible).
* Combine transpose with gradients computation.
*/
template <typename T>
__global__ void LayerNormBackward1(const T* __restrict__ out_grad,
const T* __restrict__ vals_hat,
const T* __restrict__ gamma,
const T* __restrict__ betta,
T* __restrict__ gamma_grad,
T* __restrict__ betta_grad,
int rows,
int width,
bool invertible)
{
__shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
__shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];
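// The +1 column pads each tile row so that the transposed read below
// (buffer[threadIdx.y][threadIdx.x]) hits distinct shared-memory banks,
// avoiding bank conflicts.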
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.y * width + idx;
int y_stride = width * TILE_DIM;
float betta_reg = (invertible ? (float)betta[idx] : 0.0f);
float gamma_reg = (float)gamma[idx];
// Loop across matrix height
float betta_tmp = 0;
float gamma_tmp = 0;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
float grad = (float)out_grad[offset];
float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg
: (float)vals_hat[offset]);
betta_tmp += grad;
gamma_tmp += (val * grad);
offset += y_stride;
}
betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;
__syncthreads();
// Sum the shared buffer.
float s1 = betta_buffer[threadIdx.y][threadIdx.x];
float s2 = gamma_buffer[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) {
s1 += g.shfl_down(s1, i);
s2 += g.shfl_down(s2, i);
}
if (threadIdx.x == 0) {
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
betta_grad[pos] = s1;
gamma_grad[pos] = s2;
}
}
/* Normalize Gamma & Betta gradients
* Compute gradients using the input to
* the normalize.
* Combine transpose with gradients computation.
*/
template <typename T>
__global__ void LayerNormBackward1(const T* __restrict__ out_grad,
const T* __restrict__ X_data,
const T* __restrict__ vars,
const T* __restrict__ means,
T* __restrict__ gamma_grad,
T* __restrict__ betta_grad,
int rows,
int width)
{
__shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
__shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.y * width + idx;
int y_stride = width * TILE_DIM;
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
// Loop across matrix height
float betta_tmp = 0;
float gamma_tmp = 0;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
float grad = (float)out_grad[offset];
float val = (float)X_data[offset];
val = (val - (float)means[r]) * rsqrtf((float)vars[r]);
betta_tmp += grad;
gamma_tmp += (val * grad);
offset += y_stride;
}
betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;
__syncthreads();
// Sum the shared buffer.
float s1 = betta_buffer[threadIdx.y][threadIdx.x];
float s2 = gamma_buffer[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) {
s1 += g.shfl_down(s1, i);
s2 += g.shfl_down(s2, i);
}
if (threadIdx.x == 0) {
betta_grad[pos] = s1;
gamma_grad[pos] = s2;
}
}
/* Backward Normalize (Input-Gradient)
* Using the means and variances from the input
* This type of backward is invertible!
* We do the backward using the X_hat (X - u) / sqrt(variance) or the output of Normalization.
*/
__global__ void LayerNormBackward2(const float* out_grad,
const float* vals_hat,
const float* gamma,
const float* betta,
const float* vars,
float* inp_grad,
bool invertible,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
out_grad += (row * row_stride);
vals_hat += (row * row_stride);
inp_grad += (row * row_stride);
float vals_arr[NORM_REG];
float vals_hat_arr[NORM_REG];
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
float gamma_reg = gamma[i * iteration_stride + id];
vals_arr[i] = out_grad[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
vals_hat_arr[i] =
(invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) /
gamma_reg
: vals_hat[i * iteration_stride + id]);
}
if ((high_index) < row_stride) {
float gamma_reg = gamma[high_index];
vals_arr[iterations] = out_grad[high_index];
vals_arr[iterations] *= gamma_reg;
vals_hat_arr[iterations] =
(invertible ? (vals_hat[high_index] - betta[high_index]) / gamma_reg
: vals_hat[high_index]);
iterations++;
}
float var_reg = vars[row];
float sum = 0;
for (int i = 0; i < iterations; i++) {
sum += vals_hat_arr[i] * vals_arr[i] *
sqrtf(var_reg); // dval_hat = gamma * (x - u) * out_grad
vals_arr[i] *= rsqrtf(var_reg); // dvar_inv = gamma * out_grad / sqrt(var)
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); }
sum = 0;
for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum);
if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum);
}
__global__ void LayerNormBackward2(const __half* out_grad,
const __half* vals_hat,
const __half* gamma,
const __half* betta,
const __half* vars,
__half* inp_grad,
bool invertible,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
__half2 vals_arr[NORM_REG];
float2 vals_arr_f[NORM_REG];
__half2 vals_hat_arr[NORM_REG];
__half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
const __half2* out_grad_h = reinterpret_cast<const __half2*>(out_grad);
const __half2* vals_hat_h = reinterpret_cast<const __half2*>(vals_hat);
inp_grad_h += (row * row_stride);
out_grad_h += (row * row_stride);
vals_hat_h += (row * row_stride);
const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
const __half2* betta_h = (invertible ? reinterpret_cast<const __half2*>(betta) : nullptr);
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
__half2 gamma_reg = gamma_h[i * iteration_stride + id];
vals_arr[i] = out_grad_h[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
vals_hat_arr[i] =
(invertible
? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) /
gamma_reg
: vals_hat_h[i * iteration_stride + id]);
}
if ((high_index) < row_stride) {
__half2 gamma_reg = gamma_h[high_index];
vals_arr[iterations] = out_grad_h[high_index];
vals_arr[iterations] *= gamma_reg;
vals_hat_arr[iterations] =
(invertible ? (vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg
: vals_hat_h[high_index]);
iterations++;
}
__half var_h = vars[row];
__half2 var_reg = __halves2half2(var_h, var_h);
float sum = 0.f;
for (int i = 0; i < iterations; i++) {
__half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg));
float2 result_f = __half22float2(result_h);
sum += result_f.x;
sum += result_f.y;
vals_arr[i] *= h2rsqrt(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
__half2 sum_h = __float2half2_rn(sum);
for (int i = 0; i < iterations; i++) {
__half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg));
vals_arr_f[i] = __half22float2(vals_arr[i]);
float2 temp_f = __half22float2(temp);
vals_arr_f[i].x += temp_f.x;
vals_arr_f[i].y += temp_f.y;
}
sum = 0.f;
for (int i = 0; i < iterations; i++) {
sum += (vals_arr_f[i].x);
sum += (vals_arr_f[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr_f[i].x -= sum;
vals_arr_f[i].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[i]);
inp_grad_h[i * iteration_stride + id] = temp;
}
if ((high_index) < row_stride) {
vals_arr_f[iterations].x -= sum;
vals_arr_f[iterations].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[iterations]);
inp_grad_h[high_index] = temp;
}
#endif
}
template <>
void launch_layerNorm_backward<float>(const float* out_grad,
const float* vals_hat,
const float* vars,
const float* gamma,
float* gamma_grad,
float* betta_grad,
float* inp_grad,
int batch,
int hidden_dim,
hipStream_t stream[2],
bool invertible,
const float* betta)
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( LayerNormBackward1<float>), dim3(grid_dim), dim3(block_dim), 0, stream[0],
out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);
dim3 grid_dim2(batch);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads);
hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1],
out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim);
}
template <>
void launch_layerNorm_backward<__half>(const __half* out_grad,
const __half* vals_hat,
const __half* vars,
const __half* gamma,
__half* gamma_grad,
__half* betta_grad,
__half* inp_grad,
int batch,
int hidden_dim,
hipStream_t stream[2],
bool invertible,
const __half* betta)
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
//hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0],
// out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);
dim3 grid_dim2(batch);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads / 2);
hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1],
out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2);
}
/* Backward Normalize (Input-Gradient)
* Using the means and variances from the input
* This type of backward is not invertible!
* We do the backward using the input (X)
*/
__global__ void LayerNormBackward2(const float* out_grad,
const float* X_vals,
const float* gamma,
const float* vars,
const float* means,
float* inp_grad,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> WARP_SIZE_BITS;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
out_grad += (row * row_stride);
X_vals += (row * row_stride);
inp_grad += (row * row_stride);
float vals_arr[NORM_REG];
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
float gamma_reg = gamma[i * iteration_stride + id];
vals_arr[i] = out_grad[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
}
if ((high_index) < row_stride) {
float gamma_reg = gamma[high_index];
vals_arr[iterations] = out_grad[high_index];
vals_arr[iterations] *= gamma_reg;
iterations++;
}
float var_reg = vars[row];
float mean_reg = means[row];
float sum = 0;
float xu[NORM_REG];
for (int i = 0; i < iterations; i++) {
xu[i] = (X_vals[i * iteration_stride + id] - mean_reg);
sum += vals_arr[i] * xu[i];
vals_arr[i] *= rsqrtf(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
for (int i = 0; i < iterations; i++) {
vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg));
}
sum = 0;
for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum);
if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum);
}
__global__ void LayerNormBackward2(const __half* out_grad,
const __half* X_vals,
const __half* gamma,
const __half* vars,
const __half* means,
__half* inp_grad,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> WARP_SIZE_BITS;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
__half2 vals_arr[NORM_REG];
float2 vals_arr_f[NORM_REG];
__half2 xu[NORM_REG];
__half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
const __half2* out_grad_h = reinterpret_cast<const __half2*>(out_grad);
const __half2* vals_hat_h = reinterpret_cast<const __half2*>(X_vals);
inp_grad_h += (row * row_stride);
out_grad_h += (row * row_stride);
vals_hat_h += (row * row_stride);
const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
int high_index = iterations * iteration_stride + id;
__half mean_h = means[row];
__half2 mean_reg = __halves2half2(mean_h, mean_h);
#pragma unroll
for (int i = 0; i < iterations; i++) {
__half2 gamma_reg = gamma_h[i * iteration_stride + id];
vals_arr[i] = out_grad_h[i * iteration_stride + id];
vals_arr[i] *= gamma_reg; // out_grad * gamma
xu[i] = (vals_hat_h[i * iteration_stride + id] - mean_reg);
}
if ((high_index) < row_stride) {
__half2 gamma_reg = gamma_h[high_index];
vals_arr[iterations] = out_grad_h[high_index];
vals_arr[iterations] *= gamma_reg; // out_grad * gamma
xu[iterations] = (vals_hat_h[high_index] - mean_reg);
iterations++;
}
__half var_h = vars[row];
__half2 var_reg = __halves2half2(var_h, var_h);
float sum = 0.f;
for (int i = 0; i < iterations; i++) {
__half2 result_h = (xu[i] * vals_arr[i]);
float2 result_f = __half22float2(result_h);
sum += result_f.x;
sum += result_f.y;
vals_arr[i] *= h2rsqrt(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
__half2 sum_h = __float2half2_rn(sum);
for (int i = 0; i < iterations; i++) {
__half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg));
vals_arr_f[i] = __half22float2(vals_arr[i]);
float2 xu_grad_f = __half22float2(xu_grad);
vals_arr_f[i].x += xu_grad_f.x;
vals_arr_f[i].y += xu_grad_f.y;
}
sum = 0.f;
for (int i = 0; i < iterations; i++) {
sum += (vals_arr_f[i].x);
sum += (vals_arr_f[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr_f[i].x -= sum;
vals_arr_f[i].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[i]);
inp_grad_h[i * iteration_stride + id] = temp;
}
if ((high_index) < row_stride) {
vals_arr_f[iterations].x -= sum;
vals_arr_f[iterations].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[iterations]);
inp_grad_h[high_index] = temp;
}
#endif
}
template <>
void launch_layerNorm_backward<float>(const float* out_grad,
const float* X_data,
const float* vars,
const float* means,
const float* gamma,
float* gamma_grad,
float* betta_grad,
float* inp_grad,
int batch,
int hidden_dim,
hipStream_t stream[2])
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( LayerNormBackward1<float>), dim3(grid_dim), dim3(block_dim), 0, stream[0],
out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);
dim3 grid_dim2(batch);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads);
hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1],
out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim);
}
template <>
void launch_layerNorm_backward<__half>(const __half* out_grad,
const __half* X_data,
const __half* vars,
const __half* means,
const __half* gamma,
__half* gamma_grad,
__half* betta_grad,
__half* inp_grad,
int batch,
int hidden_dim,
hipStream_t stream[2])
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0],
out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);
dim3 grid_dim2(batch);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads / 2);
hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1],
out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim / 2);
}
template <typename T>
__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1,
const T* __restrict__ out_grad2,
const T* __restrict__ vals_hat,
const T* __restrict__ gamma,
const T* __restrict__ betta,
T* __restrict__ gamma_grad,
T* __restrict__ betta_grad,
int rows,
int width,
bool invertible)
{
__shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
__shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.y * width + idx;
int y_stride = width * TILE_DIM;
float betta_reg = (invertible ? (float)betta[idx] : 0.0f);
float gamma_reg = (float)gamma[idx];
// Loop across matrix height
float betta_tmp = 0;
float gamma_tmp = 0;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
float grad = (float)out_grad1[offset] + (float)out_grad2[offset];
float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg
: (float)vals_hat[offset]);
betta_tmp += grad;
gamma_tmp += (val * grad);
offset += y_stride;
}
betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;
__syncthreads();
// Sum the shared buffer.
float s1 = betta_buffer[threadIdx.y][threadIdx.x];
float s2 = gamma_buffer[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) {
s1 += g.shfl_down(s1, i);
s2 += g.shfl_down(s2, i);
}
if (threadIdx.x == 0) {
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
betta_grad[pos] = s1;
gamma_grad[pos] = s2;
}
}
template <typename T>
__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1,
const T* __restrict__ out_grad2,
const T* __restrict__ X_data,
const T* __restrict__ vars,
const T* __restrict__ means,
T* __restrict__ gamma_grad,
T* __restrict__ betta_grad,
int rows,
int width)
{
__shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
__shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.y * width + idx;
int y_stride = width * TILE_DIM;
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
// Loop across matrix height
float betta_tmp = 0;
float gamma_tmp = 0;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
float grad = (float)out_grad1[offset] + (float)out_grad2[offset];
float val = (float)X_data[offset];
val = (val - (float)means[r]) * rsqrtf((float)vars[r]);
betta_tmp += grad;
gamma_tmp += (val * grad);
offset += y_stride;
}
betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;
__syncthreads();
// Sum the shared buffer.
float s1 = betta_buffer[threadIdx.y][threadIdx.x];
float s2 = gamma_buffer[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) {
s1 += g.shfl_down(s1, i);
s2 += g.shfl_down(s2, i);
}
if (threadIdx.x == 0) {
betta_grad[pos] = s1;
gamma_grad[pos] = s2;
}
}
__global__ void LayerNormBackward2_fused_add(const float* out_grad1,
const float* out_grad2,
const float* vals_hat,
const float* gamma,
const float* betta,
const float* vars,
float* inp_grad,
bool invertible,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
out_grad1 += (row * row_stride);
out_grad2 += (row * row_stride);
vals_hat += (row * row_stride);
inp_grad += (row * row_stride);
float vals_arr[NORM_REG];
float vals_hat_arr[NORM_REG];
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
float gamma_reg = gamma[i * iteration_stride + id];
vals_arr[i] = out_grad1[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
vals_hat_arr[i] =
(invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) /
gamma_reg
: vals_hat[i * iteration_stride + id]);
}
if ((high_index) < row_stride) {
float gamma_reg = gamma[high_index];
vals_arr[iterations] = out_grad1[high_index];
vals_arr[iterations] *= gamma_reg;
vals_hat_arr[iterations] =
(invertible ? (vals_hat[high_index] - betta[high_index]) / gamma_reg
: vals_hat[high_index]);
iterations++;
}
float var_reg = vars[row];
float sum = 0;
for (int i = 0; i < iterations; i++) {
sum += vals_hat_arr[i] * vals_arr[i] * sqrtf(var_reg);
vals_arr[i] *= rsqrtf(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); }
sum = 0;
for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++)
inp_grad[i * iteration_stride + id] =
(vals_arr[i] - sum) + out_grad2[i * iteration_stride + id];
if ((high_index) < row_stride)
inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index];
}
__global__ void LayerNormBackward2_fused_add(const __half* out_grad1,
const __half* out_grad2,
const __half* vals_hat,
const __half* gamma,
const __half* betta,
const __half* vars,
__half* inp_grad,
bool invertible,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
__half2 vals_arr[NORM_REG];
float2 vals_arr_f[NORM_REG];
__half2 vals_hat_arr[NORM_REG];
// float2 result[iterations];
__half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
const __half2* out_grad_h1 = reinterpret_cast<const __half2*>(out_grad1);
const __half2* out_grad_h2 = reinterpret_cast<const __half2*>(out_grad2);
const __half2* vals_hat_h = reinterpret_cast<const __half2*>(vals_hat);
inp_grad_h += (row * row_stride);
out_grad_h1 += (row * row_stride);
out_grad_h2 += (row * row_stride);
vals_hat_h += (row * row_stride);
const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
const __half2* betta_h = (invertible ? reinterpret_cast<const __half2*>(betta) : nullptr);
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
__half2 gamma_reg = gamma_h[i * iteration_stride + id];
vals_arr[i] = out_grad_h1[i * iteration_stride + id];
vals_arr[i] *= gamma_reg; // out_grad * gamma
vals_hat_arr[i] =
(invertible
? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) /
gamma_reg
: vals_hat_h[i * iteration_stride + id]);
}
if ((high_index) < row_stride) {
__half2 gamma_reg = gamma_h[high_index];
vals_arr[iterations] = out_grad_h1[high_index];
vals_arr[iterations] *= gamma_reg; // out_grad * gamma
vals_hat_arr[iterations] =
(invertible ? (vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg
: vals_hat_h[high_index]);
iterations++;
}
__half var_h = vars[row];
__half2 var_reg = __halves2half2(var_h, var_h);
float sum = 0.f;
for (int i = 0; i < iterations; i++) {
__half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg));
float2 result_f = __half22float2(result_h);
sum += result_f.x;
sum += result_f.y;
vals_arr[i] *= h2rsqrt(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
__half2 sum_h = __float2half2_rn(sum);
for (int i = 0; i < iterations; i++) {
__half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg));
vals_arr_f[i] = __half22float2(vals_arr[i]);
float2 temp_f = __half22float2(temp);
vals_arr_f[i].x += temp_f.x;
vals_arr_f[i].y += temp_f.y;
}
sum = 0.f;
for (int i = 0; i < iterations; i++) {
sum += (vals_arr_f[i].x);
sum += (vals_arr_f[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr_f[i].x -= sum;
vals_arr_f[i].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[i]);
inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id];
}
if ((high_index) < row_stride) {
vals_arr_f[iterations].x -= sum;
vals_arr_f[iterations].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[iterations]);
inp_grad_h[high_index] = temp + out_grad_h2[high_index];
}
#endif
}
template <>
void launch_layerNorm_backward_fused_add<float>(const float* out_grad1,
const float* out_grad2,
const float* vals_hat,
const float* vars,
const float* gamma,
float* gamma_grad,
float* betta_grad,
float* inp_grad,
int batch,
int hidden_dim,
hipStream_t stream[2],
bool invertible,
const float* betta)
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( LayerNormBackward1<float>), dim3(grid_dim), dim3(block_dim), 0, stream[0],
out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);
dim3 grid_dim2(batch);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads);
hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1],
out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim);
}
template <>
void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1,
const __half* out_grad2,
const __half* vals_hat,
const __half* vars,
const __half* gamma,
__half* gamma_grad,
__half* betta_grad,
__half* inp_grad,
int batch,
int hidden_dim,
hipStream_t stream[2],
bool invertible,
const __half* betta)
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0],
out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);
dim3 grid_dim2(batch);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads / 2);
hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1],
out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2);
}
/* Backward Normalize (Input-Gradient)
* Using the means and variances from the input
* This type of backward is not invertible!
* We do the backward using the input (X)
*/
__global__ void LayerNormBackward2_fused_add(const float* out_grad1,
const float* out_grad2,
const float* X_vals,
const float* gamma,
const float* vars,
const float* means,
float* inp_grad,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
float vals_arr[NORM_REG];
float vals_hat_arr[NORM_REG];
out_grad1 += (row * row_stride);
out_grad2 += (row * row_stride);
X_vals += (row * row_stride);
inp_grad += (row * row_stride);
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
float gamma_reg = gamma[i * iteration_stride + id];
vals_arr[i] = out_grad1[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
vals_hat_arr[i] = X_vals[i * iteration_stride + id];
}
if ((high_index) < row_stride) {
float gamma_reg = gamma[high_index];
vals_arr[iterations] = out_grad1[high_index];
vals_arr[iterations] *= gamma_reg;
vals_hat_arr[iterations] = X_vals[high_index];
iterations++;
}
float var_reg = vars[row];
float mean_reg = means[row];
float sum = 0;
float xu[NORM_REG];
for (int i = 0; i < iterations; i++) {
xu[i] = (vals_hat_arr[i] - mean_reg);
sum += vals_arr[i] * xu[i];
vals_arr[i] *= rsqrtf(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
for (int i = 0; i < iterations; i++) {
vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg));
}
sum = 0;
for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++)
inp_grad[i * iteration_stride + id] =
(vals_arr[i] - sum) + out_grad2[i * iteration_stride + id];
if ((high_index) < row_stride)
inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index];
}
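/* Illustrative sketch (not part of the original kernels): a minimal host-side
 * reference for the fused-add input gradient computed above, for one row of
 * width `width`. The function name and plain-float form are our assumptions.
 * With g = dy1 * gamma and xu = x - mean, it computes
 *   dx = rstd * g - mean(g * xu) * xu * rstd / var,
 * subtracts the row mean of that result, then adds the second gradient dy2.
 */
static void layer_norm_bwd_fused_add_ref(const float* dy1,
                                         const float* dy2,
                                         const float* x,
                                         const float* gamma,
                                         float mean,
                                         float var,
                                         float* dx,
                                         int width)
{
    const float rstd = 1.0f / sqrtf(var);
    float sum_gxu = 0.f;
    for (int i = 0; i < width; i++) sum_gxu += dy1[i] * gamma[i] * (x[i] - mean);
    sum_gxu /= width;
    float sum_dx = 0.f;
    for (int i = 0; i < width; i++) {
        dx[i] = dy1[i] * gamma[i] * rstd - sum_gxu * (x[i] - mean) * rstd / var;
        sum_dx += dx[i];
    }
    sum_dx /= width;
    for (int i = 0; i < width; i++) dx[i] = (dx[i] - sum_dx) + dy2[i];
}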
__global__ void LayerNormBackward2_fused_add(const __half* out_grad1,
const __half* out_grad2,
const __half* X_vals,
const __half* gamma,
const __half* vars,
const __half* means,
__half* inp_grad,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
__half2 vals_arr[NORM_REG];
float2 vals_arr_f[NORM_REG];
__half2 vals_hat_arr[NORM_REG];
__half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
const __half2* out_grad_h1 = reinterpret_cast<const __half2*>(out_grad1);
const __half2* out_grad_h2 = reinterpret_cast<const __half2*>(out_grad2);
const __half2* vals_hat_h = reinterpret_cast<const __half2*>(X_vals);
out_grad_h1 += (row * row_stride);
out_grad_h2 += (row * row_stride);
inp_grad_h += (row * row_stride);
vals_hat_h += (row * row_stride);
const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
__half2 gamma_reg = gamma_h[i * iteration_stride + id];
vals_arr[i] = out_grad_h1[i * iteration_stride + id];
vals_arr[i] *= gamma_reg; // out_grad * gamma
vals_hat_arr[i] = vals_hat_h[i * iteration_stride + id];
}
if ((high_index) < row_stride) {
__half2 gamma_reg = gamma_h[high_index];
vals_arr[iterations] = out_grad_h1[high_index];
vals_arr[iterations] *= gamma_reg; // out_grad * gamma
vals_hat_arr[iterations] = vals_hat_h[high_index];
iterations++;
}
__half mean_h = means[row];
__half var_h = vars[row];
__half2 var_reg = __halves2half2(var_h, var_h);
__half2 mean_reg = __halves2half2(mean_h, mean_h);
__half2 xu[NORM_REG];
float sum = 0.f;
for (int i = 0; i < iterations; i++) {
xu[i] = (vals_hat_arr[i] - mean_reg);
__half2 result_h = (xu[i] * vals_arr[i]);
float2 result_f = __half22float2(result_h);
sum += result_f.x;
sum += result_f.y;
vals_arr[i] *= h2rsqrt(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
__half2 sum_h = __float2half2_rn(sum);
for (int i = 0; i < iterations; i++) {
__half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg));
vals_arr_f[i] = __half22float2(vals_arr[i]);
float2 xu_grad_f = __half22float2(xu_grad);
vals_arr_f[i].x += xu_grad_f.x;
vals_arr_f[i].y += xu_grad_f.y;
}
sum = 0.f;
for (int i = 0; i < iterations; i++) {
sum += (vals_arr_f[i].x);
sum += (vals_arr_f[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr_f[i].x -= sum;
vals_arr_f[i].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[i]);
inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id];
}
if ((high_index) < row_stride) {
vals_arr_f[iterations].x -= sum;
vals_arr_f[iterations].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[iterations]);
inp_grad_h[high_index] = temp + out_grad_h2[high_index];
}
#endif
}
template <>
void launch_layerNorm_backward_fused_add<float>(const float* out_grad1,
const float* out_grad2,
const float* X_data,
const float* vars,
const float* means,
const float* gamma,
float* gamma_grad,
float* betta_grad,
float* inp_grad,
int batch,
int hidden_dim,
hipStream_t stream[2])
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( LayerNormBackward1<float>), dim3(grid_dim), dim3(block_dim), 0, stream[0],
out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);
dim3 grid_dim2(batch);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads);
hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1],
out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim);
}
template <>
void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1,
const __half* out_grad2,
const __half* X_data,
const __half* vars,
const __half* means,
const __half* gamma,
__half* gamma_grad,
__half* betta_grad,
__half* inp_grad,
int batch,
int hidden_dim,
hipStream_t stream[2])
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0],
out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);
dim3 grid_dim2(batch);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads / 2);
hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1],
out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim / 2);
}
| 6c319b271c4371da360e9a2cd6461c13fb5cc02e.cu | // Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include "custom_cuda_layers.h"
namespace cg = cooperative_groups;
/*
Fused bias add, residual (elementwise) add, and normalization layer.
For FP16, this kernel does not promote to FP32 in order to utilize the 2x throughput for
__half2 instructions, and avoid the conversion overhead (1/8 of __hal2 arithmetic).
For specific launch constraints, see the launch functions.
*/
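/* Illustrative sketch (not part of the original file): a host-side reference
 * of the fused residual-add + LayerNorm forward the kernels below compute,
 * assuming a row-major [rows, width] layout and that the math declarations are
 * reachable through the existing includes. The name is our assumption; the
 * FP16 kernels perform the same math on __half2 pairs.
 */
static void fused_residual_layer_norm_ref(float* out,
                                          const float* residual,
                                          const float* gamma,
                                          const float* beta,
                                          float epsilon,
                                          int rows,
                                          int width)
{
    for (int r = 0; r < rows; r++) {
        const float* x = residual + r * width;
        float* y = out + r * width;
        float mean = 0.f;
        for (int i = 0; i < width; i++) mean += x[i];
        mean /= width;
        float var = 0.f;
        for (int i = 0; i < width; i++) var += (x[i] - mean) * (x[i] - mean);
        var = var / width + epsilon;  // epsilon folded into the stored variance
        const float rstd = 1.0f / sqrtf(var);
        for (int i = 0; i < width; i++) y[i] = (x[i] - mean) * rstd * gamma[i] + beta[i];
    }
}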
#define NORM_REG (MAX_REGISTERS / 4)
__global__ void fused_bias_residual_layer_norm(float* vals,
const float* residual,
const float* gamma,
const float* beta,
float epsilon,
bool preLayerNorm,
bool training,
float* vars,
float* means,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int gid = id / WARP_SIZE;
float vals_arr[NORM_REG];
__shared__ float shr[MAX_WARP_NUM];
residual += (row * row_stride);
vals += (row * row_stride);
float sum = 0.f;
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
vals_arr[i] = residual[i * iteration_stride + id];
sum += vals_arr[i];
}
if (high_index < row_stride) {
vals_arr[iterations] = residual[high_index];
sum += vals_arr[iterations];
iterations++;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];
#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
sum += g.shfl_down(sum, i);
}
sum = g.shfl(sum, 0);
float mean = sum / row_stride;
if (training)
if (threadIdx.x == 0) means[row] = mean;
float variance = 0.f;
for (int i = 0; i < iterations; i++) {
vals_arr[i] -= mean;
variance += vals_arr[i] * vals_arr[i];
}
for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }
if (g.thread_rank() == 0) shr[gid] = variance;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
variance += g.shfl_down(variance, i);
}
variance = g.shfl(variance, 0);
variance /= row_stride;
variance += epsilon;
if (training)
if (threadIdx.x == 0) vars[row] = variance;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr[i] = vals_arr[i] * rsqrtf(variance);
vals_arr[i] =
vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id];
vals[i * iteration_stride + id] = vals_arr[i];
}
if ((high_index) < row_stride) {
vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance);
vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index];
vals[high_index] = vals_arr[iterations];
}
}
__global__ void fused_bias_residual_layer_norm(__half* vals,
const __half* residual,
const __half* gamma,
const __half* beta,
float epsilon,
bool preLayerNorm,
bool training,
__half* vars,
__half* means,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int gid = id >> WARP_SIZE_BITS;
float2 vals_f[NORM_REG];
__shared__ float shr[MAX_WARP_NUM];
__half2* vals_cast = reinterpret_cast<__half2*>(vals);
const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);
residual_cast += (row * row_stride);
vals_cast += (row * row_stride);
float sum = 0.f;
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]);
sum += vals_f[i].x;
sum += vals_f[i].y;
}
if ((high_index) < row_stride) {
vals_f[iterations] = __half22float2(residual_cast[high_index]);
sum += vals_f[iterations].x;
sum += vals_f[iterations].y;
iterations++;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
sum += g.shfl_down(sum, i);
}
sum = g.shfl(sum, 0);
float mean = sum / (row_stride * 2);
float variance = 0.f;
for (int i = 0; i < iterations; i++) {
vals_f[i].x -= mean;
vals_f[i].y -= mean;
variance += vals_f[i].x * vals_f[i].x;
variance += vals_f[i].y * vals_f[i].y;
}
for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }
if (g.thread_rank() == 0) shr[gid] = variance;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
variance += g.shfl_down(variance, i);
}
variance = g.shfl(variance, 0);
variance /= (row_stride * 2);
variance += epsilon;
__half2 variance_h = __float2half2_rn(variance);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
if (training && threadIdx.x == 0) {
vars[row] = __float2half(variance);
means[row] = __float2half(mean);
}
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
__half2 vals_arr = __float22half2_rn(vals_f[i]);
vals_arr = vals_arr * h2rsqrt(variance_h);
vals_arr =
vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id];
vals_cast[i * iteration_stride + id] = vals_arr;
}
if ((high_index) < row_stride) {
__half2 vals_arr = __float22half2_rn(vals_f[iterations]);
vals_arr = vals_arr * h2rsqrt(variance_h);
vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index];
vals_cast[high_index] = vals_arr;
}
#endif
}
template <typename T>
void launch_bias_residual_layer_norm(T* vals,
const T* residual,
const T* gamma,
const T* beta,
float epsilon,
int batch_size,
int hidden_dim,
cudaStream_t stream,
bool preLayerNorm,
bool training,
T* vars,
T* means);
template <>
void launch_bias_residual_layer_norm<float>(float* vals,
const float* residual,
const float* gamma,
const float* beta,
float epsilon,
int batch_size,
int hidden_dim,
cudaStream_t stream,
bool preLayerNorm,
bool training,
float* vars,
float* means)
{
int threads = THREADS;
dim3 grid_dim(batch_size);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim(threads);
fused_bias_residual_layer_norm<<<grid_dim, block_dim, 0, stream>>>(
vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim);
}
template <>
void launch_bias_residual_layer_norm<__half>(__half* vals,
const __half* residual,
const __half* gamma,
const __half* beta,
float epsilon,
int batch_size,
int hidden_dim,
cudaStream_t stream,
bool preLayerNorm,
bool training,
__half* vars,
__half* means)
{
int threads = 128;
dim3 grid_dim(batch_size);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim(threads);
fused_bias_residual_layer_norm<<<grid_dim, block_dim, 0, stream>>>(
vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim / 2);
}
__global__ void fused_bias_residual_layer_norm(float* vals,
const float* residual,
const float* gamma,
const float* beta,
float epsilon,
bool preLayerNorm,
bool training,
float* vars,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int gid = id / 32;
float vals_arr[NORM_REG];
__shared__ float shr[MAX_WARP_NUM];
residual += (row * row_stride);
vals += (row * row_stride);
float sum = 0.f;
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
vals_arr[i] = residual[i * iteration_stride + id];
sum += vals_arr[i];
}
if ((high_index) < row_stride) {
vals_arr[iterations] = residual[high_index];
sum += vals_arr[iterations];
iterations++;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];
#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
sum += g.shfl_down(sum, i);
}
sum = g.shfl(sum, 0);
float mean = sum / row_stride;
float variance = 0.f;
for (int i = 0; i < iterations; i++) {
vals_arr[i] -= mean;
variance += vals_arr[i] * vals_arr[i];
}
for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }
if (g.thread_rank() == 0) shr[gid] = variance;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
variance += g.shfl_down(variance, i);
}
variance = g.shfl(variance, 0);
variance /= row_stride;
variance += epsilon;
if (training)
if (threadIdx.x == 0) vars[row] = variance;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr[i] = vals_arr[i] * rsqrtf(variance);
vals_arr[i] =
vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id];
vals[i * iteration_stride + id] = vals_arr[i];
}
if ((high_index) < row_stride) {
vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance);
vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index];
vals[high_index] = vals_arr[iterations];
}
}
__global__ void fused_bias_residual_layer_norm(__half* vals,
const __half* residual,
const __half* gamma,
const __half* beta,
float epsilon,
bool preLayerNorm,
bool training,
__half* vars,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int gid = id >> WARP_SIZE_BITS;
float2 vals_f[NORM_REG];
__shared__ float shr[MAX_WARP_NUM];
__half2* vals_cast = reinterpret_cast<__half2*>(vals);
const __half2* residual_cast = reinterpret_cast<const __half2*>(residual);
residual_cast += (row * row_stride);
vals_cast += (row * row_stride);
float sum = 0.f;
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]);
sum += vals_f[i].x;
sum += vals_f[i].y;
}
if ((high_index) < row_stride) {
vals_f[iterations] = __half22float2(residual_cast[high_index]);
sum += vals_f[iterations].x;
sum += vals_f[iterations].y;
iterations++;
}
for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) shr[gid] = sum;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
sum += g.shfl_down(sum, i);
}
sum = g.shfl(sum, 0);
float mean = sum / (row_stride * 2);
float variance = 0.f;
for (int i = 0; i < iterations; i++) {
vals_f[i].x -= mean;
vals_f[i].y -= mean;
variance += vals_f[i].x * vals_f[i].x;
variance += vals_f[i].y * vals_f[i].y;
}
for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); }
if (g.thread_rank() == 0) shr[gid] = variance;
b.sync();
if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
b.sync();
#endif
for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) {
variance += g.shfl_down(variance, i);
}
variance = g.shfl(variance, 0);
variance /= (row_stride * 2);
variance += epsilon;
__half2 variance_h = __float2half2_rn(variance);
const __half2* gamma_cast = reinterpret_cast<const __half2*>(gamma);
const __half2* beta_cast = reinterpret_cast<const __half2*>(beta);
if (training && threadIdx.x == 0) vars[row] = __float2half(variance);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
__half2 vals_arr = __float22half2_rn(vals_f[i]);
vals_arr = vals_arr * h2rsqrt(variance_h);
vals_arr =
vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id];
vals_cast[i * iteration_stride + id] = vals_arr;
}
if ((high_index) < row_stride) {
__half2 vals_arr = __float22half2_rn(vals_f[iterations]);
vals_arr = vals_arr * h2rsqrt(variance_h);
vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index];
vals_cast[high_index] = vals_arr;
}
#endif
}
template <typename T>
void launch_bias_residual_layer_norm(T* vals,
const T* residual,
const T* gamma,
const T* beta,
float epsilon,
int batch_size,
int hidden_dim,
cudaStream_t stream,
bool preLayerNorm,
bool training,
T* vars);
/*
To tune this launch, the following restrictions must be met:
For float:
row_stride == hidden_size
threads * iterations == row_stride
threads is in [32, 64, 128, 256, 512, 1024]
For half:
row_stride == hidden_size / 2
threads * iterations == row_stride
threads is in [32, 64, 128, 256, 512, 1024]
*/
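/* Illustrative sketch (not part of the original file): the block-size ladder
 * the float launchers below apply, factored into a helper. The helper name is
 * our assumption; the __half launchers use the same idea with a halved
 * row_stride, a smaller base, and one extra doubling step.
 */
static int pick_layernorm_block_threads(int base, int hidden_dim)
{
    int threads = base;  // e.g. THREADS for float
    if (hidden_dim > 16384 && hidden_dim <= 32768)
        threads <<= 1;
    else if (hidden_dim > 32768 && hidden_dim <= 65536)
        threads <<= 2;
    else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
    return threads;
}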
template <>
void launch_bias_residual_layer_norm<float>(float* vals,
const float* residual,
const float* gamma,
const float* beta,
float epsilon,
int batch_size,
int hidden_dim,
cudaStream_t stream,
bool preLayerNorm,
bool training,
float* vars)
{
int threads = THREADS;
dim3 grid_dim(batch_size);
    // There are some limitations on calling the functions below; for now we just enumerate the
    // supported cases.
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim(threads);
fused_bias_residual_layer_norm<<<grid_dim, block_dim, 0, stream>>>(
vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim);
}
template <>
void launch_bias_residual_layer_norm<__half>(__half* vals,
const __half* residual,
const __half* gamma,
const __half* beta,
float epsilon,
int batch_size,
int hidden_dim,
cudaStream_t stream,
bool preLayerNorm,
bool training,
__half* vars)
{
int threads = 128;
dim3 grid_dim(batch_size);
    // There are some limitations on calling the functions below; for now we just enumerate the
    // supported cases.
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim(threads);
fused_bias_residual_layer_norm<<<grid_dim, block_dim, 0, stream>>>(
vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim / 2);
}
/* Normalize Gamma & Betta gradients
 * Compute gradients using either X_hat or
 * the normalized input (invertible).
 * Combine the transpose with the gradient computation.
*/
template <typename T>
__global__ void LayerNormBackward1(const T* __restrict__ out_grad,
const T* __restrict__ vals_hat,
const T* __restrict__ gamma,
const T* __restrict__ betta,
T* __restrict__ gamma_grad,
T* __restrict__ betta_grad,
int rows,
int width,
bool invertible)
{
__shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
__shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.y * width + idx;
int y_stride = width * TILE_DIM;
float betta_reg = (invertible ? (float)betta[idx] : 0.0f);
float gamma_reg = (float)gamma[idx];
// Loop across matrix height
float betta_tmp = 0;
float gamma_tmp = 0;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
float grad = (float)out_grad[offset];
float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg
: (float)vals_hat[offset]);
betta_tmp += grad;
gamma_tmp += (val * grad);
offset += y_stride;
}
betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;
__syncthreads();
// Sum the shared buffer.
float s1 = betta_buffer[threadIdx.y][threadIdx.x];
float s2 = gamma_buffer[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) {
s1 += g.shfl_down(s1, i);
s2 += g.shfl_down(s2, i);
}
if (threadIdx.x == 0) {
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
betta_grad[pos] = s1;
gamma_grad[pos] = s2;
}
}
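/* Illustrative sketch (not part of the original kernels): a host-side
 * reference for the gamma/betta gradients the tiled kernel above reduces,
 * using the invertible recovery x_hat = (y - betta) / gamma. The function
 * name is our assumption; the non-invertible variant below instead rebuilds
 * x_hat from X, means, and vars.
 */
static void gamma_betta_grad_ref(const float* dy,
                                 const float* y,
                                 const float* gamma,
                                 const float* betta,
                                 float* dgamma,
                                 float* dbetta,
                                 int rows,
                                 int width)
{
    for (int c = 0; c < width; c++) {
        dgamma[c] = 0.f;
        dbetta[c] = 0.f;
    }
    for (int r = 0; r < rows; r++)
        for (int c = 0; c < width; c++) {
            float grad = dy[r * width + c];
            float x_hat = (y[r * width + c] - betta[c]) / gamma[c];
            dbetta[c] += grad;
            dgamma[c] += x_hat * grad;
        }
}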
/* Normalize Gamma & Betta gradients
 * Compute gradients using the input to
 * the normalization.
 * Combine the transpose with the gradient computation.
*/
template <typename T>
__global__ void LayerNormBackward1(const T* __restrict__ out_grad,
const T* __restrict__ X_data,
const T* __restrict__ vars,
const T* __restrict__ means,
T* __restrict__ gamma_grad,
T* __restrict__ betta_grad,
int rows,
int width)
{
__shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
__shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.y * width + idx;
int y_stride = width * TILE_DIM;
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
// Loop across matrix height
float betta_tmp = 0;
float gamma_tmp = 0;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
float grad = (float)out_grad[offset];
float val = (float)X_data[offset];
val = (val - (float)means[r]) * rsqrtf((float)vars[r]);
betta_tmp += grad;
gamma_tmp += (val * grad);
offset += y_stride;
}
betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;
__syncthreads();
// Sum the shared buffer.
float s1 = betta_buffer[threadIdx.y][threadIdx.x];
float s2 = gamma_buffer[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) {
s1 += g.shfl_down(s1, i);
s2 += g.shfl_down(s2, i);
}
if (threadIdx.x == 0) {
betta_grad[pos] = s1;
gamma_grad[pos] = s2;
}
}
/* Backward Normalize (Input-Gradient)
* Using the means and variances from the input
* This type of backward is invertible!
 * We do the backward using X_hat = (X - u) / sqrt(variance), i.e. the output of the normalization.
*/
__global__ void LayerNormBackward2(const float* out_grad,
const float* vals_hat,
const float* gamma,
const float* betta,
const float* vars,
float* inp_grad,
bool invertible,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
out_grad += (row * row_stride);
vals_hat += (row * row_stride);
inp_grad += (row * row_stride);
float vals_arr[NORM_REG];
float vals_hat_arr[NORM_REG];
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
float gamma_reg = gamma[i * iteration_stride + id];
vals_arr[i] = out_grad[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
vals_hat_arr[i] =
(invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) /
gamma_reg
: vals_hat[i * iteration_stride + id]);
}
if ((high_index) < row_stride) {
float gamma_reg = gamma[high_index];
vals_arr[iterations] = out_grad[high_index];
vals_arr[iterations] *= gamma_reg;
vals_hat_arr[iterations] =
(invertible ? (vals_hat[high_index] - betta[high_index]) / gamma_reg
: vals_hat[high_index]);
iterations++;
}
float var_reg = vars[row];
float sum = 0;
for (int i = 0; i < iterations; i++) {
sum += vals_hat_arr[i] * vals_arr[i] *
sqrtf(var_reg); // dval_hat = gamma * (x - u) * out_grad
vals_arr[i] *= rsqrtf(var_reg); // dvar_inv = gamma * out_grad / sqrt(var)
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); }
sum = 0;
for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum);
if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum);
}
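/* Illustrative sketch (not part of the original kernels): the row-wise
 * identity the invertible kernel above implements, as a host-side reference.
 * With g = dy * gamma and x_hat recovered from the output, the exact result is
 *   dx = rstd * (g - mean(g) - x_hat * mean(g * x_hat)).
 * The function name is our assumption; vars[] already includes epsilon.
 */
static void layer_norm_bwd_invertible_ref(const float* dy,
                                          const float* x_hat,
                                          const float* gamma,
                                          float var,
                                          float* dx,
                                          int width)
{
    const float rstd = 1.0f / sqrtf(var);
    float mean_g = 0.f, mean_gx = 0.f;
    for (int i = 0; i < width; i++) {
        float g = dy[i] * gamma[i];
        mean_g += g;
        mean_gx += g * x_hat[i];
    }
    mean_g /= width;
    mean_gx /= width;
    for (int i = 0; i < width; i++)
        dx[i] = rstd * (dy[i] * gamma[i] - mean_g - x_hat[i] * mean_gx);
}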
__global__ void LayerNormBackward2(const __half* out_grad,
const __half* vals_hat,
const __half* gamma,
const __half* betta,
const __half* vars,
__half* inp_grad,
bool invertible,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
__half2 vals_arr[NORM_REG];
float2 vals_arr_f[NORM_REG];
__half2 vals_hat_arr[NORM_REG];
__half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
const __half2* out_grad_h = reinterpret_cast<const __half2*>(out_grad);
const __half2* vals_hat_h = reinterpret_cast<const __half2*>(vals_hat);
inp_grad_h += (row * row_stride);
out_grad_h += (row * row_stride);
vals_hat_h += (row * row_stride);
const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
const __half2* betta_h = (invertible ? reinterpret_cast<const __half2*>(betta) : nullptr);
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
__half2 gamma_reg = gamma_h[i * iteration_stride + id];
vals_arr[i] = out_grad_h[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
vals_hat_arr[i] =
(invertible
? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) /
gamma_reg
: vals_hat_h[i * iteration_stride + id]);
}
if ((high_index) < row_stride) {
__half2 gamma_reg = gamma_h[high_index];
vals_arr[iterations] = out_grad_h[high_index];
vals_arr[iterations] *= gamma_reg;
vals_hat_arr[iterations] =
(invertible ? (vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg
: vals_hat_h[high_index]);
iterations++;
}
__half var_h = vars[row];
__half2 var_reg = __halves2half2(var_h, var_h);
float sum = 0.f;
for (int i = 0; i < iterations; i++) {
__half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg));
float2 result_f = __half22float2(result_h);
sum += result_f.x;
sum += result_f.y;
vals_arr[i] *= h2rsqrt(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
__half2 sum_h = __float2half2_rn(sum);
for (int i = 0; i < iterations; i++) {
__half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg));
vals_arr_f[i] = __half22float2(vals_arr[i]);
float2 temp_f = __half22float2(temp);
vals_arr_f[i].x += temp_f.x;
vals_arr_f[i].y += temp_f.y;
}
sum = 0.f;
for (int i = 0; i < iterations; i++) {
sum += (vals_arr_f[i].x);
sum += (vals_arr_f[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr_f[i].x -= sum;
vals_arr_f[i].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[i]);
inp_grad_h[i * iteration_stride + id] = temp;
}
if ((high_index) < row_stride) {
vals_arr_f[iterations].x -= sum;
vals_arr_f[iterations].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[iterations]);
inp_grad_h[high_index] = temp;
}
#endif
}
template <>
void launch_layerNorm_backward<float>(const float* out_grad,
const float* vals_hat,
const float* vars,
const float* gamma,
float* gamma_grad,
float* betta_grad,
float* inp_grad,
int batch,
int hidden_dim,
cudaStream_t stream[2],
bool invertible,
const float* betta)
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
LayerNormBackward1<float><<<grid_dim, block_dim, 0, stream[0]>>>(
out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);
dim3 grid_dim2(batch);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads);
LayerNormBackward2<<<grid_dim2, block_dim2, 0, stream[1]>>>(
out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim);
}
template <>
void launch_layerNorm_backward<__half>(const __half* out_grad,
const __half* vals_hat,
const __half* vars,
const __half* gamma,
__half* gamma_grad,
__half* betta_grad,
__half* inp_grad,
int batch,
int hidden_dim,
cudaStream_t stream[2],
bool invertible,
const __half* betta)
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
    LayerNormBackward1<__half><<<grid_dim, block_dim, 0, stream[0]>>>(
        out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);
dim3 grid_dim2(batch);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads / 2);
LayerNormBackward2<<<grid_dim2, block_dim2, 0, stream[1]>>>(
out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2);
}
/* Backward Normalize (Input-Gradient)
* Using the means and variances from the input
* This type of backward is not invertible!
* We do the backward using the input (X)
*/
__global__ void LayerNormBackward2(const float* out_grad,
const float* X_vals,
const float* gamma,
const float* vars,
const float* means,
float* inp_grad,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> WARP_SIZE_BITS;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
out_grad += (row * row_stride);
X_vals += (row * row_stride);
inp_grad += (row * row_stride);
float vals_arr[NORM_REG];
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
float gamma_reg = gamma[i * iteration_stride + id];
vals_arr[i] = out_grad[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
}
if ((high_index) < row_stride) {
float gamma_reg = gamma[high_index];
vals_arr[iterations] = out_grad[high_index];
vals_arr[iterations] *= gamma_reg;
iterations++;
}
float var_reg = vars[row];
float mean_reg = means[row];
float sum = 0;
float xu[NORM_REG];
for (int i = 0; i < iterations; i++) {
xu[i] = (X_vals[i * iteration_stride + id] - mean_reg);
sum += vals_arr[i] * xu[i];
vals_arr[i] *= rsqrtf(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
for (int i = 0; i < iterations; i++) {
vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg));
}
sum = 0;
for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum);
if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum);
}
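/* Illustrative sketch (not part of the original kernels): two cheap row-wise
 * invariants of the input gradient above that a unit test could assert up to
 * floating-point (and epsilon) tolerance: sum(dx) == 0 and
 * sum(dx * (x - mean)) == 0, the two projections removed by the block
 * reductions. Names are our assumptions.
 */
static bool layer_norm_bwd_row_invariants(const float* dx,
                                          const float* x,
                                          float mean,
                                          int width,
                                          float tol)
{
    float s0 = 0.f, s1 = 0.f;
    for (int i = 0; i < width; i++) {
        s0 += dx[i];
        s1 += dx[i] * (x[i] - mean);
    }
    return fabsf(s0) <= tol && fabsf(s1) <= tol;
}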
__global__ void LayerNormBackward2(const __half* out_grad,
const __half* X_vals,
const __half* gamma,
const __half* vars,
const __half* means,
__half* inp_grad,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id >> WARP_SIZE_BITS;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
__half2 vals_arr[NORM_REG];
float2 vals_arr_f[NORM_REG];
__half2 xu[NORM_REG];
__half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
const __half2* out_grad_h = reinterpret_cast<const __half2*>(out_grad);
const __half2* vals_hat_h = reinterpret_cast<const __half2*>(X_vals);
inp_grad_h += (row * row_stride);
out_grad_h += (row * row_stride);
vals_hat_h += (row * row_stride);
const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
int high_index = iterations * iteration_stride + id;
__half mean_h = means[row];
__half2 mean_reg = __halves2half2(mean_h, mean_h);
#pragma unroll
for (int i = 0; i < iterations; i++) {
__half2 gamma_reg = gamma_h[i * iteration_stride + id];
vals_arr[i] = out_grad_h[i * iteration_stride + id];
vals_arr[i] *= gamma_reg; // out_grad * gamma
xu[i] = (vals_hat_h[i * iteration_stride + id] - mean_reg);
}
if ((high_index) < row_stride) {
__half2 gamma_reg = gamma_h[high_index];
vals_arr[iterations] = out_grad_h[high_index];
vals_arr[iterations] *= gamma_reg; // out_grad * gamma
xu[iterations] = (vals_hat_h[high_index] - mean_reg);
iterations++;
}
__half var_h = vars[row];
__half2 var_reg = __halves2half2(var_h, var_h);
float sum = 0.f;
for (int i = 0; i < iterations; i++) {
__half2 result_h = (xu[i] * vals_arr[i]);
float2 result_f = __half22float2(result_h);
sum += result_f.x;
sum += result_f.y;
vals_arr[i] *= h2rsqrt(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
__half2 sum_h = __float2half2_rn(sum);
for (int i = 0; i < iterations; i++) {
__half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg));
vals_arr_f[i] = __half22float2(vals_arr[i]);
float2 xu_grad_f = __half22float2(xu_grad);
vals_arr_f[i].x += xu_grad_f.x;
vals_arr_f[i].y += xu_grad_f.y;
}
sum = 0.f;
for (int i = 0; i < iterations; i++) {
sum += (vals_arr_f[i].x);
sum += (vals_arr_f[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr_f[i].x -= sum;
vals_arr_f[i].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[i]);
inp_grad_h[i * iteration_stride + id] = temp;
}
if ((high_index) < row_stride) {
vals_arr_f[iterations].x -= sum;
vals_arr_f[iterations].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[iterations]);
inp_grad_h[high_index] = temp;
}
#endif
}
template <>
void launch_layerNorm_backward<float>(const float* out_grad,
const float* X_data,
const float* vars,
const float* means,
const float* gamma,
float* gamma_grad,
float* betta_grad,
float* inp_grad,
int batch,
int hidden_dim,
cudaStream_t stream[2])
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
LayerNormBackward1<float><<<grid_dim, block_dim, 0, stream[0]>>>(
out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);
dim3 grid_dim2(batch);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads);
LayerNormBackward2<<<grid_dim2, block_dim2, 0, stream[1]>>>(
out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim);
}
template <>
void launch_layerNorm_backward<__half>(const __half* out_grad,
const __half* X_data,
const __half* vars,
const __half* means,
const __half* gamma,
__half* gamma_grad,
__half* betta_grad,
__half* inp_grad,
int batch,
int hidden_dim,
cudaStream_t stream[2])
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
LayerNormBackward1<__half><<<grid_dim, block_dim, 0, stream[0]>>>(
out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);
dim3 grid_dim2(batch);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads / 2);
LayerNormBackward2<<<grid_dim2, block_dim2, 0, stream[1]>>>(
out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim / 2);
}
template <typename T>
__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1,
const T* __restrict__ out_grad2,
const T* __restrict__ vals_hat,
const T* __restrict__ gamma,
const T* __restrict__ betta,
T* __restrict__ gamma_grad,
T* __restrict__ betta_grad,
int rows,
int width,
bool invertible)
{
__shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
__shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.y * width + idx;
int y_stride = width * TILE_DIM;
float betta_reg = (invertible ? (float)betta[idx] : 0.0f);
float gamma_reg = (float)gamma[idx];
// Loop across matrix height
float betta_tmp = 0;
float gamma_tmp = 0;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
float grad = (float)out_grad1[offset] + (float)out_grad2[offset];
float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg
: (float)vals_hat[offset]);
betta_tmp += grad;
gamma_tmp += (val * grad);
offset += y_stride;
}
betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;
__syncthreads();
// Sum the shared buffer.
float s1 = betta_buffer[threadIdx.y][threadIdx.x];
float s2 = gamma_buffer[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) {
s1 += g.shfl_down(s1, i);
s2 += g.shfl_down(s2, i);
}
if (threadIdx.x == 0) {
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
betta_grad[pos] = s1;
gamma_grad[pos] = s2;
}
}
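// Note on the Backward1 reductions (this kernel and the variant below): partial sums are
// stored transposed ([threadIdx.x][threadIdx.y] on write, [threadIdx.y][threadIdx.x] on
// read) so each warp can finish one output column with shuffles; the TILE_DIM + 1 padding
// keeps the transposed shared-memory accesses free of bank conflicts.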
template <typename T>
__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1,
const T* __restrict__ out_grad2,
const T* __restrict__ X_data,
const T* __restrict__ vars,
const T* __restrict__ means,
T* __restrict__ gamma_grad,
T* __restrict__ betta_grad,
int rows,
int width)
{
__shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1];
__shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1];
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<TILE_DIM> g = cg::tiled_partition<TILE_DIM>(b);
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.y * width + idx;
int y_stride = width * TILE_DIM;
int pos = blockIdx.x * TILE_DIM + threadIdx.y;
// Loop across matrix height
float betta_tmp = 0;
float gamma_tmp = 0;
for (int r = threadIdx.y; r < rows; r += TILE_DIM) {
float grad = (float)out_grad1[offset] + (float)out_grad2[offset];
float val = (float)X_data[offset];
val = (val - (float)means[r]) * rsqrtf((float)vars[r]);
betta_tmp += grad;
gamma_tmp += (val * grad);
offset += y_stride;
}
betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp;
gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp;
__syncthreads();
// Sum the shared buffer.
float s1 = betta_buffer[threadIdx.y][threadIdx.x];
float s2 = gamma_buffer[threadIdx.y][threadIdx.x];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < TILE_DIM; i <<= 1) {
s1 += g.shfl_down(s1, i);
s2 += g.shfl_down(s2, i);
}
if (threadIdx.x == 0) {
betta_grad[pos] = s1;
gamma_grad[pos] = s2;
}
}
__global__ void LayerNormBackward2_fused_add(const float* out_grad1,
const float* out_grad2,
const float* vals_hat,
const float* gamma,
const float* betta,
const float* vars,
float* inp_grad,
bool invertible,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
out_grad1 += (row * row_stride);
out_grad2 += (row * row_stride);
vals_hat += (row * row_stride);
inp_grad += (row * row_stride);
float vals_arr[NORM_REG];
float vals_hat_arr[NORM_REG];
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
float gamma_reg = gamma[i * iteration_stride + id];
vals_arr[i] = out_grad1[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
vals_hat_arr[i] =
(invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) /
gamma_reg
: vals_hat[i * iteration_stride + id]);
}
if ((high_index) < row_stride) {
float gamma_reg = gamma[high_index];
vals_arr[iterations] = out_grad1[high_index];
vals_arr[iterations] *= gamma_reg;
vals_hat_arr[iterations] =
(invertible ? (vals_hat[high_index] - betta[high_index]) / gamma_reg
: vals_hat[high_index]);
iterations++;
}
float var_reg = vars[row];
float sum = 0;
for (int i = 0; i < iterations; i++) {
sum += vals_hat_arr[i] * vals_arr[i] * sqrtf(var_reg);
vals_arr[i] *= rsqrtf(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); }
sum = 0;
for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++)
inp_grad[i * iteration_stride + id] =
(vals_arr[i] - sum) + out_grad2[i * iteration_stride + id];
if ((high_index) < row_stride)
inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index];
}
__global__ void LayerNormBackward2_fused_add(const __half* out_grad1,
const __half* out_grad2,
const __half* vals_hat,
const __half* gamma,
const __half* betta,
const __half* vars,
__half* inp_grad,
bool invertible,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
__half2 vals_arr[NORM_REG];
float2 vals_arr_f[NORM_REG];
__half2 vals_hat_arr[NORM_REG];
__half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
const __half2* out_grad_h1 = reinterpret_cast<const __half2*>(out_grad1);
const __half2* out_grad_h2 = reinterpret_cast<const __half2*>(out_grad2);
const __half2* vals_hat_h = reinterpret_cast<const __half2*>(vals_hat);
inp_grad_h += (row * row_stride);
out_grad_h1 += (row * row_stride);
out_grad_h2 += (row * row_stride);
vals_hat_h += (row * row_stride);
const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
const __half2* betta_h = (invertible ? reinterpret_cast<const __half2*>(betta) : nullptr);
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
__half2 gamma_reg = gamma_h[i * iteration_stride + id];
vals_arr[i] = out_grad_h1[i * iteration_stride + id];
vals_arr[i] *= gamma_reg; // out_grad * gamma
vals_hat_arr[i] =
(invertible
? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) /
gamma_reg
: vals_hat_h[i * iteration_stride + id]);
}
if ((high_index) < row_stride) {
__half2 gamma_reg = gamma_h[high_index];
vals_arr[iterations] = out_grad_h1[high_index];
vals_arr[iterations] *= gamma_reg; // out_grad * gamma
vals_hat_arr[iterations] =
(invertible ? (vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg
: vals_hat_h[high_index]);
iterations++;
}
__half var_h = vars[row];
__half2 var_reg = __halves2half2(var_h, var_h);
float sum = 0.f;
for (int i = 0; i < iterations; i++) {
__half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg));
float2 result_f = __half22float2(result_h);
sum += result_f.x;
sum += result_f.y;
vals_arr[i] *= h2rsqrt(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
__half2 sum_h = __float2half2_rn(sum);
for (int i = 0; i < iterations; i++) {
__half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg));
vals_arr_f[i] = __half22float2(vals_arr[i]);
float2 temp_f = __half22float2(temp);
vals_arr_f[i].x += temp_f.x;
vals_arr_f[i].y += temp_f.y;
}
sum = 0.f;
for (int i = 0; i < iterations; i++) {
sum += (vals_arr_f[i].x);
sum += (vals_arr_f[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr_f[i].x -= sum;
vals_arr_f[i].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[i]);
inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id];
}
if ((high_index) < row_stride) {
vals_arr_f[iterations].x -= sum;
vals_arr_f[iterations].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[iterations]);
inp_grad_h[high_index] = temp + out_grad_h2[high_index];
}
#endif
}
template <>
void launch_layerNorm_backward_fused_add<float>(const float* out_grad1,
const float* out_grad2,
const float* vals_hat,
const float* vars,
const float* gamma,
float* gamma_grad,
float* betta_grad,
float* inp_grad,
int batch,
int hidden_dim,
cudaStream_t stream[2],
bool invertible,
const float* betta)
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
LayerNormBackward1<float><<<grid_dim, block_dim, 0, stream[0]>>>(
out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);
dim3 grid_dim2(batch);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads);
LayerNormBackward2_fused_add<<<grid_dim2, block_dim2, 0, stream[1]>>>(
out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim);
}
template <>
void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1,
const __half* out_grad2,
const __half* vals_hat,
const __half* vars,
const __half* gamma,
__half* gamma_grad,
__half* betta_grad,
__half* inp_grad,
int batch,
int hidden_dim,
cudaStream_t stream[2],
bool invertible,
const __half* betta)
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
LayerNormBackward1<__half><<<grid_dim, block_dim, 0, stream[0]>>>(
out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible);
dim3 grid_dim2(batch);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads / 2);
LayerNormBackward2_fused_add<<<grid_dim2, block_dim2, 0, stream[1]>>>(
out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2);
}
/* Backward Normalize (Input-Gradient)
 * Uses the means and variances computed from the input,
 * so this backward variant is not invertible:
 * the gradient is computed directly from the input (X).
 */
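// Sketch of what the kernels below compute per row, with g_i = out_grad1_i * gamma_i
// and xu_i = X_i - mean:
//   dX_i = g_i * rsqrt(var)
//        - mean_j(g_j * xu_j) * xu_i * rsqrt(var) / var
//        - mean_j(both terms above)
// followed by the fused residual add of out_grad2_i.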
__global__ void LayerNormBackward2_fused_add(const float* out_grad1,
const float* out_grad2,
const float* X_vals,
const float* gamma,
const float* vars,
const float* means,
float* inp_grad,
int row_stride)
{
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
float vals_arr[NORM_REG];
float vals_hat_arr[NORM_REG];
out_grad1 += (row * row_stride);
out_grad2 += (row * row_stride);
X_vals += (row * row_stride);
inp_grad += (row * row_stride);
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
float gamma_reg = gamma[i * iteration_stride + id];
vals_arr[i] = out_grad1[i * iteration_stride + id];
vals_arr[i] *= gamma_reg;
vals_hat_arr[i] = X_vals[i * iteration_stride + id];
}
if ((high_index) < row_stride) {
float gamma_reg = gamma[high_index];
vals_arr[iterations] = out_grad1[high_index];
vals_arr[iterations] *= gamma_reg;
vals_hat_arr[iterations] = X_vals[high_index];
iterations++;
}
float var_reg = vars[row];
float mean_reg = means[row];
float sum = 0;
float xu[NORM_REG];
for (int i = 0; i < iterations; i++) {
xu[i] = (vals_hat_arr[i] - mean_reg);
sum += vals_arr[i] * xu[i];
vals_arr[i] *= rsqrtf(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
for (int i = 0; i < iterations; i++) {
vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg));
}
sum = 0;
for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; }
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= row_stride;
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++)
inp_grad[i * iteration_stride + id] =
(vals_arr[i] - sum) + out_grad2[i * iteration_stride + id];
if ((high_index) < row_stride)
inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index];
}
__global__ void LayerNormBackward2_fused_add(const __half* out_grad1,
const __half* out_grad2,
const __half* X_vals,
const __half* gamma,
const __half* vars,
const __half* means,
__half* inp_grad,
int row_stride)
{
#ifdef HALF_PRECISION_AVAILABLE
int iteration_stride = blockDim.x;
int iterations = row_stride / iteration_stride;
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
int row = blockIdx.x;
int id = threadIdx.x;
int wid = id / WARP_SIZE;
int warp_num = iteration_stride >> WARP_SIZE_BITS;
__shared__ float partialSum[MAX_WARP_NUM];
__half2 vals_arr[NORM_REG];
float2 vals_arr_f[NORM_REG];
__half2 vals_hat_arr[NORM_REG];
__half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad);
const __half2* out_grad_h1 = reinterpret_cast<const __half2*>(out_grad1);
const __half2* out_grad_h2 = reinterpret_cast<const __half2*>(out_grad2);
const __half2* vals_hat_h = reinterpret_cast<const __half2*>(X_vals);
out_grad_h1 += (row * row_stride);
out_grad_h2 += (row * row_stride);
inp_grad_h += (row * row_stride);
vals_hat_h += (row * row_stride);
const __half2* gamma_h = reinterpret_cast<const __half2*>(gamma);
int high_index = iterations * iteration_stride + id;
#pragma unroll
for (int i = 0; i < iterations; i++) {
__half2 gamma_reg = gamma_h[i * iteration_stride + id];
vals_arr[i] = out_grad_h1[i * iteration_stride + id];
vals_arr[i] *= gamma_reg; // out_grad * gamma
vals_hat_arr[i] = vals_hat_h[i * iteration_stride + id];
}
if ((high_index) < row_stride) {
__half2 gamma_reg = gamma_h[high_index];
vals_arr[iterations] = out_grad_h1[high_index];
vals_arr[iterations] *= gamma_reg; // out_grad * gamma
vals_hat_arr[iterations] = vals_hat_h[high_index];
iterations++;
}
__half mean_h = means[row];
__half var_h = vars[row];
__half2 var_reg = __halves2half2(var_h, var_h);
__half2 mean_reg = __halves2half2(mean_h, mean_h);
__half2 xu[NORM_REG];
float sum = 0.f;
for (int i = 0; i < iterations; i++) {
xu[i] = (vals_hat_arr[i] - mean_reg);
__half2 result_h = (xu[i] * vals_arr[i]);
float2 result_f = __half22float2(result_h);
sum += result_f.x;
sum += result_f.y;
vals_arr[i] *= h2rsqrt(var_reg);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
__half2 sum_h = __float2half2_rn(sum);
for (int i = 0; i < iterations; i++) {
__half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg));
vals_arr_f[i] = __half22float2(vals_arr[i]);
float2 xu_grad_f = __half22float2(xu_grad);
vals_arr_f[i].x += xu_grad_f.x;
vals_arr_f[i].y += xu_grad_f.y;
}
sum = 0.f;
for (int i = 0; i < iterations; i++) {
sum += (vals_arr_f[i].x);
sum += (vals_arr_f[i].y);
}
for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); }
if (g.thread_rank() == 0) partialSum[wid] = sum;
__syncthreads();
if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()];
#ifndef __STOCHASTIC_MODE__
__syncthreads();
#endif
for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i);
sum = g.shfl(sum, 0);
sum /= (2 * row_stride);
iterations = row_stride / iteration_stride;
for (int i = 0; i < iterations; i++) {
vals_arr_f[i].x -= sum;
vals_arr_f[i].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[i]);
inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id];
}
if ((high_index) < row_stride) {
vals_arr_f[iterations].x -= sum;
vals_arr_f[iterations].y -= sum;
__half2 temp = __float22half2_rn(vals_arr_f[iterations]);
inp_grad_h[high_index] = temp + out_grad_h2[high_index];
}
#endif
}
template <>
void launch_layerNorm_backward_fused_add<float>(const float* out_grad1,
const float* out_grad2,
const float* X_data,
const float* vars,
const float* means,
const float* gamma,
float* gamma_grad,
float* betta_grad,
float* inp_grad,
int batch,
int hidden_dim,
cudaStream_t stream[2])
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
LayerNormBackward1<float><<<grid_dim, block_dim, 0, stream[0]>>>(
out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);
dim3 grid_dim2(batch);
if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 1;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 2;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads);
LayerNormBackward2_fused_add<<<grid_dim2, block_dim2, 0, stream[1]>>>(
out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim);
}
template <>
void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1,
const __half* out_grad2,
const __half* X_data,
const __half* vars,
const __half* means,
const __half* gamma,
__half* gamma_grad,
__half* betta_grad,
__half* inp_grad,
int batch,
int hidden_dim,
cudaStream_t stream[2])
{
int threads = THREADS;
dim3 grid_dim(hidden_dim / TILE_DIM);
dim3 block_dim(TILE_DIM, TILE_DIM);
LayerNormBackward1<__half><<<grid_dim, block_dim, 0, stream[0]>>>(
out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim);
dim3 grid_dim2(batch);
if (hidden_dim > 8192 && hidden_dim <= 16384)
threads <<= 1;
else if (hidden_dim > 16384 && hidden_dim <= 32768)
threads <<= 2;
else if (hidden_dim > 32768 && hidden_dim <= 65536)
threads <<= 3;
else if (hidden_dim > 65536)
        throw std::runtime_error("Unsupported hidden_dim.");
dim3 block_dim2(threads / 2);
LayerNormBackward2_fused_add<<<grid_dim2, block_dim2, 0, stream[1]>>>(
out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim / 2);
}
|
e33fc56aa62c0a1e5ee3215d6c0f5dec71f2c8dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifdef WITH_CUTLASS
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include "oneflow/core/cuda/elementwise.cuh"
#include "oneflow/core/ep/include/primitive/permute.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/warp/mma.h"
#include "kernel_forward.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
#include "trt_flash_attention/fmha.h"
#include "trt_flash_attention/fmha_flash_attention.h"
namespace oneflow {
namespace user_op {
namespace {
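// ParseDims decodes a layout string into (batch, seq_len, num_heads, head_size) plus the
// strides and element offset of one tensor inside a possibly packed buffer. Illustrative
// example (values assumed): layout "BM(H3K)" with shape (b, m, 3*h*k) is packed QKV, where
// tensor_index 0/1/2 selects q/k/v via *offset = tensor_index * k and *h_stride = 3 * k,
// so consecutive heads skip over the packed slices of the other two tensors.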
void ParseDims(const ShapeView& shape, const std::string& layout,
const Optional<int64_t>& batch_size, const Optional<int64_t>& seq_len,
const Optional<int64_t>& num_heads, const Optional<int64_t>& head_size,
int64_t tensor_index, int64_t* b, int64_t* m, int64_t* h, int64_t* k,
int64_t* b_stride, int64_t* m_stride, int64_t* h_stride, int64_t* offset,
bool* bm_packed) {
if (shape.NumAxes() == 2) {
if (layout == "(BM)(HK)" || layout == "(BM)(H2K)" || layout == "(BM)(H3K)") {
*bm_packed = true;
CHECK(batch_size);
CHECK(seq_len);
*b = CHECK_JUST(batch_size);
*m = CHECK_JUST(seq_len);
int64_t packed_n = 0;
if (layout == "(BM)(HK)") {
packed_n = 1;
} else if (layout == "(BM)(H2K)") {
packed_n = 2;
} else if (layout == "(BM)(H3K)") {
packed_n = 3;
} else {
UNIMPLEMENTED();
}
const int64_t hidden_size = shape.At(1);
if (num_heads) {
const int64_t expected_h = CHECK_JUST(num_heads);
const int64_t packed_h = packed_n * expected_h;
CHECK_EQ(hidden_size % packed_h, 0);
*h = expected_h;
*k = hidden_size / packed_h;
} else if (head_size) {
const int64_t expected_k = CHECK_JUST(head_size);
const int64_t packed_k = packed_n * expected_k;
CHECK_EQ(hidden_size % packed_k, 0);
*h = hidden_size / packed_k;
*k = expected_k;
} else {
UNIMPLEMENTED();
}
*h_stride = *k * packed_n;
*m_stride = *h_stride * *h;
*b_stride = 0;
if (packed_n == 1) {
*offset = 0;
} else if (packed_n == 2) {
CHECK_GE(tensor_index, 1);
*offset = (tensor_index - 1) * *k;
} else if (packed_n == 3) {
*offset = tensor_index * *k;
} else {
UNIMPLEMENTED();
}
} else {
UNIMPLEMENTED();
}
} else if (shape.NumAxes() == 3) {
if (layout == "BM(HK)" || layout == "BM(H2K)" || layout == "BM(H3K)" || layout == "MB(HK)"
|| layout == "MB(H2K)" || layout == "MB(H3K)") {
*bm_packed = false;
bool batch_first = false;
int64_t packed_n = 0;
const std::string layout_bm = layout.substr(0, 2);
const std::string layout_hk = layout.substr(2);
if (layout_bm == "BM") {
*b = shape.At(0);
*m = shape.At(1);
batch_first = true;
} else if (layout_bm == "MB") {
*b = shape.At(1);
*m = shape.At(0);
batch_first = false;
} else {
UNIMPLEMENTED();
}
if (layout_hk == "(HK)") {
packed_n = 1;
} else if (layout_hk == "(H2K)") {
packed_n = 2;
} else if (layout_hk == "(H3K)") {
packed_n = 3;
} else {
UNIMPLEMENTED();
}
const int64_t hidden_size = shape.At(2);
if (num_heads) {
const int64_t expected_h = CHECK_JUST(num_heads);
const int64_t packed_h = packed_n * expected_h;
CHECK_EQ(hidden_size % packed_h, 0);
*h = expected_h;
*k = hidden_size / packed_h;
} else if (head_size) {
const int64_t expected_k = CHECK_JUST(head_size);
const int64_t packed_k = packed_n * expected_k;
CHECK_EQ(hidden_size % packed_k, 0);
*h = hidden_size / packed_k;
*k = expected_k;
} else {
UNIMPLEMENTED();
}
*h_stride = *k * packed_n;
if (batch_first) {
*m_stride = *h_stride * *h;
*b_stride = *m_stride * *m;
} else {
*b_stride = *h_stride * *h;
*m_stride = *b_stride * *b;
}
if (packed_n == 1) {
*offset = 0;
} else if (packed_n == 2) {
CHECK_GE(tensor_index, 1);
*offset = (tensor_index - 1) * *k;
} else if (packed_n == 3) {
*offset = tensor_index * *k;
} else {
UNIMPLEMENTED();
}
} else if (layout == "(BM)HK") {
*bm_packed = true;
CHECK(batch_size);
CHECK(seq_len);
*b = CHECK_JUST(batch_size);
*m = CHECK_JUST(seq_len);
*h = shape.At(1);
*k = shape.At(2);
*h_stride = *k;
*m_stride = *h_stride * *h;
*b_stride = 0;
} else {
UNIMPLEMENTED();
}
} else if (shape.NumAxes() == 4) {
*bm_packed = false;
if (layout == "BMHK") {
*b = shape.At(0);
*m = shape.At(1);
*h = shape.At(2);
*k = shape.At(3);
*h_stride = *k;
*m_stride = *h_stride * *h;
*b_stride = *m_stride * *m;
} else if (layout == "BHMK") {
*b = shape.At(0);
*m = shape.At(2);
*h = shape.At(1);
*k = shape.At(3);
*m_stride = *k;
*h_stride = *m_stride * *m;
*b_stride = *h_stride * *h;
} else if (layout == "MBHK") {
*b = shape.At(1);
*m = shape.At(0);
*h = shape.At(2);
*k = shape.At(3);
*h_stride = *k;
*b_stride = *h_stride * *h;
*m_stride = *b_stride * *b;
} else {
UNIMPLEMENTED();
}
*offset = 0;
} else {
UNIMPLEMENTED();
  }
if (batch_size) {
const int64_t expected_b = CHECK_JUST(batch_size);
CHECK_EQ(*b, expected_b);
}
if (seq_len) {
const int64_t expected_m = CHECK_JUST(seq_len);
CHECK_EQ(*m, expected_m);
}
if (num_heads) {
const int64_t expected_h = CHECK_JUST(num_heads);
CHECK_EQ(*h, expected_h);
}
if (head_size) {
const int64_t expected_k = CHECK_JUST(head_size);
CHECK_EQ(*k, expected_k);
}
}
void ParseDims(const ShapeView& shape, const std::string& layout,
const Optional<int64_t>& num_heads, const Optional<int64_t>& head_size,
int64_t tensor_index, int64_t* b, int64_t* m, int64_t* h, int64_t* k,
int64_t* b_stride, int64_t* m_stride, int64_t* h_stride, int64_t* offset) {
bool bm_packed{};
ParseDims(shape, layout, Optional<int64_t>(), Optional<int64_t>(), num_heads, head_size,
tensor_index, b, m, h, k, b_stride, m_stride, h_stride, offset, &bm_packed);
}
template<typename T, int pack_size>
struct alignas(pack_size * sizeof(T)) Pack {
T elem[pack_size];
};
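// PackQkv interleaves the separate q/k/v buffers into the contiguous [b, s, h, 3, d]
// layout expected by the TRT flash-attention kernel, and fills seq_len with the prefix
// sums [0, s, 2*s, ...] used as cu_seqlens for the fixed-length batch.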
template<typename T>
__global__ void PackQkv(int b, int s, int nh, int d, const T* q, const T* k, const T* v, T* o,
int32_t* seq_len) {
int count = b * s * nh * d * 3;
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < count; i += blockDim.x * gridDim.x) {
int row = i / (d * 3);
int out_col = i - row * (d * 3);
T out;
if (out_col < d) {
out = q[row * d + out_col];
} else if (out_col < 2 * d) {
out = k[row * d + out_col - d];
} else {
out = v[row * d + out_col - d * 2];
}
o[i] = out;
}
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < b + 1; i += blockDim.x * gridDim.x) {
seq_len[i] = i * s;
}
}
struct Params {
DataType data_type;
int64_t num_batches;
int64_t num_heads;
int64_t query_seq_len;
int64_t kv_seq_len;
int64_t head_size;
int64_t value_head_size;
int64_t q_stride_b;
int64_t q_stride_m;
int64_t q_stride_h;
int64_t k_stride_b;
int64_t k_stride_m;
int64_t k_stride_h;
int64_t v_stride_b;
int64_t v_stride_m;
int64_t v_stride_h;
std::string attn_mask_type;
int64_t causal_diagonal_offset;
const void* query_ptr;
const void* key_ptr;
const void* value_ptr;
const void* attn_bias_ptr;
const void* query_seq_start_ptr;
const void* key_seq_start_ptr;
const void* key_seq_len_ptr;
int64_t attn_bias_stride_b;
int64_t attn_bias_stride_h;
int64_t attn_bias_stride_m;
void* out_ptr;
void* workspace;
int64_t workspace_size;
float scale;
};
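// Compile-time dispatch chain for the cutlass fMHA kernel:
// DispatchCutlassFmha -> DispatchArchTag -> DispatchIsAligned -> DispatchKeysPerBlock
// -> DispatchSingleValueIteration -> DispatchWithAttnBias -> LaunchCutlassFmha,
// each step fixing one template parameter (dtype, SM architecture, alignment,
// tile shape, single-value iteration, attention-bias presence).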
template<typename T, typename ArchTag, bool is_aligned, int queries_per_block, int keys_per_block,
bool single_value_iteration, bool with_attn_bias>
void LaunchCutlassFmha(const Params& params, ep::CudaStream* stream) {
// The fmha implementation below is based on xformers's fmha
// implementation at:
// https://github.com/facebookresearch/xformers/tree/main/xformers/csrc/attention/cuda/fmha
using Attention = AttentionKernel<T, ArchTag, is_aligned, queries_per_block, keys_per_block,
single_value_iteration, false, with_attn_bias>;
typename Attention::Params p{};
p.query_ptr = const_cast<T*>(reinterpret_cast<const T*>(params.query_ptr));
p.key_ptr = const_cast<T*>(reinterpret_cast<const T*>(params.key_ptr));
p.value_ptr = const_cast<T*>(reinterpret_cast<const T*>(params.value_ptr));
p.attn_bias_ptr = const_cast<T*>(reinterpret_cast<const T*>(params.attn_bias_ptr));
p.seqstart_q_ptr =
const_cast<int32_t*>(reinterpret_cast<const int32_t*>(params.query_seq_start_ptr));
p.seqstart_k_ptr =
const_cast<int32_t*>(reinterpret_cast<const int32_t*>(params.key_seq_start_ptr));
p.seqlen_k_ptr = const_cast<int32_t*>(reinterpret_cast<const int32_t*>(params.key_seq_len_ptr));
p.logsumexp_ptr = nullptr;
p.output_ptr = reinterpret_cast<T*>(params.out_ptr);
if (Attention::kNeedsOutputAccumulatorBuffer) {
using Acc = typename Attention::accum_t;
CHECK_GE(params.workspace_size, params.num_batches * params.query_seq_len * params.num_heads
* params.value_head_size * sizeof(Acc));
p.output_accum_ptr = reinterpret_cast<Acc*>(params.workspace);
} else {
p.output_accum_ptr = nullptr;
}
p.num_heads = params.num_heads;
p.num_batches = params.num_batches;
p.head_dim = params.head_size;
p.head_dim_value = params.value_head_size;
p.num_queries = params.query_seq_len;
p.num_keys = params.kv_seq_len;
p.q_strideM = params.q_stride_m;
p.k_strideM = params.k_stride_m;
p.v_strideM = params.v_stride_m;
p.o_strideM = p.head_dim_value * p.num_heads;
p.bias_strideM = params.attn_bias_stride_m;
p.q_strideH = params.q_stride_h;
p.k_strideH = params.k_stride_h;
p.v_strideH = params.v_stride_h;
p.bias_strideH = params.attn_bias_stride_h;
p.q_strideB = params.q_stride_b;
p.k_strideB = params.k_stride_b;
p.v_strideB = params.v_stride_b;
p.bias_strideB = params.attn_bias_stride_b;
p.scale = params.scale;
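    // Mask semantics (as assumed from the xformers kernel): "causal_from_top_left"
    // anchors the causal diagonal at (query 0, key 0), while "causal_from_bottom_right"
    // anchors it at the bottom-right corner so the last query attends to all keys,
    // the usual choice when kv_seq_len > query_seq_len, e.g. decoding with a KV cache.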
if (params.attn_mask_type == "none") {
p.custom_mask_type = Attention::NoCustomMask;
} else if (params.attn_mask_type == "causal_from_top_left") {
p.custom_mask_type = Attention::CausalFromTopLeft;
} else if (params.attn_mask_type == "causal_from_bottom_right") {
p.custom_mask_type = Attention::CausalFromBottomRight;
} else {
UNIMPLEMENTED();
}
p.causal_diagonal_offset = params.causal_diagonal_offset;
p.use_dropout = false;
constexpr auto kernel_fn = attention_kernel_batched_impl<Attention>;
int smem_bytes = sizeof(typename Attention::SharedStorage);
if (smem_bytes > 0xc000) {
static bool once = [&]() {
hipFuncSetAttribute(kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
return true;
}();
}
CHECK(Attention::check_supported(p));
hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream->cuda_stream(), p);
}
template<typename T, typename ArchTag, bool is_aligned, int queries_per_block, int keys_per_block,
bool single_value_iteration>
void DispatchWithAttnBias(const Params& params, ep::CudaStream* stream) {
if (params.attn_bias_ptr != nullptr) {
LaunchCutlassFmha<T, ArchTag, is_aligned, queries_per_block, keys_per_block,
single_value_iteration, true>(params, stream);
} else {
LaunchCutlassFmha<T, ArchTag, is_aligned, queries_per_block, keys_per_block,
single_value_iteration, false>(params, stream);
}
}
template<typename T, typename ArchTag, bool is_aligned, int queries_per_block, int keys_per_block>
void DispatchSingleValueIteration(const Params& params, ep::CudaStream* stream) {
if (params.value_head_size <= keys_per_block) {
DispatchWithAttnBias<T, ArchTag, is_aligned, queries_per_block, keys_per_block, true>(params,
stream);
} else {
DispatchWithAttnBias<T, ArchTag, is_aligned, queries_per_block, keys_per_block, false>(params,
stream);
}
}
template<typename T, typename ArchTag, bool is_aligned>
void DispatchKeysPerBlock(const Params& params, ep::CudaStream* stream) {
if (params.value_head_size <= 64) {
DispatchSingleValueIteration<T, ArchTag, is_aligned, 64, 64>(params, stream);
} else {
DispatchSingleValueIteration<T, ArchTag, is_aligned, 32, 128>(params, stream);
}
}
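// The aligned kernel variant uses 128-bit vectorized accesses, so it requires 16-byte
// aligned base pointers and innermost sizes/strides that are multiples of 16 / sizeof(T)
// elements (8 for half, 4 for float); otherwise the unaligned fallback is selected.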
template<typename T, typename ArchTag>
void DispatchIsAligned(const Params& params, ep::CudaStream* stream) {
if (reinterpret_cast<uintptr_t>(params.query_ptr) % 16 == 0
&& reinterpret_cast<uintptr_t>(params.key_ptr) % 16 == 0
&& reinterpret_cast<uintptr_t>(params.value_ptr) % 16 == 0
&& params.attn_bias_stride_m % (16 / sizeof(T)) == 0
&& params.head_size % (16 / sizeof(T)) == 0
&& params.value_head_size % (16 / sizeof(T)) == 0) {
DispatchKeysPerBlock<T, ArchTag, true>(params, stream);
} else {
DispatchKeysPerBlock<T, ArchTag, false>(params, stream);
}
}
template<typename T>
void DispatchArchTag(const Params& params, ep::CudaStream* stream) {
const int major = stream->device_properties().major;
const int minor = stream->device_properties().minor;
if (major == 8) {
DispatchIsAligned<T, cutlass::arch::Sm80>(params, stream);
} else if (major == 7) {
if (minor == 5) {
DispatchIsAligned<T, cutlass::arch::Sm75>(params, stream);
} else {
DispatchIsAligned<T, cutlass::arch::Sm70>(params, stream);
}
} else {
UNIMPLEMENTED();
}
}
void DispatchCutlassFmha(const Params& params, ep::CudaStream* stream) {
if (params.data_type == DataType::kFloat16) {
DispatchArchTag<cutlass::half_t>(params, stream);
} else if (params.data_type == DataType::kFloat) {
DispatchArchTag<float>(params, stream);
} else {
UNIMPLEMENTED();
}
}
class FusedMultiHeadAttentionInferenceKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
FusedMultiHeadAttentionInferenceKernel() = default;
~FusedMultiHeadAttentionInferenceKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const Tensor* query = ctx->Tensor4ArgNameAndIndex("query", 0);
const Tensor* key = ctx->Tensor4ArgNameAndIndex("key", 0);
const Tensor* value = ctx->Tensor4ArgNameAndIndex("value", 0);
const Tensor* attn_bias = nullptr;
if (ctx->has_input("attn_bias", 0)) { attn_bias = ctx->Tensor4ArgNameAndIndex("attn_bias", 0); }
const Tensor* query_seq_start = nullptr;
const Tensor* key_seq_start = nullptr;
const Tensor* key_seq_len = nullptr;
const float scale = ctx->Attr<double>("scale");
if (ctx->has_input("query_seq_start", 0)) {
CHECK(ctx->has_input("key_seq_start", 0));
query_seq_start = ctx->Tensor4ArgNameAndIndex("query_seq_start", 0);
key_seq_start = ctx->Tensor4ArgNameAndIndex("key_seq_start", 0);
CHECK(query_seq_start->data_type() == DataType::kInt32);
CHECK(key_seq_start->data_type() == DataType::kInt32);
CHECK_EQ(query_seq_start->shape_view().NumAxes(), 1);
CHECK_GT(query_seq_start->shape_view().At(0), 1);
CHECK(query_seq_start->shape_view() == key_seq_start->shape_view());
if (ctx->has_input("key_seq_len", 0)) {
key_seq_len = ctx->Tensor4ArgNameAndIndex("key_seq_len", 0);
CHECK(key_seq_len->data_type() == DataType::kInt32);
CHECK_EQ(key_seq_len->shape_view().NumAxes(), 1);
CHECK_EQ(key_seq_len->shape_view().At(0), query_seq_start->shape_view().At(0) - 1);
}
} else {
CHECK(!ctx->has_input("key_seq_start", 0));
CHECK(!ctx->has_input("key_seq_len", 0));
}
Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
Tensor* tmp = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const DataType data_type = query->data_type();
CHECK_EQ(key->data_type(), data_type);
CHECK_EQ(value->data_type(), data_type);
CHECK_EQ(out->data_type(), data_type);
const int64_t query_head_size = ctx->Attr<int64_t>("query_head_size");
const std::string& attn_mask_type = ctx->Attr<std::string>("attn_mask_type");
const int64_t causal_diagonal_offset = ctx->Attr<int64_t>("causal_diagonal_offset");
CHECK_GE(causal_diagonal_offset, 0);
const std::string& query_layout = ctx->Attr<std::string>("query_layout");
const std::string& key_layout = ctx->Attr<std::string>("key_layout");
const std::string& value_layout = ctx->Attr<std::string>("value_layout");
const std::string& output_layout = ctx->Attr<std::string>("output_layout");
Optional<int64_t> batch_size;
if (query_seq_start != nullptr) { batch_size = query_seq_start->shape_view().At(0) - 1; }
Optional<int64_t> query_max_seq_len;
const int64_t attr_query_max_seq_len = ctx->Attr<int64_t>("query_max_seq_len");
if (attr_query_max_seq_len != 0) { query_max_seq_len = attr_query_max_seq_len; }
Optional<int64_t> key_max_seq_len;
const int64_t attr_key_max_seq_len = ctx->Attr<int64_t>("key_max_seq_len");
if (attr_key_max_seq_len != 0) { key_max_seq_len = attr_key_max_seq_len; }
int64_t q_b = 0;
int64_t q_m = 0;
int64_t q_h = 0;
int64_t q_k = 0;
int64_t q_b_stride = 0;
int64_t q_m_stride = 0;
int64_t q_h_stride = 0;
int64_t q_offset = 0;
bool q_bm_packed = false;
ParseDims(query->shape_view(), query_layout, batch_size, query_max_seq_len, Optional<int64_t>(),
query_head_size, 0, &q_b, &q_m, &q_h, &q_k, &q_b_stride, &q_m_stride, &q_h_stride,
&q_offset, &q_bm_packed);
if (q_bm_packed) { CHECK(query_seq_start != nullptr); }
int64_t k_b = 0;
int64_t k_m = 0;
int64_t k_h = 0;
int64_t k_k = 0;
int64_t k_b_stride = 0;
int64_t k_m_stride = 0;
int64_t k_h_stride = 0;
int64_t k_offset = 0;
bool k_bm_packed = false;
ParseDims(key->shape_view(), key_layout, q_b, key_max_seq_len, Optional<int64_t>(),
query_head_size, 1, &k_b, &k_m, &k_h, &k_k, &k_b_stride, &k_m_stride, &k_h_stride,
&k_offset, &k_bm_packed);
CHECK_EQ(k_b, q_b);
CHECK_EQ(k_h, q_h);
CHECK_EQ(k_bm_packed, q_bm_packed);
int64_t v_b = 0;
int64_t v_m = 0;
int64_t v_h = 0;
int64_t v_k = 0;
int64_t v_b_stride = 0;
int64_t v_m_stride = 0;
int64_t v_h_stride = 0;
int64_t v_offset = 0;
bool v_bm_packed = false;
ParseDims(value->shape_view(), value_layout, q_b, k_m, q_h, Optional<int64_t>(), 2, &v_b, &v_m,
&v_h, &v_k, &v_b_stride, &v_m_stride, &v_h_stride, &v_offset, &v_bm_packed);
CHECK_EQ(v_b, q_b);
CHECK_EQ(v_m, k_m);
CHECK_EQ(v_bm_packed, k_bm_packed);
if (output_layout == "BM(HK)") {
CHECK(!q_bm_packed);
CHECK_EQ(out->shape_view().NumAxes(), 3);
CHECK_EQ(out->shape_view().At(0), q_b);
CHECK_EQ(out->shape_view().At(1), q_m);
CHECK_EQ(out->shape_view().At(2), q_h * v_k);
} else if (output_layout == "MB(HK)") {
CHECK(!q_bm_packed);
CHECK_EQ(out->shape_view().NumAxes(), 3);
CHECK_EQ(q_b, 1);
CHECK_EQ(out->shape_view().At(0), q_m);
CHECK_EQ(out->shape_view().At(1), q_b);
CHECK_EQ(out->shape_view().At(2), q_h * v_k);
} else if (output_layout == "(BM)(HK)") {
CHECK(q_bm_packed);
CHECK_EQ(out->shape_view().NumAxes(), 2);
CHECK_EQ(out->shape_view().At(0), query->shape_view().At(0));
CHECK_EQ(out->shape_view().At(1), q_h * v_k);
} else {
UNIMPLEMENTED();
}
auto* cuda_stream = ctx->stream()->As<ep::CudaStream>();
    // Also accept the legacy env var name containing the typo `KERENL` for backward compatibility.
const bool enable_trt_flash_attn =
ParseBooleanFromEnv(
"ONEFLOW_KERNEL_FMHA_ENABLE_TRT_FLASH_ATTN_IMPL",
ParseBooleanFromEnv("ONEFLOW_KERENL_FMHA_ENABLE_TRT_FLASH_ATTN_IMPL", true))
&& ParseBooleanFromEnv("ONEFLOW_MATMUL_ALLOW_HALF_PRECISION_ACCUMULATION", false);
const bool is_default_scale =
std::abs(scale - 1.0 / std::sqrt(static_cast<float>(q_k))) <= 1e-5;
const int arch = cuda_stream->cuda_arch() / 10;
const bool is_trt_supported_arch = (arch == 75 || arch == 80 || arch == 86 || arch == 89);
const bool is_trt_supported_head_size = ((q_k == 40) || (q_k == 64));
// Avoid PackQKV overhead when seq_len is small.
const bool is_long_seq_len = q_m >= 512;
const bool is_trt_supported_layout = (query_layout == "BMHK" || query_layout == "BM(HK)")
&& (key_layout == "BMHK" || key_layout == "BM(HK)")
&& (value_layout == "BMHK" || value_layout == "BM(HK)")
&& (output_layout == "BMHK" || output_layout == "BM(HK)");
if (is_default_scale && query_seq_start == nullptr && enable_trt_flash_attn
&& data_type == DataType::kFloat16 && q_m == k_m && q_k == v_k && is_trt_supported_head_size
&& is_long_seq_len && is_trt_supported_arch && attn_mask_type == "none"
&& attn_bias == nullptr && is_trt_supported_layout) {
// The fmha implementation below is based on TensorRT's multiHeadFlashAttentionPlugin
// implementation at:
// https://github.com/NVIDIA/TensorRT/tree/main/plugin/multiHeadFlashAttentionPlugin
int32_t cu_seqlens_d_size = (q_b + 1) * sizeof(int32_t);
int32_t* cu_seqlens_d = reinterpret_cast<int32_t*>(tmp->mut_dptr());
half* packed_qkv =
reinterpret_cast<half*>(tmp->mut_dptr<char>() + GetCudaAlignedSize(cu_seqlens_d_size));
constexpr int pack_size = 4;
using PackType = Pack<half, pack_size>;
const int64_t count = q_b * q_m * q_h * q_k * 3 / pack_size;
hipLaunchKernelGGL(( PackQkv<PackType>), dim3((count - 1 + 256) / 256), dim3(256), 0, cuda_stream->cuda_stream(),
q_b, q_m, q_h, q_k / pack_size, reinterpret_cast<const PackType*>(query->dptr()),
reinterpret_cast<const PackType*>(key->dptr()),
reinterpret_cast<const PackType*>(value->dptr()), reinterpret_cast<PackType*>(packed_qkv),
cu_seqlens_d);
#ifdef WITH_CUDA_GRAPHS
hipStreamCaptureMode mode = hipStreamCaptureModeRelaxed;
if (cuda_stream->IsGraphCapturing()) {
OF_CUDA_CHECK(hipThreadExchangeStreamCaptureMode(&mode));
}
#endif // WITH_CUDA_GRAPHS
nvinfer1::plugin::FusedMultiHeadFlashAttentionKernel const* kernels =
nvinfer1::plugin::getFMHAFlashCubinKernels(nvinfer1::plugin::DATA_TYPE_FP16, arch);
#ifdef WITH_CUDA_GRAPHS
if (cuda_stream->IsGraphCapturing()) {
OF_CUDA_CHECK(hipThreadExchangeStreamCaptureMode(&mode));
}
#endif // WITH_CUDA_GRAPHS
nvinfer1::plugin::runFMHFAKernel(packed_qkv, cu_seqlens_d, out->mut_dptr(), q_b * q_m, arch,
kernels, q_b, q_h, q_k, q_m, cuda_stream->cuda_stream());
return;
}
Params params{};
params.data_type = data_type;
params.num_batches = q_b;
params.num_heads = q_h;
params.query_seq_len = q_m;
params.kv_seq_len = k_m;
params.head_size = q_k;
params.value_head_size = v_k;
params.scale = scale;
params.q_stride_b = q_b_stride;
params.q_stride_m = q_m_stride;
params.q_stride_h = q_h_stride;
params.k_stride_b = k_b_stride;
params.k_stride_m = k_m_stride;
params.k_stride_h = k_h_stride;
params.v_stride_b = v_b_stride;
params.v_stride_m = v_m_stride;
params.v_stride_h = v_h_stride;
params.query_ptr = query->dptr<char>() + q_offset * GetSizeOfDataType(data_type);
params.key_ptr = key->dptr<char>() + k_offset * GetSizeOfDataType(data_type);
params.value_ptr = value->dptr<char>() + v_offset * GetSizeOfDataType(data_type);
params.query_seq_start_ptr =
query_seq_start == nullptr ? nullptr : query_seq_start->dptr<int32_t>();
params.key_seq_start_ptr = key_seq_start == nullptr ? nullptr : key_seq_start->dptr<int32_t>();
params.key_seq_len_ptr = key_seq_len == nullptr ? nullptr : key_seq_len->dptr<int32_t>();
params.out_ptr = out->mut_dptr();
const int64_t tmp_buffer_size = tmp->shape_view().elem_cnt();
params.workspace = tmp->mut_dptr();
params.workspace_size = tmp_buffer_size;
params.attn_mask_type = attn_mask_type;
params.causal_diagonal_offset = causal_diagonal_offset;
if (attn_bias != nullptr) {
const int64_t num_attn_bias_axes = attn_bias->shape_view().NumAxes();
CHECK_GE(num_attn_bias_axes, 1);
CHECK_LE(num_attn_bias_axes, 4);
DimVector padded_attn_bias_shape;
for (int i = 0; i < 4 - num_attn_bias_axes; ++i) { padded_attn_bias_shape.push_back(1); }
for (int i = 0; i < num_attn_bias_axes; ++i) {
padded_attn_bias_shape.push_back(attn_bias->shape_view().At(i));
}
CHECK_GE(padded_attn_bias_shape.at(3), k_m);
int64_t bias_stride = padded_attn_bias_shape.at(3);
if (padded_attn_bias_shape.at(2) == 1) {
params.attn_bias_stride_m = 0;
} else {
CHECK_GE(padded_attn_bias_shape.at(2), q_m);
params.attn_bias_stride_m = bias_stride;
bias_stride *= padded_attn_bias_shape.at(2);
}
if (padded_attn_bias_shape.at(1) == 1) {
params.attn_bias_stride_h = 0;
} else {
CHECK_EQ(padded_attn_bias_shape.at(1), q_h);
params.attn_bias_stride_h = bias_stride;
bias_stride *= q_h;
}
if (padded_attn_bias_shape.at(0) == 1) {
params.attn_bias_stride_b = 0;
} else {
CHECK_EQ(padded_attn_bias_shape.at(0), q_b);
params.attn_bias_stride_b = bias_stride;
}
params.attn_bias_ptr = attn_bias->dptr();
} else {
params.attn_bias_ptr = nullptr;
params.attn_bias_stride_m = 0;
params.attn_bias_stride_h = 0;
params.attn_bias_stride_b = 0;
}
DispatchCutlassFmha(params, cuda_stream);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
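// Workspace layout, matching the additions below: a float accumulator the size of the
// output (for attention kernels with kNeedsOutputAccumulatorBuffer), space for the packed
// QKV copy used by the TRT path (3x the output element count), and (shape.At(0) + 1)
// int32 entries for the cu_seqlens prefix sums.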
size_t InferTmpBufferSize(InferContext* ctx) {
const auto& out_desc = ctx->OutputTensorDesc("out", 0);
size_t buffer_size = 0;
buffer_size +=
GetCudaAlignedSize(out_desc.shape().elem_cnt() * GetSizeOfDataType(DataType::kFloat));
buffer_size +=
GetCudaAlignedSize(out_desc.shape().elem_cnt() * GetSizeOfDataType(out_desc.data_type())) * 3;
buffer_size +=
GetCudaAlignedSize((out_desc.shape().At(0) + 1) * GetSizeOfDataType(DataType::kInt32));
return buffer_size;
}
#define REGISTER_FUSED_MULTI_HEAD_ATTENTION_INFERENCE_KERNEL(dtype) \
REGISTER_USER_KERNEL("fused_multi_head_attention_inference") \
.SetCreateFn<FusedMultiHeadAttentionInferenceKernel>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == dtype)) \
.SetInferTmpSizeFn(InferTmpBufferSize);
REGISTER_FUSED_MULTI_HEAD_ATTENTION_INFERENCE_KERNEL(DataType::kFloat16)
REGISTER_FUSED_MULTI_HEAD_ATTENTION_INFERENCE_KERNEL(DataType::kFloat)
template<typename Index>
struct ConcatParam {
const void* past_ptr;
const void* ptr;
void* output_ptr;
Index past_offset;
Index offset;
Index output_offset;
Index past_m;
Index past_stride_b;
Index past_stride_m;
Index past_stride_h;
Index stride_b;
Index stride_m;
Index stride_h;
Index output_stride_b;
Index output_stride_m;
Index output_stride_h;
Index count;
Index output_khm;
Index output_kh;
Index output_k;
};
template<typename Index>
struct BatchConcatParam {
ConcatParam<Index> params[2];
};
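// ConcatPastKeyValue decomposes the flat output index into (b, m, h, k) with the
// precomputed products output_khm = m*h*k, output_kh = h*k and output_k = k; rows with
// m_idx < past_m copy from the past tensor, later rows from the new one.
// BatchConcatPastKeyValue fuses both copies into one launch: blockIdx.y == 0 handles
// the key concat and blockIdx.y == 1 the value concat.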
template<typename T, typename Index>
__device__ void ConcatPastKeyValue(ConcatParam<Index> p) {
for (Index i = blockIdx.x * blockDim.x + threadIdx.x; i < p.count; i += blockDim.x * gridDim.x) {
Index b_idx = i / p.output_khm;
Index b_off = i - b_idx * p.output_khm;
Index m_idx = b_off / p.output_kh;
Index m_off = b_off - m_idx * p.output_kh;
Index h_idx = m_off / p.output_k;
Index k_idx = m_off - h_idx * p.output_k;
T v;
if (m_idx < p.past_m) {
v = reinterpret_cast<const T*>(
p.past_ptr)[p.past_offset + b_idx * p.past_stride_b + m_idx * p.past_stride_m
+ h_idx * p.past_stride_h + k_idx];
} else {
v = reinterpret_cast<const T*>(
p.ptr)[p.offset + b_idx * p.stride_b + (m_idx - p.past_m) * p.stride_m
+ h_idx * p.stride_h + k_idx];
}
reinterpret_cast<T*>(
p.output_ptr)[p.output_offset + b_idx * p.output_stride_b + m_idx * p.output_stride_m
+ h_idx * p.output_stride_h + k_idx] = v;
}
}
template<size_t elem_size, typename Index>
__global__ void BatchConcatPastKeyValue(BatchConcatParam<Index> params) {
if (blockIdx.y == 0) {
    ConcatPastKeyValue<typename std::aligned_storage<elem_size, elem_size>::type, Index>(
        params.params[0]);
  } else if (blockIdx.y == 1) {
    ConcatPastKeyValue<typename std::aligned_storage<elem_size, elem_size>::type, Index>(
        params.params[1]);
} else {
// do nothing
}
}
class FusedAttentionConcatPastKeyValueKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
FusedAttentionConcatPastKeyValueKernel() = default;
~FusedAttentionConcatPastKeyValueKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const Tensor* key = ctx->Tensor4ArgNameAndIndex("key", 0);
const Tensor* value = ctx->Tensor4ArgNameAndIndex("value", 0);
Tensor* output_key = ctx->Tensor4ArgNameAndIndex("output_key", 0);
Tensor* output_value = ctx->Tensor4ArgNameAndIndex("output_value", 0);
const DataType data_type = key->data_type();
const Tensor* past_key = nullptr;
const Tensor* past_value = nullptr;
if (ctx->has_input("past_key", 0)) {
CHECK(ctx->has_input("past_value", 0));
past_key = ctx->Tensor4ArgNameAndIndex("past_key", 0);
past_value = ctx->Tensor4ArgNameAndIndex("past_value", 0);
CHECK_EQ(past_key->data_type(), data_type);
CHECK_EQ(past_value->data_type(), data_type);
} else {
CHECK(!ctx->has_input("past_value", 0));
}
CHECK_EQ(value->data_type(), data_type);
CHECK_EQ(output_key->data_type(), data_type);
CHECK_EQ(output_value->data_type(), data_type);
const int64_t size_of_data_type = GetSizeOfDataType(data_type);
const int64_t key_head_size = ctx->Attr<int64_t>("key_head_size");
const std::string& past_key_layout = ctx->Attr<std::string>("past_key_layout");
const std::string& past_value_layout = ctx->Attr<std::string>("past_value_layout");
const std::string& key_layout = ctx->Attr<std::string>("key_layout");
const std::string& value_layout = ctx->Attr<std::string>("value_layout");
int64_t pack_size = 16 / size_of_data_type;
while (key_head_size % pack_size != 0) { pack_size /= 2; }
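    // pack_size is the widest vector (in elements) that both fits one 16-byte access and
    // divides the head size; e.g. (illustrative) fp16 with key_head_size 80 packs 8
    // elements, while key_head_size 100 falls back to a pack of 4.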
auto ParsePackedDims =
[](const ShapeView& shape, const std::string& layout, const Optional<int64_t>& num_heads,
const Optional<int64_t>& head_size, int64_t tensor_index, int64_t* b, int64_t* m,
int64_t* h, int64_t* k, int64_t* b_stride, int64_t* m_stride, int64_t* h_stride,
int64_t* offset, int64_t pack_size) {
ParseDims(shape, layout, num_heads, head_size, tensor_index, b, m, h, k, b_stride,
m_stride, h_stride, offset);
*k /= pack_size;
*b_stride /= pack_size;
*m_stride /= pack_size;
*h_stride /= pack_size;
*offset /= pack_size;
};
int64_t key_b = 0;
int64_t key_m = 0;
int64_t key_h = 0;
int64_t key_k = 0;
int64_t key_b_stride = 0;
int64_t key_m_stride = 0;
int64_t key_h_stride = 0;
int64_t key_offset = 0;
ParsePackedDims(key->shape_view(), key_layout, Optional<int64_t>(), key_head_size, 1, &key_b,
&key_m, &key_h, &key_k, &key_b_stride, &key_m_stride, &key_h_stride,
&key_offset, pack_size);
int64_t value_b = 0;
int64_t value_m = 0;
int64_t value_h = 0;
int64_t value_k = 0;
int64_t value_b_stride = 0;
int64_t value_m_stride = 0;
int64_t value_h_stride = 0;
int64_t value_offset = 0;
ParsePackedDims(value->shape_view(), value_layout, key_h, key_head_size, 2, &value_b, &value_m,
&value_h, &value_k, &value_b_stride, &value_m_stride, &value_h_stride,
&value_offset, pack_size);
CHECK_EQ(value_b, key_b);
CHECK_EQ(value_m, key_m);
int64_t past_key_b = 0;
int64_t past_key_m = 0;
int64_t past_key_h = 0;
int64_t past_key_k = 0;
int64_t past_key_b_stride = 0;
int64_t past_key_m_stride = 0;
int64_t past_key_h_stride = 0;
int64_t past_key_offset = 0;
if (past_key != nullptr) {
ParsePackedDims(past_key->shape_view(), past_key_layout, key_h, key_head_size, 1, &past_key_b,
&past_key_m, &past_key_h, &past_key_k, &past_key_b_stride, &past_key_m_stride,
&past_key_h_stride, &past_key_offset, pack_size);
}
int64_t past_value_b = 0;
int64_t past_value_m = 0;
int64_t past_value_h = 0;
int64_t past_value_k = 0;
int64_t past_value_b_stride = 0;
int64_t past_value_m_stride = 0;
int64_t past_value_h_stride = 0;
int64_t past_value_offset = 0;
if (past_value != nullptr) {
ParsePackedDims(past_value->shape_view(), past_value_layout, key_h, key_head_size, 2,
&past_value_b, &past_value_m, &past_value_h, &past_value_k,
&past_value_b_stride, &past_value_m_stride, &past_value_h_stride,
&past_value_offset, pack_size);
}
CHECK_EQ(past_value_b, past_key_b);
CHECK_EQ(past_value_m, past_key_m);
int64_t output_key_b = 0;
int64_t output_key_m = 0;
int64_t output_key_h = 0;
int64_t output_key_k = 0;
int64_t output_key_b_stride = 0;
int64_t output_key_m_stride = 0;
int64_t output_key_h_stride = 0;
int64_t output_key_offset = 0;
ParsePackedDims(output_key->shape_view(), past_key_layout, key_h, key_head_size, 1,
&output_key_b, &output_key_m, &output_key_h, &output_key_k,
&output_key_b_stride, &output_key_m_stride, &output_key_h_stride,
&output_key_offset, pack_size);
CHECK_EQ(output_key_b, key_b);
CHECK_EQ(output_key_m, past_key_m + key_m);
int64_t output_value_b = 0;
int64_t output_value_m = 0;
int64_t output_value_h = 0;
int64_t output_value_k = 0;
int64_t output_value_b_stride = 0;
int64_t output_value_m_stride = 0;
int64_t output_value_h_stride = 0;
int64_t output_value_offset = 0;
ParsePackedDims(output_value->shape_view(), past_value_layout, key_h, key_head_size, 2,
&output_value_b, &output_value_m, &output_value_h, &output_value_k,
&output_value_b_stride, &output_value_m_stride, &output_value_h_stride,
&output_value_offset, pack_size);
CHECK_EQ(output_value_b, key_b);
CHECK_EQ(output_value_m, past_value_m + value_m);
int64_t max_tensor_elem = (1 << 30) * pack_size;
CHECK((past_key == nullptr || past_key->shape_view().elem_cnt() <= max_tensor_elem)
&& (past_value == nullptr || past_value->shape_view().elem_cnt() <= max_tensor_elem)
&& key->shape_view().elem_cnt() <= max_tensor_elem
&& value->shape_view().elem_cnt() <= max_tensor_elem
&& output_key->shape_view().elem_cnt() <= max_tensor_elem
&& output_value->shape_view().elem_cnt() <= max_tensor_elem);
int64_t count = output_key_b * output_key_m * output_key_h * output_key_k;
BatchConcatParam<int32_t> kv;
kv.params[0].past_ptr = past_key == nullptr ? nullptr : past_key->dptr();
kv.params[0].ptr = key->dptr();
kv.params[0].output_ptr = output_key->mut_dptr();
kv.params[0].past_offset = past_key_offset;
kv.params[0].offset = key_offset;
kv.params[0].output_offset = output_key_offset;
kv.params[0].past_m = past_key_m;
kv.params[0].past_stride_b = past_key_b_stride;
kv.params[0].past_stride_m = past_key_m_stride;
kv.params[0].past_stride_h = past_key_h_stride;
kv.params[0].stride_b = key_b_stride;
kv.params[0].stride_m = key_m_stride;
kv.params[0].stride_h = key_h_stride;
kv.params[0].output_stride_b = output_key_b_stride;
kv.params[0].output_stride_m = output_key_m_stride;
kv.params[0].output_stride_h = output_key_h_stride;
kv.params[0].count = count;
kv.params[0].output_khm = output_key_k * output_key_h * output_key_m;
kv.params[0].output_kh = output_key_k * output_key_h;
kv.params[0].output_k = output_key_k;
kv.params[1].past_ptr = past_value == nullptr ? nullptr : past_value->dptr();
kv.params[1].ptr = value->dptr();
kv.params[1].output_ptr = output_value->mut_dptr();
kv.params[1].past_offset = past_value_offset;
kv.params[1].offset = value_offset;
kv.params[1].output_offset = output_value_offset;
kv.params[1].past_m = past_value_m;
kv.params[1].past_stride_b = past_value_b_stride;
kv.params[1].past_stride_m = past_value_m_stride;
kv.params[1].past_stride_h = past_value_h_stride;
kv.params[1].stride_b = value_b_stride;
kv.params[1].stride_m = value_m_stride;
kv.params[1].stride_h = value_h_stride;
kv.params[1].output_stride_b = output_value_b_stride;
kv.params[1].output_stride_m = output_value_m_stride;
kv.params[1].output_stride_h = output_value_h_stride;
kv.params[1].count = count;
kv.params[1].output_khm = output_value_k * output_value_h * output_value_m;
kv.params[1].output_kh = output_value_k * output_value_h;
kv.params[1].output_k = output_value_k;
constexpr uint32_t block_size = 256;
const dim3 grid_size((count - 1 + block_size) / block_size, 2);
const int64_t elem_size = size_of_data_type * pack_size;
hipStream_t cuda_stream = ctx->stream()->As<ep::CudaStream>()->cuda_stream();
if (elem_size == 16) {
hipLaunchKernelGGL(( BatchConcatPastKeyValue<16, int32_t>), dim3(grid_size), dim3(block_size), 0, cuda_stream, kv);
} else if (elem_size == 8) {
hipLaunchKernelGGL(( BatchConcatPastKeyValue<8, int32_t>), dim3(grid_size), dim3(block_size), 0, cuda_stream, kv);
} else if (elem_size == 4) {
hipLaunchKernelGGL(( BatchConcatPastKeyValue<4, int32_t>), dim3(grid_size), dim3(block_size), 0, cuda_stream, kv);
} else if (elem_size == 2) {
hipLaunchKernelGGL(( BatchConcatPastKeyValue<2, int32_t>), dim3(grid_size), dim3(block_size), 0, cuda_stream, kv);
} else if (elem_size == 1) {
hipLaunchKernelGGL(( BatchConcatPastKeyValue<1, int32_t>), dim3(grid_size), dim3(block_size), 0, cuda_stream, kv);
} else {
UNIMPLEMENTED();
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("fused_attention_concat_past_key_value")
.SetCreateFn<FusedAttentionConcatPastKeyValueKernel>()
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA));
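// The kernels below implement rotary position embedding (RoPE): for each channel pair
// (x0, x1) at rotary frequency f and token position p,
//   out0 = x0 * cos(p * f) - x1 * sin(p * f)
//   out1 = x1 * cos(p * f) + x0 * sin(p * f)
// IntervalKernel handles the interleaved layout with vectorized packs, while PlaneKernel
// handles "plane" (rotate-half) mode one scalar at a time. Dispatch flows through
// DispatchRotaryEmbeddingDimension -> DispatchIndex -> DispatchPackSize -> LaunchKernel.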
template<typename T, typename PositionType, typename IndexType, size_t num_dims,
size_t rotary_emb_dim>
struct FusedApplyRotaryEmbParam {
const T* x;
const T* cos;
const T* sin;
const PositionType* position_ids;
T* out;
const T theta;
const float inv_actual_rotary_size; // 1.0 / (rotary_size per rotary dimension)
const IndexType actual_rotary_size; // rotary_size per rotary dimension
const IndexType rotary_size;
const IndexType rotate_stride;
const IndexType k0;
const IndexType k1;
IndexType num_elements;
const IndexType k;
const IndexType x_offset;
IndexType ref_stride[num_dims]; // b, m, h, k
  IndexType out_stride[num_dims]; // ordered by descending stride
IndexType x_stride[num_dims];
IndexType position_b_stride;
IndexType position_rotate_stride;
IndexType sinuous_m_stride;
FusedApplyRotaryEmbParam(const T* x, const T* cos, const T* sin, const PositionType* position_ids,
T* out, const T theta, const float inv_actual_rotary_size,
const IndexType actual_rotary_size, const IndexType rotary_size,
const IndexType rotate_stride, const IndexType num_elements,
const IndexType k, const IndexType k0, const IndexType k1,
const IndexType x_offset)
: x(x),
cos(cos),
sin(sin),
position_ids(position_ids),
out(out),
theta(theta),
inv_actual_rotary_size(inv_actual_rotary_size),
actual_rotary_size(actual_rotary_size),
rotary_size(rotary_size),
rotate_stride(rotate_stride),
num_elements(num_elements),
k(k),
k0(k0),
k1(k1),
x_offset(x_offset) {}
};
template<typename T, typename PositionType, typename IndexType, size_t PackSize, size_t num_dims,
size_t rotary_emb_dim>
__global__ void IntervalKernel(
FusedApplyRotaryEmbParam<T, PositionType, IndexType, num_dims, rotary_emb_dim> param) {
for (IndexType packed_offset = threadIdx.x + blockIdx.x * blockDim.x;
packed_offset < param.num_elements; packed_offset += blockDim.x * gridDim.x) {
using LoadPack = cuda::elementwise::Packed<T, PackSize>;
IndexType offset = packed_offset * PackSize;
IndexType index[num_dims]; // b, m, h, k
IndexType temp_offset = offset;
for (int i = 0; i < num_dims - 1; i++) {
IndexType ref_stride = param.ref_stride[i];
IndexType idx = temp_offset / ref_stride;
index[i] = idx;
temp_offset = temp_offset - idx * ref_stride;
}
index[num_dims - 1] = temp_offset;
IndexType x_offset = param.x_offset;
IndexType out_offset = 0;
#pragma unroll
for (int i = 0; i < num_dims; i++) {
x_offset = x_offset + param.x_stride[i] * index[i];
out_offset = out_offset + param.out_stride[i] * index[i];
}
const LoadPack x_vec = *reinterpret_cast<const LoadPack*>(param.x + x_offset);
const IndexType k_index = index[num_dims - 1];
if (k_index < param.rotary_size) {
const IndexType position_rotate_index = (k_index >= param.k0) ? 1 : 0;
const IndexType b_index = index[0], m_index = index[1];
const IndexType position_id_offset = b_index * param.position_b_stride
+ position_rotate_index * param.position_rotate_stride
+ m_index;
const PositionType position =
param.position_ids ? param.position_ids[position_id_offset] : m_index;
const IndexType actual_k_index = k_index % param.actual_rotary_size;
const IndexType sinuous_offset = position * param.sinuous_m_stride + actual_k_index;
LoadPack cos_vec, sin_vec, out_vec;
if (param.cos && param.sin) {
cos_vec = *reinterpret_cast<const LoadPack*>(param.cos + sinuous_offset);
sin_vec = *reinterpret_cast<const LoadPack*>(param.sin + sinuous_offset);
} else {
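        // No precomputed cos/sin cache: compute the sinusoid on the fly. theta is passed in
        // as 1 / base, so logf(theta) = -logf(base) and the frequency factor becomes
        // base^(-2 * (k / 2) / actual_rotary_size), the usual RoPE inverse frequency. Each
        // frequency is written twice to cover the (even, odd) channel pair.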
const IndexType actual_ndim = param.rotary_size / rotary_emb_dim;
#pragma unroll
for (int i = 0; i < PackSize / 2; i++) {
T val = position
* expf(2.0f * static_cast<float>(((actual_k_index >> 1) + i))
* param.inv_actual_rotary_size * logf(param.theta));
T cos_val = cosf(val);
T sin_val = sinf(val);
cos_vec.elem[i * 2] = cos_val;
cos_vec.elem[i * 2 + 1] = cos_val;
sin_vec.elem[i * 2] = sin_val;
sin_vec.elem[i * 2 + 1] = sin_val;
}
}
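      // Rotate each (even, odd) channel pair:
      //   out_even = x_even * cos - x_odd * sin, out_odd = x_odd * cos + x_even * sin.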
#pragma unroll
for (int i = 0; i < PackSize / 2; i++) {
out_vec.elem[i * 2] =
x_vec.elem[i * 2] * cos_vec.elem[i * 2] - x_vec.elem[i * 2 + 1] * sin_vec.elem[i * 2];
out_vec.elem[i * 2 + 1] = x_vec.elem[i * 2 + 1] * cos_vec.elem[i * 2 + 1]
+ x_vec.elem[i * 2] * sin_vec.elem[i * 2 + 1];
}
*(reinterpret_cast<LoadPack*>(param.out + out_offset)) = out_vec;
} else {
*(reinterpret_cast<LoadPack*>(param.out + out_offset)) = x_vec;
}
}
}
template<typename T, typename PositionType, typename IndexType, size_t num_dims,
size_t rotary_emb_dim>
__global__ void PlaneKernel(
FusedApplyRotaryEmbParam<T, PositionType, IndexType, num_dims, rotary_emb_dim> param) {
for (IndexType offset = threadIdx.x + blockIdx.x * blockDim.x; offset < param.num_elements;
offset += blockDim.x * gridDim.x) {
using LoadPack = cuda::elementwise::Packed<T, 2>;
IndexType temp_offset = offset;
IndexType index[num_dims];
#pragma unroll
for (int i = 0; i < num_dims - 1; i++) {
IndexType ref_stride = param.ref_stride[i];
IndexType idx = temp_offset / ref_stride;
index[i] = idx;
temp_offset = temp_offset - idx * ref_stride;
}
index[num_dims - 1] = temp_offset;
const IndexType b_index = index[0], m_index = index[1], k_index = index[num_dims - 1];
const IndexType position_rotate_index = (k_index >= param.k0) ? 1 : 0;
const IndexType position_id_offset = b_index * param.position_b_stride
+ position_rotate_index * param.position_rotate_stride
+ m_index;
const PositionType position =
param.position_ids ? param.position_ids[position_id_offset] : m_index;
const IndexType actual_k_index = k_index % param.actual_rotary_size;
const IndexType sinuous_offset = position * param.k + actual_k_index;
T cos_val, sin_val, out_val;
if (param.cos && param.sin) {
cos_val = *(param.cos + sinuous_offset);
sin_val = *(param.sin + sinuous_offset);
} else {
T val = position
* expf(2.0f * static_cast<float>(k_index % (param.actual_rotary_size >> 1))
* param.inv_actual_rotary_size * logf(param.theta));
cos_val = cosf(val);
sin_val = sinf(val);
}
LoadPack x_vec;
IndexType x_offset = param.x_offset;
IndexType out_offset = 0;
#pragma unroll
for (int i = 0; i < num_dims; i++) {
x_offset = x_offset + param.x_stride[i] * index[i];
out_offset = out_offset + param.out_stride[i] * index[i];
}
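    // Rotate-half pairing: every rotary element combines with its partner rotate_stride away.
    // Lower-half elements read the negated upper-half partner; upper-half elements read the
    // lower-half partner unchanged.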
if (k_index < param.k0) {
x_vec.elem[0] = *(param.x + x_offset);
x_vec.elem[1] = (param.k0 - k_index > param.rotate_stride)
? static_cast<T>(-*(param.x + x_offset + param.rotate_stride))
: *(param.x + x_offset - param.rotate_stride);
out_val = cos_val * x_vec.elem[0] + sin_val * x_vec.elem[1];
} else if (k_index < param.k1) {
x_vec.elem[0] = *(param.x + x_offset);
x_vec.elem[1] = (param.k1 - k_index > param.rotate_stride)
? static_cast<T>(-*(param.x + x_offset + param.rotate_stride))
: *(param.x + x_offset - param.rotate_stride);
out_val = cos_val * x_vec.elem[0] + sin_val * x_vec.elem[1];
} else {
out_val = *(param.x + x_offset);
}
*(param.out + out_offset) = out_val;
}
}
template<typename T, typename PositionType, typename IndexType, size_t PackSize, size_t num_dims,
size_t rotary_emb_dim>
void LaunchKernel(ep::CudaStream* stream, const T* x, const T* cos, const T* sin,
const PositionType* position_ids, T* out, const int64_t* position_shape,
const std::string& x_layout, const std::string& output_layout,
const std::string& mode, const T theta, const IndexType rotary_size,
const IndexType b, const IndexType m, const IndexType h, const IndexType k,
const IndexType x_b_stride, const IndexType x_m_stride,
const IndexType x_h_stride, const IndexType x_offset,
const IndexType out_b_stride, const IndexType out_m_stride,
const IndexType out_h_stride, IndexType num_elements) {
const IndexType k0 = rotary_size / rotary_emb_dim,
                  k1 = rotary_size;  // TODO: this only supports 1D and 2D rotary positional encoding
const IndexType rotate_stride = rotary_size / (2 * rotary_emb_dim);
const IndexType actual_rotary_size = rotary_size / rotary_emb_dim;
  const float inv_actual_rotary_size = 1.0f / actual_rotary_size;
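  // For 2D rotary encoding the rotary channels split at k0: channels [0, k0) use the first
  // row of position_ids, channels [k0, k1) use the second, and channels >= rotary_size are
  // copied through untouched.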
struct FusedApplyRotaryEmbParam<T, PositionType, IndexType, num_dims, rotary_emb_dim> param(
x, cos, sin, position_ids, out, theta, inv_actual_rotary_size, actual_rotary_size,
rotary_size, rotate_stride, num_elements, k, k0, k1, x_offset);
const IndexType ref_strides[num_dims] = {m * h * k, h * k, k, 1};
const IndexType out_strides[num_dims] = {out_b_stride, out_m_stride, out_h_stride, 1};
const IndexType x_strides[num_dims] = {x_b_stride, x_m_stride, x_h_stride, 1};
param.sinuous_m_stride = actual_rotary_size;
const IndexType position_m = position_shape ? static_cast<IndexType>(position_shape[2]) : m;
param.position_rotate_stride = position_m;
param.position_b_stride = position_m * rotary_emb_dim;
  // K has to be the last dimension; only k & m matter, therefore strides other than k & m do
  // not really need to be computed.
#pragma unroll
for (int i = 0; i < num_dims; i++) {
param.ref_stride[i] = ref_strides[i];
param.out_stride[i] = out_strides[i];
param.x_stride[i] = x_strides[i];
}
constexpr size_t blk_size = 128;
if (mode == "plane") {
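    // PlaneKernel addresses scalars rather than packs, so undo the division by PackSize done
    // in DispatchPackSize.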
param.num_elements = param.num_elements * PackSize;
hipLaunchKernelGGL(( PlaneKernel<T, PositionType, IndexType, num_dims, rotary_emb_dim>)
, dim3((param.num_elements + blk_size - 1) / blk_size), dim3(blk_size), 0, stream->cuda_stream(),
param);
} else {
hipLaunchKernelGGL(( IntervalKernel<T, PositionType, IndexType, PackSize, num_dims, rotary_emb_dim>)
, dim3((param.num_elements + blk_size - 1) / blk_size), dim3(blk_size), 0, stream->cuda_stream(),
param);
}
}
template<typename T, typename PositionType, typename IndexType, size_t num_dims,
size_t rotary_emb_dim>
void DispatchPackSize(ep::CudaStream* stream, const T* x, const T* cos, const T* sin,
const PositionType* position_ids, T* out, const int64_t* position_shape,
const std::string& x_layout, const std::string& output_layout,
const std::string& mode, const T theta, const IndexType rotary_size,
const IndexType b, const IndexType m, const IndexType h, const IndexType k,
const IndexType x_b_stride, const IndexType x_m_stride,
const IndexType x_h_stride, const IndexType x_offset,
const IndexType out_b_stride, const IndexType out_m_stride,
const IndexType out_h_stride, IndexType num_elements) {
const auto CheckPackSize = [&](const size_t PackSize) {
bool r = (((reinterpret_cast<uintptr_t>(x) % (sizeof(T) * PackSize)) == 0)
&& (((rotary_size / rotary_emb_dim) % PackSize) == 0)
&& (((k - rotary_size) % PackSize) == 0) && ((16 / sizeof(T)) >= PackSize));
return r;
};
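  // Choose the widest pack that keeps x 16-byte aligned, divides both the per-dimension
  // rotary span and the non-rotary tail, and stays within a 16-byte load.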
if (CheckPackSize(8)) {
num_elements /= 8;
LaunchKernel<T, PositionType, IndexType, 8, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride, num_elements);
} else if (CheckPackSize(4)) {
num_elements /= 4;
LaunchKernel<T, PositionType, IndexType, 4, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride, num_elements);
} else {
num_elements /= 2;
LaunchKernel<T, PositionType, IndexType, 2, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride, num_elements);
}
}
template<typename T, typename PositionType, size_t num_dims, size_t rotary_emb_dim>
void DispatchIndex(ep::CudaStream* stream, const T* x, const T* cos, const T* sin,
const PositionType* position_ids, T* out, const int64_t* position_shape,
const std::string& x_layout, const std::string& output_layout,
const std::string& mode, const T theta, const int64_t rotary_size,
const int64_t b, const int64_t m, const int64_t h, const int64_t k,
const int64_t x_b_stride, const int64_t x_m_stride, const int64_t x_h_stride,
const int64_t x_offset, const int64_t out_b_stride, const int64_t out_m_stride,
const int64_t out_h_stride) {
int64_t num_elements = b * m * h * k;
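  // Prefer 32-bit indexing whenever the element count fits; it keeps the kernels' index
  // arithmetic in 32 bits.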
if (num_elements < (1 << 30)) {
DispatchPackSize<T, PositionType, int32_t, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, static_cast<int32_t>(rotary_size), static_cast<int32_t>(b), static_cast<int32_t>(m),
static_cast<int32_t>(h), static_cast<int32_t>(k), static_cast<int32_t>(x_b_stride),
static_cast<int32_t>(x_m_stride), static_cast<int32_t>(x_h_stride),
static_cast<int32_t>(x_offset), static_cast<int32_t>(out_b_stride),
static_cast<int32_t>(out_m_stride), static_cast<int32_t>(out_h_stride),
static_cast<int32_t>(num_elements));
} else {
DispatchPackSize<T, PositionType, int64_t, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride, num_elements);
}
}
template<typename T, typename PositionType, size_t num_dims>
void DispatchRotaryEmbeddingDimension(ep::CudaStream* stream, const T* x, const T* cos,
const T* sin, const PositionType* position_ids, T* out,
const int64_t* position_shape, const std::string& x_layout,
const std::string& output_layout, const std::string& mode,
const T theta, const int64_t rotary_size,
const int rotary_emb_dim, const int64_t b, const int64_t m,
const int64_t h, const int64_t k, const int64_t x_b_stride,
const int64_t x_m_stride, const int64_t x_h_stride,
const int64_t x_offset, const int64_t out_b_stride,
const int64_t out_m_stride, const int64_t out_h_stride) {
if (rotary_emb_dim == 1) {
DispatchIndex<T, PositionType, num_dims, 1>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride);
} else if (rotary_emb_dim == 2) {
DispatchIndex<T, PositionType, num_dims, 2>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride);
}
}
template<typename T, typename PositionType>
class FusedApplyRotaryEmbKernel final : public user_op::OpKernel {
public:
FusedApplyRotaryEmbKernel() = default;
~FusedApplyRotaryEmbKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* cos = nullptr;
user_op::Tensor* sin = nullptr;
user_op::Tensor* position_ids = nullptr;
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const std::string& x_layout = ctx->Attr<std::string>("x_layout");
const std::string& output_layout = ctx->Attr<std::string>("output_layout");
const std::string& mode = ctx->Attr<std::string>("mode");
const int64_t tensor_index = ctx->Attr<int64_t>("tensor_index");
const int64_t k_size = ctx->Attr<int64_t>("k_size");
const int64_t rotary_size = ctx->Attr<int64_t>("rotary_size");
const float theta = 1.0f / ctx->Attr<float>("base");
int rotary_emb_dim = 1;
if (ctx->has_input("cos", 0)) { cos = ctx->Tensor4ArgNameAndIndex("cos", 0); }
if (ctx->has_input("sin", 0)) { sin = ctx->Tensor4ArgNameAndIndex("sin", 0); }
if (ctx->has_input("position_ids", 0)) {
position_ids = ctx->Tensor4ArgNameAndIndex("position_ids", 0);
rotary_emb_dim = position_ids->shape_view().At(1);
}
constexpr size_t ndims = 4;
int64_t b = 0;
int64_t m = 0;
int64_t h = 0;
int64_t k = 0;
int64_t out_b_stride = 0, out_m_stride = 0, out_h_stride = 0, out_offset = 0;
int64_t x_b_stride = 0, x_m_stride = 0, x_h_stride = 0, x_offset = 0;
ParseDims(out->shape_view(), output_layout, Optional<int64_t>(), k_size, 0, &b, &m, &h, &k,
&out_b_stride, &out_m_stride, &out_h_stride, &out_offset);
ParseDims(x->shape_view(), x_layout, Optional<int64_t>(), k_size, tensor_index, &b, &m, &h, &k,
&x_b_stride, &x_m_stride, &x_h_stride, &x_offset);
    // TODO: num_dims is hard-coded here, and the template dispatch below looks redundant.
DispatchRotaryEmbeddingDimension<T, PositionType, ndims>(
ctx->stream()->As<ep::CudaStream>(), reinterpret_cast<const T*>(x->dptr()),
cos ? reinterpret_cast<const T*>(cos->dptr()) : nullptr,
sin ? reinterpret_cast<const T*>(sin->dptr()) : nullptr,
position_ids ? reinterpret_cast<const PositionType*>(position_ids->dptr()) : nullptr,
reinterpret_cast<T*>(out->mut_dptr()),
position_ids ? position_ids->shape_view().data() : nullptr, x_layout, output_layout, mode,
static_cast<T>(theta), rotary_size, rotary_emb_dim, b, m, h, k, x_b_stride, x_m_stride,
x_h_stride, x_offset, out_b_stride, out_m_stride, out_h_stride);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_FUSED_APPLY_ROTARY_EMB_GPU(dtype, position_type) \
REGISTER_USER_KERNEL("fused_apply_rotary_emb") \
.SetCreateFn<FusedApplyRotaryEmbKernel<dtype, position_type>>() \
.SetIsMatchedHob( \
(user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value) \
&& (user_op::HobInputSize("position_ids") == 1) \
&& (user_op::HobDataType("position_ids", 0) == GetDataType<position_type>::value));
#define REGISTER_FUSED_APPLY_ROTARY_EMB_GPU_DTYPE(dtype) \
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU(dtype, int64_t); \
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU(dtype, int32_t); \
REGISTER_USER_KERNEL("fused_apply_rotary_emb") \
.SetCreateFn<FusedApplyRotaryEmbKernel<dtype, int64_t>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value) \
&& (user_op::HobInputSize("position_ids") == 0));
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU_DTYPE(float);
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU_DTYPE(half);
#if TORCH_HIP_VERSION >= 11000
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU_DTYPE(nv_bfloat16);
#endif // TORCH_HIP_VERSION >= 11000
} // namespace
} // namespace user_op
} // namespace oneflow
#endif // WITH_CUTLASS
| e33fc56aa62c0a1e5ee3215d6c0f5dec71f2c8dd.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifdef WITH_CUTLASS
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
#include "oneflow/core/cuda/elementwise.cuh"
#include "oneflow/core/ep/include/primitive/permute.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/warp/mma.h"
#include "kernel_forward.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
#include "trt_flash_attention/fmha.h"
#include "trt_flash_attention/fmha_flash_attention.h"
namespace oneflow {
namespace user_op {
namespace {
void ParseDims(const ShapeView& shape, const std::string& layout,
const Optional<int64_t>& batch_size, const Optional<int64_t>& seq_len,
const Optional<int64_t>& num_heads, const Optional<int64_t>& head_size,
int64_t tensor_index, int64_t* b, int64_t* m, int64_t* h, int64_t* k,
int64_t* b_stride, int64_t* m_stride, int64_t* h_stride, int64_t* offset,
bool* bm_packed) {
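  // Layout strings combine the batch/seq order ("BM", "MB", or "(BM)" when batch and seq are
  // fused) with the head/size packing ("HK", "(HK)", "(H2K)", "(H3K)"). packed_n is the
  // number of tensors sharing the packed last dimension (1 plain, 2 for fused KV, 3 for
  // fused QKV); tensor_index selects this tensor's slice of it via *offset.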
if (shape.NumAxes() == 2) {
if (layout == "(BM)(HK)" || layout == "(BM)(H2K)" || layout == "(BM)(H3K)") {
*bm_packed = true;
CHECK(batch_size);
CHECK(seq_len);
*b = CHECK_JUST(batch_size);
*m = CHECK_JUST(seq_len);
int64_t packed_n = 0;
if (layout == "(BM)(HK)") {
packed_n = 1;
} else if (layout == "(BM)(H2K)") {
packed_n = 2;
} else if (layout == "(BM)(H3K)") {
packed_n = 3;
} else {
UNIMPLEMENTED();
}
const int64_t hidden_size = shape.At(1);
if (num_heads) {
const int64_t expected_h = CHECK_JUST(num_heads);
const int64_t packed_h = packed_n * expected_h;
CHECK_EQ(hidden_size % packed_h, 0);
*h = expected_h;
*k = hidden_size / packed_h;
} else if (head_size) {
const int64_t expected_k = CHECK_JUST(head_size);
const int64_t packed_k = packed_n * expected_k;
CHECK_EQ(hidden_size % packed_k, 0);
*h = hidden_size / packed_k;
*k = expected_k;
} else {
UNIMPLEMENTED();
}
*h_stride = *k * packed_n;
*m_stride = *h_stride * *h;
*b_stride = 0;
if (packed_n == 1) {
*offset = 0;
} else if (packed_n == 2) {
CHECK_GE(tensor_index, 1);
*offset = (tensor_index - 1) * *k;
} else if (packed_n == 3) {
*offset = tensor_index * *k;
} else {
UNIMPLEMENTED();
}
} else {
UNIMPLEMENTED();
}
} else if (shape.NumAxes() == 3) {
if (layout == "BM(HK)" || layout == "BM(H2K)" || layout == "BM(H3K)" || layout == "MB(HK)"
|| layout == "MB(H2K)" || layout == "MB(H3K)") {
*bm_packed = false;
bool batch_first = false;
int64_t packed_n = 0;
const std::string layout_bm = layout.substr(0, 2);
const std::string layout_hk = layout.substr(2);
if (layout_bm == "BM") {
*b = shape.At(0);
*m = shape.At(1);
batch_first = true;
} else if (layout_bm == "MB") {
*b = shape.At(1);
*m = shape.At(0);
batch_first = false;
} else {
UNIMPLEMENTED();
}
if (layout_hk == "(HK)") {
packed_n = 1;
} else if (layout_hk == "(H2K)") {
packed_n = 2;
} else if (layout_hk == "(H3K)") {
packed_n = 3;
} else {
UNIMPLEMENTED();
}
const int64_t hidden_size = shape.At(2);
if (num_heads) {
const int64_t expected_h = CHECK_JUST(num_heads);
const int64_t packed_h = packed_n * expected_h;
CHECK_EQ(hidden_size % packed_h, 0);
*h = expected_h;
*k = hidden_size / packed_h;
} else if (head_size) {
const int64_t expected_k = CHECK_JUST(head_size);
const int64_t packed_k = packed_n * expected_k;
CHECK_EQ(hidden_size % packed_k, 0);
*h = hidden_size / packed_k;
*k = expected_k;
} else {
UNIMPLEMENTED();
}
*h_stride = *k * packed_n;
if (batch_first) {
*m_stride = *h_stride * *h;
*b_stride = *m_stride * *m;
} else {
*b_stride = *h_stride * *h;
*m_stride = *b_stride * *b;
}
if (packed_n == 1) {
*offset = 0;
} else if (packed_n == 2) {
CHECK_GE(tensor_index, 1);
*offset = (tensor_index - 1) * *k;
} else if (packed_n == 3) {
*offset = tensor_index * *k;
} else {
UNIMPLEMENTED();
}
} else if (layout == "(BM)HK") {
*bm_packed = true;
CHECK(batch_size);
CHECK(seq_len);
*b = CHECK_JUST(batch_size);
*m = CHECK_JUST(seq_len);
*h = shape.At(1);
*k = shape.At(2);
*h_stride = *k;
*m_stride = *h_stride * *h;
*b_stride = 0;
} else {
UNIMPLEMENTED();
}
} else if (shape.NumAxes() == 4) {
*bm_packed = false;
if (layout == "BMHK") {
*b = shape.At(0);
*m = shape.At(1);
*h = shape.At(2);
*k = shape.At(3);
*h_stride = *k;
*m_stride = *h_stride * *h;
*b_stride = *m_stride * *m;
} else if (layout == "BHMK") {
*b = shape.At(0);
*m = shape.At(2);
*h = shape.At(1);
*k = shape.At(3);
*m_stride = *k;
*h_stride = *m_stride * *m;
*b_stride = *h_stride * *h;
} else if (layout == "MBHK") {
*b = shape.At(1);
*m = shape.At(0);
*h = shape.At(2);
*k = shape.At(3);
*h_stride = *k;
*b_stride = *h_stride * *h;
*m_stride = *b_stride * *b;
} else {
UNIMPLEMENTED();
}
*offset = 0;
} else {
UNIMPLEMENTED();
};
if (batch_size) {
const int64_t expected_b = CHECK_JUST(batch_size);
CHECK_EQ(*b, expected_b);
}
if (seq_len) {
const int64_t expected_m = CHECK_JUST(seq_len);
CHECK_EQ(*m, expected_m);
}
if (num_heads) {
const int64_t expected_h = CHECK_JUST(num_heads);
CHECK_EQ(*h, expected_h);
}
if (head_size) {
const int64_t expected_k = CHECK_JUST(head_size);
CHECK_EQ(*k, expected_k);
}
}
void ParseDims(const ShapeView& shape, const std::string& layout,
const Optional<int64_t>& num_heads, const Optional<int64_t>& head_size,
int64_t tensor_index, int64_t* b, int64_t* m, int64_t* h, int64_t* k,
int64_t* b_stride, int64_t* m_stride, int64_t* h_stride, int64_t* offset) {
bool bm_packed{};
ParseDims(shape, layout, Optional<int64_t>(), Optional<int64_t>(), num_heads, head_size,
tensor_index, b, m, h, k, b_stride, m_stride, h_stride, offset, &bm_packed);
}
template<typename T, int pack_size>
struct alignas(pack_size * sizeof(T)) Pack {
T elem[pack_size];
};
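// Interleave separate Q, K and V tensors into the packed (b, s, h, 3, d) layout expected by
// the TRT flash-attention kernels, and fill cu_seqlens with the prefix sums i * s of the
// fixed per-batch sequence lengths.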
template<typename T>
__global__ void PackQkv(int b, int s, int nh, int d, const T* q, const T* k, const T* v, T* o,
int32_t* seq_len) {
int count = b * s * nh * d * 3;
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < count; i += blockDim.x * gridDim.x) {
int row = i / (d * 3);
int out_col = i - row * (d * 3);
T out;
if (out_col < d) {
out = q[row * d + out_col];
} else if (out_col < 2 * d) {
out = k[row * d + out_col - d];
} else {
out = v[row * d + out_col - d * 2];
}
o[i] = out;
}
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < b + 1; i += blockDim.x * gridDim.x) {
seq_len[i] = i * s;
}
}
struct Params {
DataType data_type;
int64_t num_batches;
int64_t num_heads;
int64_t query_seq_len;
int64_t kv_seq_len;
int64_t head_size;
int64_t value_head_size;
int64_t q_stride_b;
int64_t q_stride_m;
int64_t q_stride_h;
int64_t k_stride_b;
int64_t k_stride_m;
int64_t k_stride_h;
int64_t v_stride_b;
int64_t v_stride_m;
int64_t v_stride_h;
std::string attn_mask_type;
int64_t causal_diagonal_offset;
const void* query_ptr;
const void* key_ptr;
const void* value_ptr;
const void* attn_bias_ptr;
const void* query_seq_start_ptr;
const void* key_seq_start_ptr;
const void* key_seq_len_ptr;
int64_t attn_bias_stride_b;
int64_t attn_bias_stride_h;
int64_t attn_bias_stride_m;
void* out_ptr;
void* workspace;
int64_t workspace_size;
float scale;
};
template<typename T, typename ArchTag, bool is_aligned, int queries_per_block, int keys_per_block,
bool single_value_iteration, bool with_attn_bias>
void LaunchCutlassFmha(const Params& params, ep::CudaStream* stream) {
// The fmha implementation below is based on xformers's fmha
// implementation at:
// https://github.com/facebookresearch/xformers/tree/main/xformers/csrc/attention/cuda/fmha
using Attention = AttentionKernel<T, ArchTag, is_aligned, queries_per_block, keys_per_block,
single_value_iteration, false, with_attn_bias>;
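  // queries_per_block / keys_per_block are the attention kernel's tile sizes;
  // single_value_iteration indicates head_dim_value fits within one key tile, so the value
  // dimension is processed in a single iteration.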
typename Attention::Params p{};
p.query_ptr = const_cast<T*>(reinterpret_cast<const T*>(params.query_ptr));
p.key_ptr = const_cast<T*>(reinterpret_cast<const T*>(params.key_ptr));
p.value_ptr = const_cast<T*>(reinterpret_cast<const T*>(params.value_ptr));
p.attn_bias_ptr = const_cast<T*>(reinterpret_cast<const T*>(params.attn_bias_ptr));
p.seqstart_q_ptr =
const_cast<int32_t*>(reinterpret_cast<const int32_t*>(params.query_seq_start_ptr));
p.seqstart_k_ptr =
const_cast<int32_t*>(reinterpret_cast<const int32_t*>(params.key_seq_start_ptr));
p.seqlen_k_ptr = const_cast<int32_t*>(reinterpret_cast<const int32_t*>(params.key_seq_len_ptr));
p.logsumexp_ptr = nullptr;
p.output_ptr = reinterpret_cast<T*>(params.out_ptr);
if (Attention::kNeedsOutputAccumulatorBuffer) {
using Acc = typename Attention::accum_t;
CHECK_GE(params.workspace_size, params.num_batches * params.query_seq_len * params.num_heads
* params.value_head_size * sizeof(Acc));
p.output_accum_ptr = reinterpret_cast<Acc*>(params.workspace);
} else {
p.output_accum_ptr = nullptr;
}
p.num_heads = params.num_heads;
p.num_batches = params.num_batches;
p.head_dim = params.head_size;
p.head_dim_value = params.value_head_size;
p.num_queries = params.query_seq_len;
p.num_keys = params.kv_seq_len;
p.q_strideM = params.q_stride_m;
p.k_strideM = params.k_stride_m;
p.v_strideM = params.v_stride_m;
p.o_strideM = p.head_dim_value * p.num_heads;
p.bias_strideM = params.attn_bias_stride_m;
p.q_strideH = params.q_stride_h;
p.k_strideH = params.k_stride_h;
p.v_strideH = params.v_stride_h;
p.bias_strideH = params.attn_bias_stride_h;
p.q_strideB = params.q_stride_b;
p.k_strideB = params.k_stride_b;
p.v_strideB = params.v_stride_b;
p.bias_strideB = params.attn_bias_stride_b;
p.scale = params.scale;
if (params.attn_mask_type == "none") {
p.custom_mask_type = Attention::NoCustomMask;
} else if (params.attn_mask_type == "causal_from_top_left") {
p.custom_mask_type = Attention::CausalFromTopLeft;
} else if (params.attn_mask_type == "causal_from_bottom_right") {
p.custom_mask_type = Attention::CausalFromBottomRight;
} else {
UNIMPLEMENTED();
}
p.causal_diagonal_offset = params.causal_diagonal_offset;
p.use_dropout = false;
constexpr auto kernel_fn = attention_kernel_batched_impl<Attention>;
int smem_bytes = sizeof(typename Attention::SharedStorage);
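  // Kernels that need more than the default 48KB (0xc000) of static shared memory must opt
  // in to larger dynamic shared memory, once per process.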
if (smem_bytes > 0xc000) {
static bool once = [&]() {
cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
return true;
}();
}
CHECK(Attention::check_supported(p));
kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream->cuda_stream()>>>(p);
}
template<typename T, typename ArchTag, bool is_aligned, int queries_per_block, int keys_per_block,
bool single_value_iteration>
void DispatchWithAttnBias(const Params& params, ep::CudaStream* stream) {
if (params.attn_bias_ptr != nullptr) {
LaunchCutlassFmha<T, ArchTag, is_aligned, queries_per_block, keys_per_block,
single_value_iteration, true>(params, stream);
} else {
LaunchCutlassFmha<T, ArchTag, is_aligned, queries_per_block, keys_per_block,
single_value_iteration, false>(params, stream);
}
}
template<typename T, typename ArchTag, bool is_aligned, int queries_per_block, int keys_per_block>
void DispatchSingleValueIteration(const Params& params, ep::CudaStream* stream) {
if (params.value_head_size <= keys_per_block) {
DispatchWithAttnBias<T, ArchTag, is_aligned, queries_per_block, keys_per_block, true>(params,
stream);
} else {
DispatchWithAttnBias<T, ArchTag, is_aligned, queries_per_block, keys_per_block, false>(params,
stream);
}
}
template<typename T, typename ArchTag, bool is_aligned>
void DispatchKeysPerBlock(const Params& params, ep::CudaStream* stream) {
if (params.value_head_size <= 64) {
DispatchSingleValueIteration<T, ArchTag, is_aligned, 64, 64>(params, stream);
} else {
DispatchSingleValueIteration<T, ArchTag, is_aligned, 32, 128>(params, stream);
}
}
template<typename T, typename ArchTag>
void DispatchIsAligned(const Params& params, ep::CudaStream* stream) {
if (reinterpret_cast<uintptr_t>(params.query_ptr) % 16 == 0
&& reinterpret_cast<uintptr_t>(params.key_ptr) % 16 == 0
&& reinterpret_cast<uintptr_t>(params.value_ptr) % 16 == 0
&& params.attn_bias_stride_m % (16 / sizeof(T)) == 0
&& params.head_size % (16 / sizeof(T)) == 0
&& params.value_head_size % (16 / sizeof(T)) == 0) {
DispatchKeysPerBlock<T, ArchTag, true>(params, stream);
} else {
DispatchKeysPerBlock<T, ArchTag, false>(params, stream);
}
}
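// Pick the cutlass architecture tag from the device compute capability (SM70/SM75/SM80).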
template<typename T>
void DispatchArchTag(const Params& params, ep::CudaStream* stream) {
const int major = stream->device_properties().major;
const int minor = stream->device_properties().minor;
if (major == 8) {
DispatchIsAligned<T, cutlass::arch::Sm80>(params, stream);
} else if (major == 7) {
if (minor == 5) {
DispatchIsAligned<T, cutlass::arch::Sm75>(params, stream);
} else {
DispatchIsAligned<T, cutlass::arch::Sm70>(params, stream);
}
} else {
UNIMPLEMENTED();
}
}
void DispatchCutlassFmha(const Params& params, ep::CudaStream* stream) {
if (params.data_type == DataType::kFloat16) {
DispatchArchTag<cutlass::half_t>(params, stream);
} else if (params.data_type == DataType::kFloat) {
DispatchArchTag<float>(params, stream);
} else {
UNIMPLEMENTED();
}
}
class FusedMultiHeadAttentionInferenceKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
FusedMultiHeadAttentionInferenceKernel() = default;
~FusedMultiHeadAttentionInferenceKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const Tensor* query = ctx->Tensor4ArgNameAndIndex("query", 0);
const Tensor* key = ctx->Tensor4ArgNameAndIndex("key", 0);
const Tensor* value = ctx->Tensor4ArgNameAndIndex("value", 0);
const Tensor* attn_bias = nullptr;
if (ctx->has_input("attn_bias", 0)) { attn_bias = ctx->Tensor4ArgNameAndIndex("attn_bias", 0); }
const Tensor* query_seq_start = nullptr;
const Tensor* key_seq_start = nullptr;
const Tensor* key_seq_len = nullptr;
const float scale = ctx->Attr<double>("scale");
if (ctx->has_input("query_seq_start", 0)) {
CHECK(ctx->has_input("key_seq_start", 0));
query_seq_start = ctx->Tensor4ArgNameAndIndex("query_seq_start", 0);
key_seq_start = ctx->Tensor4ArgNameAndIndex("key_seq_start", 0);
CHECK(query_seq_start->data_type() == DataType::kInt32);
CHECK(key_seq_start->data_type() == DataType::kInt32);
CHECK_EQ(query_seq_start->shape_view().NumAxes(), 1);
CHECK_GT(query_seq_start->shape_view().At(0), 1);
CHECK(query_seq_start->shape_view() == key_seq_start->shape_view());
if (ctx->has_input("key_seq_len", 0)) {
key_seq_len = ctx->Tensor4ArgNameAndIndex("key_seq_len", 0);
CHECK(key_seq_len->data_type() == DataType::kInt32);
CHECK_EQ(key_seq_len->shape_view().NumAxes(), 1);
CHECK_EQ(key_seq_len->shape_view().At(0), query_seq_start->shape_view().At(0) - 1);
}
} else {
CHECK(!ctx->has_input("key_seq_start", 0));
CHECK(!ctx->has_input("key_seq_len", 0));
}
Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
Tensor* tmp = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const DataType data_type = query->data_type();
CHECK_EQ(key->data_type(), data_type);
CHECK_EQ(value->data_type(), data_type);
CHECK_EQ(out->data_type(), data_type);
const int64_t query_head_size = ctx->Attr<int64_t>("query_head_size");
const std::string& attn_mask_type = ctx->Attr<std::string>("attn_mask_type");
const int64_t causal_diagonal_offset = ctx->Attr<int64_t>("causal_diagonal_offset");
CHECK_GE(causal_diagonal_offset, 0);
const std::string& query_layout = ctx->Attr<std::string>("query_layout");
const std::string& key_layout = ctx->Attr<std::string>("key_layout");
const std::string& value_layout = ctx->Attr<std::string>("value_layout");
const std::string& output_layout = ctx->Attr<std::string>("output_layout");
Optional<int64_t> batch_size;
if (query_seq_start != nullptr) { batch_size = query_seq_start->shape_view().At(0) - 1; }
Optional<int64_t> query_max_seq_len;
const int64_t attr_query_max_seq_len = ctx->Attr<int64_t>("query_max_seq_len");
if (attr_query_max_seq_len != 0) { query_max_seq_len = attr_query_max_seq_len; }
Optional<int64_t> key_max_seq_len;
const int64_t attr_key_max_seq_len = ctx->Attr<int64_t>("key_max_seq_len");
if (attr_key_max_seq_len != 0) { key_max_seq_len = attr_key_max_seq_len; }
int64_t q_b = 0;
int64_t q_m = 0;
int64_t q_h = 0;
int64_t q_k = 0;
int64_t q_b_stride = 0;
int64_t q_m_stride = 0;
int64_t q_h_stride = 0;
int64_t q_offset = 0;
bool q_bm_packed = false;
ParseDims(query->shape_view(), query_layout, batch_size, query_max_seq_len, Optional<int64_t>(),
query_head_size, 0, &q_b, &q_m, &q_h, &q_k, &q_b_stride, &q_m_stride, &q_h_stride,
&q_offset, &q_bm_packed);
if (q_bm_packed) { CHECK(query_seq_start != nullptr); }
int64_t k_b = 0;
int64_t k_m = 0;
int64_t k_h = 0;
int64_t k_k = 0;
int64_t k_b_stride = 0;
int64_t k_m_stride = 0;
int64_t k_h_stride = 0;
int64_t k_offset = 0;
bool k_bm_packed = false;
ParseDims(key->shape_view(), key_layout, q_b, key_max_seq_len, Optional<int64_t>(),
query_head_size, 1, &k_b, &k_m, &k_h, &k_k, &k_b_stride, &k_m_stride, &k_h_stride,
&k_offset, &k_bm_packed);
CHECK_EQ(k_b, q_b);
CHECK_EQ(k_h, q_h);
CHECK_EQ(k_bm_packed, q_bm_packed);
int64_t v_b = 0;
int64_t v_m = 0;
int64_t v_h = 0;
int64_t v_k = 0;
int64_t v_b_stride = 0;
int64_t v_m_stride = 0;
int64_t v_h_stride = 0;
int64_t v_offset = 0;
bool v_bm_packed = false;
ParseDims(value->shape_view(), value_layout, q_b, k_m, q_h, Optional<int64_t>(), 2, &v_b, &v_m,
&v_h, &v_k, &v_b_stride, &v_m_stride, &v_h_stride, &v_offset, &v_bm_packed);
CHECK_EQ(v_b, q_b);
CHECK_EQ(v_m, k_m);
CHECK_EQ(v_bm_packed, k_bm_packed);
if (output_layout == "BM(HK)") {
CHECK(!q_bm_packed);
CHECK_EQ(out->shape_view().NumAxes(), 3);
CHECK_EQ(out->shape_view().At(0), q_b);
CHECK_EQ(out->shape_view().At(1), q_m);
CHECK_EQ(out->shape_view().At(2), q_h * v_k);
} else if (output_layout == "MB(HK)") {
CHECK(!q_bm_packed);
CHECK_EQ(out->shape_view().NumAxes(), 3);
CHECK_EQ(q_b, 1);
CHECK_EQ(out->shape_view().At(0), q_m);
CHECK_EQ(out->shape_view().At(1), q_b);
CHECK_EQ(out->shape_view().At(2), q_h * v_k);
} else if (output_layout == "(BM)(HK)") {
CHECK(q_bm_packed);
CHECK_EQ(out->shape_view().NumAxes(), 2);
CHECK_EQ(out->shape_view().At(0), query->shape_view().At(0));
CHECK_EQ(out->shape_view().At(1), q_h * v_k);
} else {
UNIMPLEMENTED();
}
auto* cuda_stream = ctx->stream()->As<ep::CudaStream>();
    // Also accept the historical env var name containing the typo `KERENL` for backward
    // compatibility.
const bool enable_trt_flash_attn =
ParseBooleanFromEnv(
"ONEFLOW_KERNEL_FMHA_ENABLE_TRT_FLASH_ATTN_IMPL",
ParseBooleanFromEnv("ONEFLOW_KERENL_FMHA_ENABLE_TRT_FLASH_ATTN_IMPL", true))
&& ParseBooleanFromEnv("ONEFLOW_MATMUL_ALLOW_HALF_PRECISION_ACCUMULATION", false);
const bool is_default_scale =
std::abs(scale - 1.0 / std::sqrt(static_cast<float>(q_k))) <= 1e-5;
const int arch = cuda_stream->cuda_arch() / 10;
const bool is_trt_supported_arch = (arch == 75 || arch == 80 || arch == 86 || arch == 89);
const bool is_trt_supported_head_size = ((q_k == 40) || (q_k == 64));
// Avoid PackQKV overhead when seq_len is small.
const bool is_long_seq_len = q_m >= 512;
const bool is_trt_supported_layout = (query_layout == "BMHK" || query_layout == "BM(HK)")
&& (key_layout == "BMHK" || key_layout == "BM(HK)")
&& (value_layout == "BMHK" || value_layout == "BM(HK)")
&& (output_layout == "BMHK" || output_layout == "BM(HK)");
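    // The TRT flash-attention fast path requires fp16 data, the default 1/sqrt(k) scale,
    // equal query/key sequence lengths, head size 40 or 64, a long sequence, a supported SM,
    // no mask, no bias, and contiguous BMHK-style layouts; anything else falls through to
    // the cutlass fMHA below.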
if (is_default_scale && query_seq_start == nullptr && enable_trt_flash_attn
&& data_type == DataType::kFloat16 && q_m == k_m && q_k == v_k && is_trt_supported_head_size
&& is_long_seq_len && is_trt_supported_arch && attn_mask_type == "none"
&& attn_bias == nullptr && is_trt_supported_layout) {
// The fmha implementation below is based on TensorRT's multiHeadFlashAttentionPlugin
// implementation at:
// https://github.com/NVIDIA/TensorRT/tree/main/plugin/multiHeadFlashAttentionPlugin
int32_t cu_seqlens_d_size = (q_b + 1) * sizeof(int32_t);
int32_t* cu_seqlens_d = reinterpret_cast<int32_t*>(tmp->mut_dptr());
half* packed_qkv =
reinterpret_cast<half*>(tmp->mut_dptr<char>() + GetCudaAlignedSize(cu_seqlens_d_size));
constexpr int pack_size = 4;
using PackType = Pack<half, pack_size>;
const int64_t count = q_b * q_m * q_h * q_k * 3 / pack_size;
PackQkv<PackType><<<(count - 1 + 256) / 256, 256, 0, cuda_stream->cuda_stream()>>>(
q_b, q_m, q_h, q_k / pack_size, reinterpret_cast<const PackType*>(query->dptr()),
reinterpret_cast<const PackType*>(key->dptr()),
reinterpret_cast<const PackType*>(value->dptr()), reinterpret_cast<PackType*>(packed_qkv),
cu_seqlens_d);
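      // getFMHAFlashCubinKernels loads cubins on first use, which may perform operations
      // that are rejected during CUDA graph capture; temporarily relax the capture mode
      // around the call.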
#ifdef WITH_CUDA_GRAPHS
cudaStreamCaptureMode mode = cudaStreamCaptureModeRelaxed;
if (cuda_stream->IsGraphCapturing()) {
OF_CUDA_CHECK(cudaThreadExchangeStreamCaptureMode(&mode));
}
#endif // WITH_CUDA_GRAPHS
nvinfer1::plugin::FusedMultiHeadFlashAttentionKernel const* kernels =
nvinfer1::plugin::getFMHAFlashCubinKernels(nvinfer1::plugin::DATA_TYPE_FP16, arch);
#ifdef WITH_CUDA_GRAPHS
if (cuda_stream->IsGraphCapturing()) {
OF_CUDA_CHECK(cudaThreadExchangeStreamCaptureMode(&mode));
}
#endif // WITH_CUDA_GRAPHS
nvinfer1::plugin::runFMHFAKernel(packed_qkv, cu_seqlens_d, out->mut_dptr(), q_b * q_m, arch,
kernels, q_b, q_h, q_k, q_m, cuda_stream->cuda_stream());
return;
}
Params params{};
params.data_type = data_type;
params.num_batches = q_b;
params.num_heads = q_h;
params.query_seq_len = q_m;
params.kv_seq_len = k_m;
params.head_size = q_k;
params.value_head_size = v_k;
params.scale = scale;
params.q_stride_b = q_b_stride;
params.q_stride_m = q_m_stride;
params.q_stride_h = q_h_stride;
params.k_stride_b = k_b_stride;
params.k_stride_m = k_m_stride;
params.k_stride_h = k_h_stride;
params.v_stride_b = v_b_stride;
params.v_stride_m = v_m_stride;
params.v_stride_h = v_h_stride;
params.query_ptr = query->dptr<char>() + q_offset * GetSizeOfDataType(data_type);
params.key_ptr = key->dptr<char>() + k_offset * GetSizeOfDataType(data_type);
params.value_ptr = value->dptr<char>() + v_offset * GetSizeOfDataType(data_type);
params.query_seq_start_ptr =
query_seq_start == nullptr ? nullptr : query_seq_start->dptr<int32_t>();
params.key_seq_start_ptr = key_seq_start == nullptr ? nullptr : key_seq_start->dptr<int32_t>();
params.key_seq_len_ptr = key_seq_len == nullptr ? nullptr : key_seq_len->dptr<int32_t>();
params.out_ptr = out->mut_dptr();
const int64_t tmp_buffer_size = tmp->shape_view().elem_cnt();
params.workspace = tmp->mut_dptr();
params.workspace_size = tmp_buffer_size;
params.attn_mask_type = attn_mask_type;
params.causal_diagonal_offset = causal_diagonal_offset;
if (attn_bias != nullptr) {
const int64_t num_attn_bias_axes = attn_bias->shape_view().NumAxes();
CHECK_GE(num_attn_bias_axes, 1);
CHECK_LE(num_attn_bias_axes, 4);
DimVector padded_attn_bias_shape;
for (int i = 0; i < 4 - num_attn_bias_axes; ++i) { padded_attn_bias_shape.push_back(1); }
for (int i = 0; i < num_attn_bias_axes; ++i) {
padded_attn_bias_shape.push_back(attn_bias->shape_view().At(i));
}
CHECK_GE(padded_attn_bias_shape.at(3), k_m);
int64_t bias_stride = padded_attn_bias_shape.at(3);
if (padded_attn_bias_shape.at(2) == 1) {
params.attn_bias_stride_m = 0;
} else {
CHECK_GE(padded_attn_bias_shape.at(2), q_m);
params.attn_bias_stride_m = bias_stride;
bias_stride *= padded_attn_bias_shape.at(2);
}
if (padded_attn_bias_shape.at(1) == 1) {
params.attn_bias_stride_h = 0;
} else {
CHECK_EQ(padded_attn_bias_shape.at(1), q_h);
params.attn_bias_stride_h = bias_stride;
bias_stride *= q_h;
}
if (padded_attn_bias_shape.at(0) == 1) {
params.attn_bias_stride_b = 0;
} else {
CHECK_EQ(padded_attn_bias_shape.at(0), q_b);
params.attn_bias_stride_b = bias_stride;
}
params.attn_bias_ptr = attn_bias->dptr();
} else {
params.attn_bias_ptr = nullptr;
params.attn_bias_stride_m = 0;
params.attn_bias_stride_h = 0;
params.attn_bias_stride_b = 0;
}
DispatchCutlassFmha(params, cuda_stream);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
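// Temporary buffer layout: a float accumulator sized like the output (for cutlass fMHA
// configurations that need an output accumulator buffer), 3x the output size in the output
// dtype to stage packed QKV for the TRT path, and (batch + 1) int32 entries for cu_seqlens.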
size_t InferTmpBufferSize(InferContext* ctx) {
const auto& out_desc = ctx->OutputTensorDesc("out", 0);
size_t buffer_size = 0;
buffer_size +=
GetCudaAlignedSize(out_desc.shape().elem_cnt() * GetSizeOfDataType(DataType::kFloat));
buffer_size +=
GetCudaAlignedSize(out_desc.shape().elem_cnt() * GetSizeOfDataType(out_desc.data_type())) * 3;
buffer_size +=
GetCudaAlignedSize((out_desc.shape().At(0) + 1) * GetSizeOfDataType(DataType::kInt32));
return buffer_size;
}
#define REGISTER_FUSED_MULTI_HEAD_ATTENTION_INFERENCE_KERNEL(dtype) \
REGISTER_USER_KERNEL("fused_multi_head_attention_inference") \
.SetCreateFn<FusedMultiHeadAttentionInferenceKernel>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == dtype)) \
.SetInferTmpSizeFn(InferTmpBufferSize);
REGISTER_FUSED_MULTI_HEAD_ATTENTION_INFERENCE_KERNEL(DataType::kFloat16)
REGISTER_FUSED_MULTI_HEAD_ATTENTION_INFERENCE_KERNEL(DataType::kFloat)
template<typename Index>
struct ConcatParam {
const void* past_ptr;
const void* ptr;
void* output_ptr;
Index past_offset;
Index offset;
Index output_offset;
Index past_m;
Index past_stride_b;
Index past_stride_m;
Index past_stride_h;
Index stride_b;
Index stride_m;
Index stride_h;
Index output_stride_b;
Index output_stride_m;
Index output_stride_h;
Index count;
Index output_khm;
Index output_kh;
Index output_k;
};
template<typename Index>
struct BatchConcatParam {
ConcatParam<Index> params[2];
};
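// Copy one pack per thread into the concatenated key/value cache: decompose the linear index
// into (b, m, h, k), read from the past tensor when m falls before past_m and from the new
// tensor otherwise, then write through the output strides.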
template<typename T, typename Index>
__device__ void ConcatPastKeyValue(ConcatParam<Index> p) {
for (Index i = blockIdx.x * blockDim.x + threadIdx.x; i < p.count; i += blockDim.x * gridDim.x) {
Index b_idx = i / p.output_khm;
Index b_off = i - b_idx * p.output_khm;
Index m_idx = b_off / p.output_kh;
Index m_off = b_off - m_idx * p.output_kh;
Index h_idx = m_off / p.output_k;
Index k_idx = m_off - h_idx * p.output_k;
T v;
if (m_idx < p.past_m) {
v = reinterpret_cast<const T*>(
p.past_ptr)[p.past_offset + b_idx * p.past_stride_b + m_idx * p.past_stride_m
+ h_idx * p.past_stride_h + k_idx];
} else {
v = reinterpret_cast<const T*>(
p.ptr)[p.offset + b_idx * p.stride_b + (m_idx - p.past_m) * p.stride_m
+ h_idx * p.stride_h + k_idx];
}
reinterpret_cast<T*>(
p.output_ptr)[p.output_offset + b_idx * p.output_stride_b + m_idx * p.output_stride_m
+ h_idx * p.output_stride_h + k_idx] = v;
}
}
template<size_t elem_size, typename Index>
__global__ void BatchConcatPastKeyValue(BatchConcatParam<Index> params) {
if (blockIdx.y == 0) {
    ConcatPastKeyValue<typename std::aligned_storage<elem_size, elem_size>::type, Index>(
        params.params[0]);
  } else if (blockIdx.y == 1) {
    ConcatPastKeyValue<typename std::aligned_storage<elem_size, elem_size>::type, Index>(
        params.params[1]);
} else {
// do nothing
}
}
class FusedAttentionConcatPastKeyValueKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
FusedAttentionConcatPastKeyValueKernel() = default;
~FusedAttentionConcatPastKeyValueKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const Tensor* key = ctx->Tensor4ArgNameAndIndex("key", 0);
const Tensor* value = ctx->Tensor4ArgNameAndIndex("value", 0);
Tensor* output_key = ctx->Tensor4ArgNameAndIndex("output_key", 0);
Tensor* output_value = ctx->Tensor4ArgNameAndIndex("output_value", 0);
const DataType data_type = key->data_type();
const Tensor* past_key = nullptr;
const Tensor* past_value = nullptr;
if (ctx->has_input("past_key", 0)) {
CHECK(ctx->has_input("past_value", 0));
past_key = ctx->Tensor4ArgNameAndIndex("past_key", 0);
past_value = ctx->Tensor4ArgNameAndIndex("past_value", 0);
CHECK_EQ(past_key->data_type(), data_type);
CHECK_EQ(past_value->data_type(), data_type);
} else {
CHECK(!ctx->has_input("past_value", 0));
}
CHECK_EQ(value->data_type(), data_type);
CHECK_EQ(output_key->data_type(), data_type);
CHECK_EQ(output_value->data_type(), data_type);
const int64_t size_of_data_type = GetSizeOfDataType(data_type);
const int64_t key_head_size = ctx->Attr<int64_t>("key_head_size");
const std::string& past_key_layout = ctx->Attr<std::string>("past_key_layout");
const std::string& past_value_layout = ctx->Attr<std::string>("past_value_layout");
const std::string& key_layout = ctx->Attr<std::string>("key_layout");
const std::string& value_layout = ctx->Attr<std::string>("value_layout");
int64_t pack_size = 16 / size_of_data_type;
while (key_head_size % pack_size != 0) { pack_size /= 2; }
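    // Start from the widest 16-byte pack and halve until it divides the head size; all dims
    // and strides below are expressed in units of packs.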
auto ParsePackedDims =
[](const ShapeView& shape, const std::string& layout, const Optional<int64_t>& num_heads,
const Optional<int64_t>& head_size, int64_t tensor_index, int64_t* b, int64_t* m,
int64_t* h, int64_t* k, int64_t* b_stride, int64_t* m_stride, int64_t* h_stride,
int64_t* offset, int64_t pack_size) {
ParseDims(shape, layout, num_heads, head_size, tensor_index, b, m, h, k, b_stride,
m_stride, h_stride, offset);
*k /= pack_size;
*b_stride /= pack_size;
*m_stride /= pack_size;
*h_stride /= pack_size;
*offset /= pack_size;
};
int64_t key_b = 0;
int64_t key_m = 0;
int64_t key_h = 0;
int64_t key_k = 0;
int64_t key_b_stride = 0;
int64_t key_m_stride = 0;
int64_t key_h_stride = 0;
int64_t key_offset = 0;
ParsePackedDims(key->shape_view(), key_layout, Optional<int64_t>(), key_head_size, 1, &key_b,
&key_m, &key_h, &key_k, &key_b_stride, &key_m_stride, &key_h_stride,
&key_offset, pack_size);
int64_t value_b = 0;
int64_t value_m = 0;
int64_t value_h = 0;
int64_t value_k = 0;
int64_t value_b_stride = 0;
int64_t value_m_stride = 0;
int64_t value_h_stride = 0;
int64_t value_offset = 0;
ParsePackedDims(value->shape_view(), value_layout, key_h, key_head_size, 2, &value_b, &value_m,
&value_h, &value_k, &value_b_stride, &value_m_stride, &value_h_stride,
&value_offset, pack_size);
CHECK_EQ(value_b, key_b);
CHECK_EQ(value_m, key_m);
int64_t past_key_b = 0;
int64_t past_key_m = 0;
int64_t past_key_h = 0;
int64_t past_key_k = 0;
int64_t past_key_b_stride = 0;
int64_t past_key_m_stride = 0;
int64_t past_key_h_stride = 0;
int64_t past_key_offset = 0;
if (past_key != nullptr) {
ParsePackedDims(past_key->shape_view(), past_key_layout, key_h, key_head_size, 1, &past_key_b,
&past_key_m, &past_key_h, &past_key_k, &past_key_b_stride, &past_key_m_stride,
&past_key_h_stride, &past_key_offset, pack_size);
}
int64_t past_value_b = 0;
int64_t past_value_m = 0;
int64_t past_value_h = 0;
int64_t past_value_k = 0;
int64_t past_value_b_stride = 0;
int64_t past_value_m_stride = 0;
int64_t past_value_h_stride = 0;
int64_t past_value_offset = 0;
if (past_value != nullptr) {
ParsePackedDims(past_value->shape_view(), past_value_layout, key_h, key_head_size, 2,
&past_value_b, &past_value_m, &past_value_h, &past_value_k,
&past_value_b_stride, &past_value_m_stride, &past_value_h_stride,
&past_value_offset, pack_size);
}
CHECK_EQ(past_value_b, past_key_b);
CHECK_EQ(past_value_m, past_key_m);
int64_t output_key_b = 0;
int64_t output_key_m = 0;
int64_t output_key_h = 0;
int64_t output_key_k = 0;
int64_t output_key_b_stride = 0;
int64_t output_key_m_stride = 0;
int64_t output_key_h_stride = 0;
int64_t output_key_offset = 0;
ParsePackedDims(output_key->shape_view(), past_key_layout, key_h, key_head_size, 1,
&output_key_b, &output_key_m, &output_key_h, &output_key_k,
&output_key_b_stride, &output_key_m_stride, &output_key_h_stride,
&output_key_offset, pack_size);
CHECK_EQ(output_key_b, key_b);
CHECK_EQ(output_key_m, past_key_m + key_m);
int64_t output_value_b = 0;
int64_t output_value_m = 0;
int64_t output_value_h = 0;
int64_t output_value_k = 0;
int64_t output_value_b_stride = 0;
int64_t output_value_m_stride = 0;
int64_t output_value_h_stride = 0;
int64_t output_value_offset = 0;
ParsePackedDims(output_value->shape_view(), past_value_layout, key_h, key_head_size, 2,
&output_value_b, &output_value_m, &output_value_h, &output_value_k,
&output_value_b_stride, &output_value_m_stride, &output_value_h_stride,
&output_value_offset, pack_size);
CHECK_EQ(output_value_b, key_b);
CHECK_EQ(output_value_m, past_value_m + value_m);
int64_t max_tensor_elem = (1 << 30) * pack_size;
CHECK((past_key == nullptr || past_key->shape_view().elem_cnt() <= max_tensor_elem)
&& (past_value == nullptr || past_value->shape_view().elem_cnt() <= max_tensor_elem)
&& key->shape_view().elem_cnt() <= max_tensor_elem
&& value->shape_view().elem_cnt() <= max_tensor_elem
&& output_key->shape_view().elem_cnt() <= max_tensor_elem
&& output_value->shape_view().elem_cnt() <= max_tensor_elem);
int64_t count = output_key_b * output_key_m * output_key_h * output_key_k;
BatchConcatParam<int32_t> kv;
kv.params[0].past_ptr = past_key == nullptr ? nullptr : past_key->dptr();
kv.params[0].ptr = key->dptr();
kv.params[0].output_ptr = output_key->mut_dptr();
kv.params[0].past_offset = past_key_offset;
kv.params[0].offset = key_offset;
kv.params[0].output_offset = output_key_offset;
kv.params[0].past_m = past_key_m;
kv.params[0].past_stride_b = past_key_b_stride;
kv.params[0].past_stride_m = past_key_m_stride;
kv.params[0].past_stride_h = past_key_h_stride;
kv.params[0].stride_b = key_b_stride;
kv.params[0].stride_m = key_m_stride;
kv.params[0].stride_h = key_h_stride;
kv.params[0].output_stride_b = output_key_b_stride;
kv.params[0].output_stride_m = output_key_m_stride;
kv.params[0].output_stride_h = output_key_h_stride;
kv.params[0].count = count;
kv.params[0].output_khm = output_key_k * output_key_h * output_key_m;
kv.params[0].output_kh = output_key_k * output_key_h;
kv.params[0].output_k = output_key_k;
kv.params[1].past_ptr = past_value == nullptr ? nullptr : past_value->dptr();
kv.params[1].ptr = value->dptr();
kv.params[1].output_ptr = output_value->mut_dptr();
kv.params[1].past_offset = past_value_offset;
kv.params[1].offset = value_offset;
kv.params[1].output_offset = output_value_offset;
kv.params[1].past_m = past_value_m;
kv.params[1].past_stride_b = past_value_b_stride;
kv.params[1].past_stride_m = past_value_m_stride;
kv.params[1].past_stride_h = past_value_h_stride;
kv.params[1].stride_b = value_b_stride;
kv.params[1].stride_m = value_m_stride;
kv.params[1].stride_h = value_h_stride;
kv.params[1].output_stride_b = output_value_b_stride;
kv.params[1].output_stride_m = output_value_m_stride;
kv.params[1].output_stride_h = output_value_h_stride;
kv.params[1].count = count;
kv.params[1].output_khm = output_value_k * output_value_h * output_value_m;
kv.params[1].output_kh = output_value_k * output_value_h;
kv.params[1].output_k = output_value_k;
constexpr uint32_t block_size = 256;
const dim3 grid_size((count - 1 + block_size) / block_size, 2);
const int64_t elem_size = size_of_data_type * pack_size;
cudaStream_t cuda_stream = ctx->stream()->As<ep::CudaStream>()->cuda_stream();
if (elem_size == 16) {
BatchConcatPastKeyValue<16, int32_t><<<grid_size, block_size, 0, cuda_stream>>>(kv);
} else if (elem_size == 8) {
BatchConcatPastKeyValue<8, int32_t><<<grid_size, block_size, 0, cuda_stream>>>(kv);
} else if (elem_size == 4) {
BatchConcatPastKeyValue<4, int32_t><<<grid_size, block_size, 0, cuda_stream>>>(kv);
} else if (elem_size == 2) {
BatchConcatPastKeyValue<2, int32_t><<<grid_size, block_size, 0, cuda_stream>>>(kv);
} else if (elem_size == 1) {
BatchConcatPastKeyValue<1, int32_t><<<grid_size, block_size, 0, cuda_stream>>>(kv);
} else {
UNIMPLEMENTED();
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("fused_attention_concat_past_key_value")
.SetCreateFn<FusedAttentionConcatPastKeyValueKernel>()
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA));
template<typename T, typename PositionType, typename IndexType, size_t num_dims,
size_t rotary_emb_dim>
struct FusedApplyRotaryEmbParam {
const T* x;
const T* cos;
const T* sin;
const PositionType* position_ids;
T* out;
const T theta;
const float inv_actual_rotary_size; // 1.0 / (rotary_size per rotary dimension)
const IndexType actual_rotary_size; // rotary_size per rotary dimension
const IndexType rotary_size;
const IndexType rotate_stride;
const IndexType k0;
const IndexType k1;
IndexType num_elements;
const IndexType k;
const IndexType x_offset;
IndexType ref_stride[num_dims]; // b, m, h, k
  IndexType out_stride[num_dims]; // ordered by descending stride
IndexType x_stride[num_dims];
IndexType position_b_stride;
IndexType position_rotate_stride;
IndexType sinuous_m_stride;
FusedApplyRotaryEmbParam(const T* x, const T* cos, const T* sin, const PositionType* position_ids,
T* out, const T theta, const float inv_actual_rotary_size,
const IndexType actual_rotary_size, const IndexType rotary_size,
const IndexType rotate_stride, const IndexType num_elements,
const IndexType k, const IndexType k0, const IndexType k1,
const IndexType x_offset)
: x(x),
cos(cos),
sin(sin),
position_ids(position_ids),
out(out),
theta(theta),
inv_actual_rotary_size(inv_actual_rotary_size),
actual_rotary_size(actual_rotary_size),
rotary_size(rotary_size),
rotate_stride(rotate_stride),
num_elements(num_elements),
k(k),
k0(k0),
k1(k1),
x_offset(x_offset) {}
};
template<typename T, typename PositionType, typename IndexType, size_t PackSize, size_t num_dims,
size_t rotary_emb_dim>
__global__ void IntervalKernel(
FusedApplyRotaryEmbParam<T, PositionType, IndexType, num_dims, rotary_emb_dim> param) {
for (IndexType packed_offset = threadIdx.x + blockIdx.x * blockDim.x;
packed_offset < param.num_elements; packed_offset += blockDim.x * gridDim.x) {
using LoadPack = cuda::elementwise::Packed<T, PackSize>;
IndexType offset = packed_offset * PackSize;
IndexType index[num_dims]; // b, m, h, k
IndexType temp_offset = offset;
for (int i = 0; i < num_dims - 1; i++) {
IndexType ref_stride = param.ref_stride[i];
IndexType idx = temp_offset / ref_stride;
index[i] = idx;
temp_offset = temp_offset - idx * ref_stride;
}
index[num_dims - 1] = temp_offset;
IndexType x_offset = param.x_offset;
IndexType out_offset = 0;
#pragma unroll
for (int i = 0; i < num_dims; i++) {
x_offset = x_offset + param.x_stride[i] * index[i];
out_offset = out_offset + param.out_stride[i] * index[i];
}
const LoadPack x_vec = *reinterpret_cast<const LoadPack*>(param.x + x_offset);
const IndexType k_index = index[num_dims - 1];
if (k_index < param.rotary_size) {
const IndexType position_rotate_index = (k_index >= param.k0) ? 1 : 0;
const IndexType b_index = index[0], m_index = index[1];
const IndexType position_id_offset = b_index * param.position_b_stride
+ position_rotate_index * param.position_rotate_stride
+ m_index;
const PositionType position =
param.position_ids ? param.position_ids[position_id_offset] : m_index;
const IndexType actual_k_index = k_index % param.actual_rotary_size;
const IndexType sinuous_offset = position * param.sinuous_m_stride + actual_k_index;
LoadPack cos_vec, sin_vec, out_vec;
if (param.cos && param.sin) {
cos_vec = *reinterpret_cast<const LoadPack*>(param.cos + sinuous_offset);
sin_vec = *reinterpret_cast<const LoadPack*>(param.sin + sinuous_offset);
} else {
const IndexType actual_ndim = param.rotary_size / rotary_emb_dim;
#pragma unroll
for (int i = 0; i < PackSize / 2; i++) {
T val = position
* expf(2.0f * static_cast<float>(((actual_k_index >> 1) + i))
* param.inv_actual_rotary_size * logf(param.theta));
T cos_val = cosf(val);
T sin_val = sinf(val);
cos_vec.elem[i * 2] = cos_val;
cos_vec.elem[i * 2 + 1] = cos_val;
sin_vec.elem[i * 2] = sin_val;
sin_vec.elem[i * 2 + 1] = sin_val;
}
}
#pragma unroll
for (int i = 0; i < PackSize / 2; i++) {
out_vec.elem[i * 2] =
x_vec.elem[i * 2] * cos_vec.elem[i * 2] - x_vec.elem[i * 2 + 1] * sin_vec.elem[i * 2];
out_vec.elem[i * 2 + 1] = x_vec.elem[i * 2 + 1] * cos_vec.elem[i * 2 + 1]
+ x_vec.elem[i * 2] * sin_vec.elem[i * 2 + 1];
}
*(reinterpret_cast<LoadPack*>(param.out + out_offset)) = out_vec;
} else {
*(reinterpret_cast<LoadPack*>(param.out + out_offset)) = x_vec;
}
}
}
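// PlaneKernel handles the half-rotated ("plane") rotary layout: the element at
// k is paired with the element rotate_stride away in the same rotary block,
// and elements beyond the rotary range are copied through unchanged.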
template<typename T, typename PositionType, typename IndexType, size_t num_dims,
size_t rotary_emb_dim>
__global__ void PlaneKernel(
FusedApplyRotaryEmbParam<T, PositionType, IndexType, num_dims, rotary_emb_dim> param) {
for (IndexType offset = threadIdx.x + blockIdx.x * blockDim.x; offset < param.num_elements;
offset += blockDim.x * gridDim.x) {
using LoadPack = cuda::elementwise::Packed<T, 2>;
IndexType temp_offset = offset;
IndexType index[num_dims];
#pragma unroll
for (int i = 0; i < num_dims - 1; i++) {
IndexType ref_stride = param.ref_stride[i];
IndexType idx = temp_offset / ref_stride;
index[i] = idx;
temp_offset = temp_offset - idx * ref_stride;
}
index[num_dims - 1] = temp_offset;
const IndexType b_index = index[0], m_index = index[1], k_index = index[num_dims - 1];
const IndexType position_rotate_index = (k_index >= param.k0) ? 1 : 0;
const IndexType position_id_offset = b_index * param.position_b_stride
+ position_rotate_index * param.position_rotate_stride
+ m_index;
const PositionType position =
param.position_ids ? param.position_ids[position_id_offset] : m_index;
const IndexType actual_k_index = k_index % param.actual_rotary_size;
const IndexType sinuous_offset = position * param.k + actual_k_index;
T cos_val, sin_val, out_val;
if (param.cos && param.sin) {
cos_val = *(param.cos + sinuous_offset);
sin_val = *(param.sin + sinuous_offset);
} else {
T val = position
* expf(2.0f * static_cast<float>(k_index % (param.actual_rotary_size >> 1))
* param.inv_actual_rotary_size * logf(param.theta));
cos_val = cosf(val);
sin_val = sinf(val);
}
LoadPack x_vec;
IndexType x_offset = param.x_offset;
IndexType out_offset = 0;
#pragma unroll
for (int i = 0; i < num_dims; i++) {
x_offset = x_offset + param.x_stride[i] * index[i];
out_offset = out_offset + param.out_stride[i] * index[i];
}
if (k_index < param.k0) {
x_vec.elem[0] = *(param.x + x_offset);
x_vec.elem[1] = (param.k0 - k_index > param.rotate_stride)
? static_cast<T>(-*(param.x + x_offset + param.rotate_stride))
: *(param.x + x_offset - param.rotate_stride);
out_val = cos_val * x_vec.elem[0] + sin_val * x_vec.elem[1];
} else if (k_index < param.k1) {
x_vec.elem[0] = *(param.x + x_offset);
x_vec.elem[1] = (param.k1 - k_index > param.rotate_stride)
? static_cast<T>(-*(param.x + x_offset + param.rotate_stride))
: *(param.x + x_offset - param.rotate_stride);
out_val = cos_val * x_vec.elem[0] + sin_val * x_vec.elem[1];
} else {
out_val = *(param.x + x_offset);
}
*(param.out + out_offset) = out_val;
}
}
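// Fills the kernel parameter struct (reference/output/input strides, rotary
// bounds, position-id strides) and launches either the plane or the interval
// kernel on the given CUDA stream.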
template<typename T, typename PositionType, typename IndexType, size_t PackSize, size_t num_dims,
size_t rotary_emb_dim>
void LaunchKernel(ep::CudaStream* stream, const T* x, const T* cos, const T* sin,
const PositionType* position_ids, T* out, const int64_t* position_shape,
const std::string& x_layout, const std::string& output_layout,
const std::string& mode, const T theta, const IndexType rotary_size,
const IndexType b, const IndexType m, const IndexType h, const IndexType k,
const IndexType x_b_stride, const IndexType x_m_stride,
const IndexType x_h_stride, const IndexType x_offset,
const IndexType out_b_stride, const IndexType out_m_stride,
const IndexType out_h_stride, IndexType num_elements) {
const IndexType k0 = rotary_size / rotary_emb_dim,
                  k1 = rotary_size;  // TODO: this only supports 1D and 2D rotary positional encoding
const IndexType rotate_stride = rotary_size / (2 * rotary_emb_dim);
const IndexType actual_rotary_size = rotary_size / rotary_emb_dim;
const float inv_actual_rotary_size = 1.0 / actual_rotary_size;
struct FusedApplyRotaryEmbParam<T, PositionType, IndexType, num_dims, rotary_emb_dim> param(
x, cos, sin, position_ids, out, theta, inv_actual_rotary_size, actual_rotary_size,
rotary_size, rotate_stride, num_elements, k, k0, k1, x_offset);
const IndexType ref_strides[num_dims] = {m * h * k, h * k, k, 1};
const IndexType out_strides[num_dims] = {out_b_stride, out_m_stride, out_h_stride, 1};
const IndexType x_strides[num_dims] = {x_b_stride, x_m_stride, x_h_stride, 1};
param.sinuous_m_stride = actual_rotary_size;
const IndexType position_m = position_shape ? static_cast<IndexType>(position_shape[2]) : m;
param.position_rotate_stride = position_m;
param.position_b_stride = position_m * rotary_emb_dim;
  // K has to be the last dimension; only k & m matter, so strides other than k & m do not
  // really need to be computed.
#pragma unroll
for (int i = 0; i < num_dims; i++) {
param.ref_stride[i] = ref_strides[i];
param.out_stride[i] = out_strides[i];
param.x_stride[i] = x_strides[i];
}
constexpr size_t blk_size = 128;
if (mode == "plane") {
param.num_elements = param.num_elements * PackSize;
PlaneKernel<T, PositionType, IndexType, num_dims, rotary_emb_dim>
<<<(param.num_elements + blk_size - 1) / blk_size, blk_size, 0, stream->cuda_stream()>>>(
param);
} else {
IntervalKernel<T, PositionType, IndexType, PackSize, num_dims, rotary_emb_dim>
<<<(param.num_elements + blk_size - 1) / blk_size, blk_size, 0, stream->cuda_stream()>>>(
param);
}
}
template<typename T, typename PositionType, typename IndexType, size_t num_dims,
size_t rotary_emb_dim>
void DispatchPackSize(ep::CudaStream* stream, const T* x, const T* cos, const T* sin,
const PositionType* position_ids, T* out, const int64_t* position_shape,
const std::string& x_layout, const std::string& output_layout,
const std::string& mode, const T theta, const IndexType rotary_size,
const IndexType b, const IndexType m, const IndexType h, const IndexType k,
const IndexType x_b_stride, const IndexType x_m_stride,
const IndexType x_h_stride, const IndexType x_offset,
const IndexType out_b_stride, const IndexType out_m_stride,
const IndexType out_h_stride, IndexType num_elements) {
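  // Pick the widest vectorized access (8/4/2 elements) that keeps the x
  // pointer aligned, divides both the per-dimension rotary span and the
  // non-rotary tail, and does not exceed a 16-byte load.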
const auto CheckPackSize = [&](const size_t PackSize) {
bool r = (((reinterpret_cast<uintptr_t>(x) % (sizeof(T) * PackSize)) == 0)
&& (((rotary_size / rotary_emb_dim) % PackSize) == 0)
&& (((k - rotary_size) % PackSize) == 0) && ((16 / sizeof(T)) >= PackSize));
return r;
};
if (CheckPackSize(8)) {
num_elements /= 8;
LaunchKernel<T, PositionType, IndexType, 8, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride, num_elements);
} else if (CheckPackSize(4)) {
num_elements /= 4;
LaunchKernel<T, PositionType, IndexType, 4, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride, num_elements);
} else {
num_elements /= 2;
LaunchKernel<T, PositionType, IndexType, 2, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride, num_elements);
}
}
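// Chooses 32-bit or 64-bit index arithmetic based on the total element count;
// 32-bit indexing is cheaper on the GPU when the tensor fits.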
template<typename T, typename PositionType, size_t num_dims, size_t rotary_emb_dim>
void DispatchIndex(ep::CudaStream* stream, const T* x, const T* cos, const T* sin,
const PositionType* position_ids, T* out, const int64_t* position_shape,
const std::string& x_layout, const std::string& output_layout,
const std::string& mode, const T theta, const int64_t rotary_size,
const int64_t b, const int64_t m, const int64_t h, const int64_t k,
const int64_t x_b_stride, const int64_t x_m_stride, const int64_t x_h_stride,
const int64_t x_offset, const int64_t out_b_stride, const int64_t out_m_stride,
const int64_t out_h_stride) {
int64_t num_elements = b * m * h * k;
if (num_elements < (1 << 30)) {
DispatchPackSize<T, PositionType, int32_t, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, static_cast<int32_t>(rotary_size), static_cast<int32_t>(b), static_cast<int32_t>(m),
static_cast<int32_t>(h), static_cast<int32_t>(k), static_cast<int32_t>(x_b_stride),
static_cast<int32_t>(x_m_stride), static_cast<int32_t>(x_h_stride),
static_cast<int32_t>(x_offset), static_cast<int32_t>(out_b_stride),
static_cast<int32_t>(out_m_stride), static_cast<int32_t>(out_h_stride),
static_cast<int32_t>(num_elements));
} else {
DispatchPackSize<T, PositionType, int64_t, num_dims, rotary_emb_dim>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride, num_elements);
}
}
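// rotary_emb_dim == 1 applies a single rotary block per position, while
// rotary_emb_dim == 2 splits the rotary range into two blocks with independent
// position ids (2D rotary embedding).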
template<typename T, typename PositionType, size_t num_dims>
void DispatchRotaryEmbeddingDimension(ep::CudaStream* stream, const T* x, const T* cos,
const T* sin, const PositionType* position_ids, T* out,
const int64_t* position_shape, const std::string& x_layout,
const std::string& output_layout, const std::string& mode,
const T theta, const int64_t rotary_size,
const int rotary_emb_dim, const int64_t b, const int64_t m,
const int64_t h, const int64_t k, const int64_t x_b_stride,
const int64_t x_m_stride, const int64_t x_h_stride,
const int64_t x_offset, const int64_t out_b_stride,
const int64_t out_m_stride, const int64_t out_h_stride) {
if (rotary_emb_dim == 1) {
DispatchIndex<T, PositionType, num_dims, 1>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride);
} else if (rotary_emb_dim == 2) {
DispatchIndex<T, PositionType, num_dims, 2>(
stream, x, cos, sin, position_ids, out, position_shape, x_layout, output_layout, mode,
theta, rotary_size, b, m, h, k, x_b_stride, x_m_stride, x_h_stride, x_offset, out_b_stride,
out_m_stride, out_h_stride);
}
}
template<typename T, typename PositionType>
class FusedApplyRotaryEmbKernel final : public user_op::OpKernel {
public:
FusedApplyRotaryEmbKernel() = default;
~FusedApplyRotaryEmbKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* cos = nullptr;
user_op::Tensor* sin = nullptr;
user_op::Tensor* position_ids = nullptr;
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const std::string& x_layout = ctx->Attr<std::string>("x_layout");
const std::string& output_layout = ctx->Attr<std::string>("output_layout");
const std::string& mode = ctx->Attr<std::string>("mode");
const int64_t tensor_index = ctx->Attr<int64_t>("tensor_index");
const int64_t k_size = ctx->Attr<int64_t>("k_size");
const int64_t rotary_size = ctx->Attr<int64_t>("rotary_size");
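    // theta is stored as 1 / base, so the kernels' expf((2*i/d) * logf(theta))
    // evaluates to the standard rotary frequency base^(-2*i/d).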
const float theta = 1.0f / ctx->Attr<float>("base");
int rotary_emb_dim = 1;
if (ctx->has_input("cos", 0)) { cos = ctx->Tensor4ArgNameAndIndex("cos", 0); }
if (ctx->has_input("sin", 0)) { sin = ctx->Tensor4ArgNameAndIndex("sin", 0); }
if (ctx->has_input("position_ids", 0)) {
position_ids = ctx->Tensor4ArgNameAndIndex("position_ids", 0);
rotary_emb_dim = position_ids->shape_view().At(1);
}
constexpr size_t ndims = 4;
int64_t b = 0;
int64_t m = 0;
int64_t h = 0;
int64_t k = 0;
int64_t out_b_stride = 0, out_m_stride = 0, out_h_stride = 0, out_offset = 0;
int64_t x_b_stride = 0, x_m_stride = 0, x_h_stride = 0, x_offset = 0;
ParseDims(out->shape_view(), output_layout, Optional<int64_t>(), k_size, 0, &b, &m, &h, &k,
&out_b_stride, &out_m_stride, &out_h_stride, &out_offset);
ParseDims(x->shape_view(), x_layout, Optional<int64_t>(), k_size, tensor_index, &b, &m, &h, &k,
&x_b_stride, &x_m_stride, &x_h_stride, &x_offset);
    // TODO: num_dims is hard-coded here, and the template dispatch looks redundant.
DispatchRotaryEmbeddingDimension<T, PositionType, ndims>(
ctx->stream()->As<ep::CudaStream>(), reinterpret_cast<const T*>(x->dptr()),
cos ? reinterpret_cast<const T*>(cos->dptr()) : nullptr,
sin ? reinterpret_cast<const T*>(sin->dptr()) : nullptr,
position_ids ? reinterpret_cast<const PositionType*>(position_ids->dptr()) : nullptr,
reinterpret_cast<T*>(out->mut_dptr()),
position_ids ? position_ids->shape_view().data() : nullptr, x_layout, output_layout, mode,
static_cast<T>(theta), rotary_size, rotary_emb_dim, b, m, h, k, x_b_stride, x_m_stride,
x_h_stride, x_offset, out_b_stride, out_m_stride, out_h_stride);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_FUSED_APPLY_ROTARY_EMB_GPU(dtype, position_type) \
REGISTER_USER_KERNEL("fused_apply_rotary_emb") \
.SetCreateFn<FusedApplyRotaryEmbKernel<dtype, position_type>>() \
.SetIsMatchedHob( \
(user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value) \
&& (user_op::HobInputSize("position_ids") == 1) \
&& (user_op::HobDataType("position_ids", 0) == GetDataType<position_type>::value));
#define REGISTER_FUSED_APPLY_ROTARY_EMB_GPU_DTYPE(dtype) \
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU(dtype, int64_t); \
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU(dtype, int32_t); \
REGISTER_USER_KERNEL("fused_apply_rotary_emb") \
.SetCreateFn<FusedApplyRotaryEmbKernel<dtype, int64_t>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value) \
&& (user_op::HobInputSize("position_ids") == 0));
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU_DTYPE(float);
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU_DTYPE(half);
#if CUDA_VERSION >= 11000
REGISTER_FUSED_APPLY_ROTARY_EMB_GPU_DTYPE(nv_bfloat16);
#endif // CUDA_VERSION >= 11000
} // namespace
} // namespace user_op
} // namespace oneflow
#endif // WITH_CUTLASS
|
dab9cf306ace27799511e76dea8c260fb6f91549.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* File: test.cu
*/
#include <test.h>
__device__ void hello()
{
int a = 1 + 10;
return;
}
__global__
void hellok()
{
hello();
}
__host__
void testcuda()
{
hipLaunchKernelGGL(( hellok), dim3(10),dim3(10), 0, 0, );
}
/*
*
*/ | dab9cf306ace27799511e76dea8c260fb6f91549.cu | /*
* File: test.cu
*/
#include <test.h>
__device__ void hello()
{
int a = 1 + 10;
return;
}
__global__
void hellok()
{
hello();
}
__host__
void testcuda()
{
hellok<<<10,10>>>();
}
/*
*
*/ |
9a7da2fc99d9e6617f8bcdc906c1b461b1d36af6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_front;
int xdim0_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_front;
int ydim0_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_front;
int xdim1_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_front;
int ydim1_update_halo_kernel2_xvel_plus_2_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_front * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_front * \
ydim0_update_halo_kernel2_xvel_plus_2_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_front * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_front * \
ydim1_update_halo_kernel2_xvel_plus_2_front * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_2_front_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -2)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
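// Wrapper kernel: each thread advances the field pointers to its (x, y, z)
// cell and copies the xvel value from two planes inside the front face (z - 2)
// onto the front boundary plane.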
__global__ void ops_update_halo_kernel2_xvel_plus_2_front(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front *
ydim0_update_halo_kernel2_xvel_plus_2_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front *
ydim1_update_halo_kernel2_xvel_plus_2_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_front_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 35))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(35, "update_halo_kernel2_xvel_plus_2_front");
OPS_kernels[35].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_front_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_front_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_front_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_front_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_2_front_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_2_front_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_2_front_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_2_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[35].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_front), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[35].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[35].mpi_time += t2 - t1;
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 35;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 35;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_front_execute;
if (OPS_diags > 1) {
ops_timing_realloc(35, "update_halo_kernel2_xvel_plus_2_front");
}
ops_enqueue_kernel(desc);
}
#endif
| 9a7da2fc99d9e6617f8bcdc906c1b461b1d36af6.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_xvel_plus_2_front;
int xdim0_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_xvel_plus_2_front;
int ydim0_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_xvel_plus_2_front;
int xdim1_update_halo_kernel2_xvel_plus_2_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_xvel_plus_2_front;
int ydim1_update_halo_kernel2_xvel_plus_2_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_xvel_plus_2_front * (y) + \
xdim0_update_halo_kernel2_xvel_plus_2_front * \
ydim0_update_halo_kernel2_xvel_plus_2_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_xvel_plus_2_front * (y) + \
xdim1_update_halo_kernel2_xvel_plus_2_front * \
ydim1_update_halo_kernel2_xvel_plus_2_front * (z))
// user function
__device__
inline void
update_halo_kernel2_xvel_plus_2_front_gpu(double *xvel0, double *xvel1,
const int *fields) {
if (fields[FIELD_XVEL0] == 1)
xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -2)];
if (fields[FIELD_XVEL1] == 1)
xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
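// Wrapper kernel: each thread advances the field pointers to its (x, y, z)
// cell and copies the xvel value from two planes inside the front face (z - 2)
// onto the front boundary plane.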
__global__ void ops_update_halo_kernel2_xvel_plus_2_front(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front *
ydim0_update_halo_kernel2_xvel_plus_2_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front *
ydim1_update_halo_kernel2_xvel_plus_2_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_xvel_plus_2_front_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 3, range, 35))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(35, "update_halo_kernel2_xvel_plus_2_front");
OPS_kernels[35].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_front_h ||
ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_front_h ||
xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_front_h ||
ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_front_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_xvel_plus_2_front_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_xvel_plus_2_front_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_xvel_plus_2_front_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_xvel_plus_2_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[35].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_xvel_plus_2_front<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[35].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[35].mpi_time += t2 - t1;
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[35].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_front(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 35;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 35;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_front_execute;
if (OPS_diags > 1) {
ops_timing_realloc(35, "update_halo_kernel2_xvel_plus_2_front");
}
ops_enqueue_kernel(desc);
}
#endif
|
733b14c17205bf9404dac4ab23b4f65e0432f218.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
hipError_t backPropagation(double *out, double *x,
double *y, double *W, unsigned int row, unsigned int column, double eta);
__global__ void subtractKernel(double *out, double *y, unsigned int row)
{
int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx >= row) {
return;
}
out[thread_idx] -= y[thread_idx];
}
__global__ void updateWKernel(double *out, double *x, double *W,
unsigned int row, unsigned int column, double eta)
{
int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx >= row * column) {
return;
}
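	// thread_idx / column selects the output (row) gradient component and
	// thread_idx % column the input activation: W -= eta * delta * x^T.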
W[thread_idx] = W[thread_idx] - eta * out[thread_idx / column] * x[thread_idx % column];
}
int main(int argc, char *argv[])
{
unsigned int row = atoi(argv[1]);
unsigned int column = atoi(argv[2]);
double eta = atof(argv[3]);
double *W = (double*)malloc(row * column * sizeof(double));
double *x = (double*)malloc(column * sizeof(double));
double *y = (double*)malloc(row * sizeof(double));
double *out = (double*)malloc(row * sizeof(double));
for (int i = 0; i < column; i++) {
x[i] = 10;
}
for (int i = 0; i < row * column; i++) {
W[i] = 10;
}
	// y and out hold `row` elements; iterating over `column` here would run
	// out of bounds whenever column > row.
	for (int i = 0; i < row; i++) {
		y[i] = 10;
	}
	for (int i = 0; i < row; i++) {
		out[i] = 11;
	}
	// Run one backpropagation weight update on the GPU.
hipError_t cudaStatus = backPropagation(out, x, y, W, row, column, eta);
for (int i = 0; i < row; i++) {
for (int j = 0; j < column; j++) {
printf("%.2f ", W[i * column + j]);
}
printf("\n");
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function that performs one backpropagation weight update on the GPU.
hipError_t backPropagation(double *out, double *x,
double *y, double *W, unsigned int row, unsigned int column, double eta)
{
double *dev_out = 0;
double *dev_x = 0;
double *dev_y = 0;
double *dev_W = 0;
hipError_t cudaStatus;
cudaStatus = hipMalloc((void**)&dev_out, row * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_x, column * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_y, row * sizeof(double));
cudaStatus = hipMalloc((void**)&dev_W, row * column * sizeof(double));
// Copy input vectors from host memory to GPU buffers.
hipMemcpy(dev_out, out, row * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_x, x, column * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_y, y, row * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_W, W, row * column * sizeof(double), hipMemcpyHostToDevice);
// Compute (out - y) to get the differential of cost on predictions
	// The grid must cover `row` elements (subtractKernel bounds-checks against row).
	hipLaunchKernelGGL(( subtractKernel), dim3(row / 512 + 1), dim3(512), 0, 0, dev_out, dev_y, row);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching subtractKernel!\n", cudaStatus);
goto Error;
}
// Compute the differential of cost on weights and update weights: W = W - eta*(delta*(x)T)
hipLaunchKernelGGL(( updateWKernel), dim3(row * column / 512 + 1), dim3(512), 0, 0, dev_out, dev_x, dev_W, row, column, eta);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching updateWKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(W, dev_W, row * column * sizeof(double), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_out);
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_W);
return cudaStatus;
}
| 733b14c17205bf9404dac4ab23b4f65e0432f218.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
cudaError_t backPropagation(double *out, double *x,
double *y, double *W, unsigned int row, unsigned int column, double eta);
__global__ void subtractKernel(double *out, double *y, unsigned int row)
{
int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx >= row) {
return;
}
out[thread_idx] -= y[thread_idx];
}
__global__ void updateWKernel(double *out, double *x, double *W,
unsigned int row, unsigned int column, double eta)
{
int thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx >= row * column) {
return;
}
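	// thread_idx / column selects the output (row) gradient component and
	// thread_idx % column the input activation: W -= eta * delta * x^T.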
W[thread_idx] = W[thread_idx] - eta * out[thread_idx / column] * x[thread_idx % column];
}
int main(int argc, char *argv[])
{
unsigned int row = atoi(argv[1]);
unsigned int column = atoi(argv[2]);
double eta = atof(argv[3]);
double *W = (double*)malloc(row * column * sizeof(double));
double *x = (double*)malloc(column * sizeof(double));
double *y = (double*)malloc(row * sizeof(double));
double *out = (double*)malloc(row * sizeof(double));
for (int i = 0; i < column; i++) {
x[i] = 10;
}
for (int i = 0; i < row * column; i++) {
W[i] = 10;
}
	// y and out hold `row` elements; iterating over `column` here would run
	// out of bounds whenever column > row.
	for (int i = 0; i < row; i++) {
		y[i] = 10;
	}
	for (int i = 0; i < row; i++) {
		out[i] = 11;
	}
	// Run one backpropagation weight update on the GPU.
cudaError_t cudaStatus = backPropagation(out, x, y, W, row, column, eta);
for (int i = 0; i < row; i++) {
for (int j = 0; j < column; j++) {
printf("%.2f ", W[i * column + j]);
}
printf("\n");
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function that performs one backpropagation weight update on the GPU.
cudaError_t backPropagation(double *out, double *x,
double *y, double *W, unsigned int row, unsigned int column, double eta)
{
double *dev_out = 0;
double *dev_x = 0;
double *dev_y = 0;
double *dev_W = 0;
cudaError_t cudaStatus;
cudaStatus = cudaMalloc((void**)&dev_out, row * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_x, column * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_y, row * sizeof(double));
cudaStatus = cudaMalloc((void**)&dev_W, row * column * sizeof(double));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_out, out, row * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_x, x, column * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_y, y, row * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_W, W, row * column * sizeof(double), cudaMemcpyHostToDevice);
// Compute (out - y) to get the differential of cost on predictions
	// The grid must cover `row` elements (subtractKernel bounds-checks against row).
	subtractKernel<<<row / 512 + 1, 512>>>(dev_out, dev_y, row);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching subtractKernel!\n", cudaStatus);
goto Error;
}
// Compute the differential of cost on weights and update weights: W = W - eta*(delta*(x)T)
updateWKernel<<<row * column / 512 + 1, 512>>>(dev_out, dev_x, dev_W, row, column, eta);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching updateWKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(W, dev_W, row * column * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_out);
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_W);
return cudaStatus;
}
|
cd7ed78023edb8c6b3363ab3f6ce19458407f956.hip | // !!! This is a file automatically generated by hipify!!!
/*
* GPUb1piAngAmp_kernel.cu
*
*/
#include <stdio.h>
#include "hip/hip_runtime.h"
// Original headers were scattered around file system
#include "GPUManager/GPUCustomTypes.h"
#include "GPUManager/CUDA-Complex.cuh"
#include "GPUUtils/lorentzBoost.cuh"
#include "GPUUtils/threeVector.cuh"
#include "GPUUtils/wignerD.cuh"
#include "GPUUtils/clebsch.cuh"
#include "AMPTOOLS_AMPS/breakupMomentum.cuh"
#include "AMPTOOLS_AMPS/barrierFactor.cuh"
// Test headers
#if 0
#include "GPUCustomTypes.h"
#include "CUDA-Complex.cuh"
#include "lorentzBoost.cuh"
#include "threeVector.cuh"
#include "wignerD.cuh"
#include "clebsch.cuh"
#include "breakupMomentum.cuh"
#include "barrierFactor.cuh"
#endif
#define ADD4(a,b) { a[0]+b[0], a[1]+b[1], a[2]+b[2], a[3]+b[3] }
#define MASS(v) (G_SQRT(v[0]*v[0]-v[1]*v[1]-v[2]*v[2]-v[3]*v[3]))
#define Nterm(J) (G_SQRT((2*J+1)/(4*M_PI)))
// Macro to ease definition of loops
#define LOOP(INDEX,START,END,INC) for (int INDEX=START;INDEX<=END;INDEX+=INC)
static __device__ void //note: 4-vector input presumed
rotateZ( GDouble* v, GDouble phi ){
GDouble sinphi = G_SIN(phi);
GDouble cosphi = G_COS(phi);
GDouble tx;
tx = v[1] * cosphi - v[2] * sinphi;
v[2] = v[2] * cosphi + v[1] * sinphi;
v[1] = tx;
}
static __device__ void //note: 4-vector input presumed
rotateY ( GDouble* v, GDouble theta) {
double sinphi = G_SIN(theta);
double cosphi = G_COS(theta);
double tz;
tz = v[3] * cosphi - v[1] * sinphi;
v[1] = v[1] * cosphi + v[3] * sinphi;
v[3] = tz;
}
static __device__ GDouble //note: 3-vector input presumed
theta( GDouble* pv ){
GDouble r= G_SQRT(pv[0]*pv[0] + pv[1]*pv[1]);
return G_ATAN2( r , pv[2] );
}
static __device__ void
MoveToRF(GDouble *parent, GDouble *daughter)
{
GDouble *par3vec=parent+1;
rotateZ( daughter , -phi(par3vec) );
rotateY( daughter , -theta(par3vec) );
GDouble beta[]={0,0, -G_SQRT(dot(par3vec,par3vec))/parent[0]};
  //** (x) Might this be boostToRest???
// beta is defined to boost to parent's rest frame
// I just adapted GPUUtil boost fcn with vector beta input
boost( daughter , beta );
}
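// Relativistic Breit-Wigner amplitude with Blatt-Weisskopf barrier factors and
// a mass-dependent width, evaluated from the two daughter four-vectors.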
static __device__ WCUComplex
BreitWigner_loc(GDouble m0, GDouble Gamma0, int L,
GDouble *P1, GDouble *P2)
{
GDouble Ptot[4] = ADD4(P1, P2);
GDouble m = MASS(Ptot);
GDouble mass1 = MASS(P1);
GDouble mass2 = MASS(P2);
// assert positive breakup momenta
GDouble q0 = fabs( breakupMomentum(m0, mass1, mass2) );
GDouble q = fabs( breakupMomentum(m, mass1, mass2) );
//printf("BW: (%5.3f, %5.3f, %d) m=%6.4f m1=%6.4f m2=%6.4f q=%6.4f q0=%6.4f\n",
// m0,Gamma0,L,m,mass1,mass2,q,q0);
GDouble F0 = L==0 ? 1.0 : barrierFactor(q0, L);
GDouble F = L==0 ? 1.0 : barrierFactor(q, L);
GDouble width_coef=Gamma0*(m0/m);
//GDouble qq0=q/q0;
//GDouble width_qdep = (L==0 ? qq0 : (L==1 ? qq0*qq0*qq0 : pow(qq0,2*L+1)))*((F*F)/(F0*F0));
GDouble width_qdep = q/q0 * (F*F)/(F0*F0);
//GDouble num_qdep = (L==0 ? q : (L==1 ? q*q*q : pow(q,2*L+1)))*(F*F);
GDouble num_qdep = q*(F*F);
GDouble width = width_coef * width_qdep;
//complex<GDouble> bwtop(m0 * width, 0.0 );
WCUComplex bwtop = { G_SQRT(m0*width_coef) * num_qdep, 0 };
WCUComplex bwbottom = { m0*m0 - m*m , -1.0 * ( m0 * width ) };
return ( bwtop / bwbottom );
}
// JR 2012-07-29
// Set all amplitudes to 0 on the device. This is needed now because we only
// calculate amplitudes for those momenta sets with non-zero amplitudes. If
// this function were not performed, amplitudes which are supposed to be zero would
// be undefined.
__global__ void Setzero_kernel(WCUComplex *pcDevAmp, int iNEvents) {
int iEvent = GPU_THIS_EVENT;
if (iEvent>=iNEvents) return;
pcDevAmp[iEvent].m_dRe = 0.0;
pcDevAmp[iEvent].m_dIm = 0.0;
}
// JR 2012-07-29
// Perform the beginning of the b1pi calculation, just enough to determine which
// amplitudes will be set to zero. Amplitudes are set to (1,0) if
// they are not zero. These amplitudes will need to be set to their correct
// values on the call to GPUb1piAngAmp_kernel().
__global__ void Pretest_kernel( GPU_AMP_PROTO , int polBeam, GDouble polFrac,
int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi,
GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3,
GDouble u_b1_0, GDouble u_b1_2,
GDouble G0_omega, GDouble G0_b1, bool orthocheck)
{
// Calculate event for this thread.
int iEvent = GPU_THIS_EVENT;
WCUComplex CZero = { 0, 0 };
WCUComplex COne = { 1, 0 };
int pol=(polBeam==1 ? +1 : -1); // y and x-pol. respectively
//** (x) This statement can be evaluated at top of function?
if (J_X==0 && Par_X*pol*epsilon_R==-1) {
pcDevAmp[iEvent] = CZero;
return;
}
GDouble m0_omega = 0.783;
GDouble m0_b1 = 1.223;
bool isZero;
// Copy four-vectors for this thread from global memory.
GDouble b1s_pi [4] = GPU_P4(3);
GDouble omegas_pi[4] = GPU_P4(4);
GDouble rhos_pim [4] = GPU_P4(5);
GDouble rhos_pip [4] = GPU_P4(6);
// Make four-vector sums
GDouble rho [4] = ADD4(rhos_pip, rhos_pim );
GDouble omega [4] = ADD4(rho, omegas_pi);
GDouble b1 [4] = ADD4(omega, b1s_pi);
// Store mass of b1; for other vectors we can calculate mass on the fly.
GDouble b1mass = MASS(b1);
// Is this term zero?
isZero = MASS(rho)+0.135 > m0_omega+3*G0_omega;
isZero |= fabs(MASS(omega)-m0_omega) > 3*G0_omega;
isZero |= fabs(b1mass-m0_b1) > 3*G0_b1;
isZero |= b1mass < (m0_omega - 3*G0_omega);
if (isZero) pcDevAmp[iEvent] = CZero;
else pcDevAmp[iEvent] = COne;
}
// JR 2012-07-29
// Calculate amplitudes only for those momenta sets with known non-zero
// amplitudes.
__global__ void
GPUb1piAngAmp_kernel(
int cnt,
// GPU_AMP_PROTO ,
GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents,
int polBeam, GDouble polFrac,
int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi,
GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3,
GDouble u_b1_0, GDouble u_b1_2,
GDouble G0_omega, GDouble G0_b1, bool orthocheck)
{
// Calculate event for this thread.
// int iEvent = GPU_THIS_EVENT;
// JR 2012-07-29
  // NOTE: This version of this function is called with different settings
  // for threadIdx, blockIdx and blockDim than for the original version.
  // The next line reflects that change.
int iEvent = threadIdx.x + blockIdx.x * blockDim.x;
// Skip this event index if it overruns number of events.
if (iEvent>=iNEvents) return;
WCUComplex CZero = { 0, 0 };
WCUComplex i = { 0, 1 };
WCUComplex COne = { 1, 0 };
int pol=(polBeam==1 ? +1 : -1); // y and x-pol. respectively
if (J_X==0 && Par_X*pol*epsilon_R==-1) {
pcDevAmp[iEvent] = CZero;
return;
}
int m_X;
GDouble u_rho, u_omega, u_b1;
GDouble InvSqrt2 = 1.0/G_SQRT(2.0);
GDouble m0_rho = 0.775;
GDouble G0_rho = 0.149;
GDouble m0_omega = 0.783;
GDouble m0_b1 = 1.223;
bool useCutoff = true;
bool isZero;
// Copy four-vectors for this thread from global memory.
// 2012-05-19 JR rhos_pip0,omega0,rho0 added for use
// in BreitWigner_loc() below.
GDouble beam [4] = GPU_P4(0);
GDouble recoil [4] = GPU_P4(1);
GDouble Xs_pi [4] = GPU_P4(2);
GDouble b1s_pi [4] = GPU_P4(3);
GDouble omegas_pi[4] = GPU_P4(4);
GDouble rhos_pim [4] = GPU_P4(5);
GDouble rhos_pip [4] = GPU_P4(6);
GDouble rhos_pip0[4] = GPU_P4(6);
// Make four-vector sums
GDouble rho [4] = ADD4(rhos_pip, rhos_pim );
GDouble rho0 [4] = ADD4(rhos_pip, rhos_pim );
GDouble omega [4] = ADD4(rho, omegas_pi);
GDouble omega0[4] = ADD4(rho, omegas_pi);
GDouble b1 [4] = ADD4(omega, b1s_pi);
// Store mass of b1; for other vectors we can calculate mass on the fly.
GDouble b1mass = MASS(b1);
// Is this term zero?
if (useCutoff) {
isZero = MASS(rho)+0.135 > m0_omega+3*G0_omega;
isZero |= fabs(MASS(omega)-m0_omega) > 3*G0_omega;
isZero |= fabs(b1mass-m0_b1) > 3*G0_b1;
isZero |= b1mass < (m0_omega - 3*G0_omega);
// Zero amplitude
if (isZero) {
pcDevAmp[iEvent] = CZero;
return;
}
}
// Continue to Calculate amplitude
GDouble X[4] = ADD4(b1, Xs_pi);
GDouble q = breakupMomentum( MASS(X), b1mass, MASS(Xs_pi) );
GDouble alpha = phi( &(recoil[1]) );
// NOTE: Values of beam and recoil are changed below.
boostToRest (beam, X);
boostToRest (recoil, X);
// Define new coordinate system with
// - beam parallel to z direction
// - recoil in the x,z plain (i.e., y is normal to recoil and beam)
// - y is normal to beam and recoil.
GDouble zGJ[3] = { beam[1], beam[2], beam[3] };
makeUnit( zGJ );
  //** (x) Be careful of the cross-product order; need to check this
// 2012-05-19 JR - Invert yGJ to make cross come out right.
// GDouble yGJ[3] = { recoil[1], recoil[2], recoil[3] };
GDouble yGJ[3] = { -recoil[1], -recoil[2], -recoil[3] };
cross( yGJ, zGJ );
makeUnit( yGJ );
GDouble xGJ[3] = { yGJ[0], yGJ[1], yGJ[2] };
cross( xGJ, zGJ );
//particles to rest frames of their parents
boostToRest (b1, X);
boostToRest (omega, X);
boostToRest (rho, X);
boostToRest (rhos_pip, X);
// Note that in this form of the cascade of boosts, we are not
// saving the 4-vecs in their intermediate RF, but going sequentially
// straight to their immediate parent's RF.
  // Make sure to verify that the intermediates were not in fact needed
// and that we didn't break anything with this simplification.
MoveToRF(b1,omega);
MoveToRF(b1,rho); MoveToRF(omega,rho);
MoveToRF(b1,rhos_pip); MoveToRF(omega,rhos_pip); MoveToRF(rho,rhos_pip);
GDouble *b1_3vec=b1+1;
GDouble ang_b1[]={dot(b1_3vec, xGJ),
dot(b1_3vec, yGJ),
dot(b1_3vec, zGJ)};
GDouble b1_XRF_cosTheta = cosTheta(ang_b1);
GDouble b1_XRF_phi = phi(ang_b1);
GDouble rho_omegaRF_cosTheta = cosTheta(rho+1);
GDouble rho_omegaRF_phi = phi(rho+1);
GDouble rhos_pip_rhoRF_cosTheta = cosTheta(rhos_pip+1);
GDouble rhos_pip_rhoRF_phi = phi(rhos_pip+1);
GDouble omega_b1RF_cosTheta = cosTheta(omega+1);
GDouble omega_b1RF_phi = phi(omega+1);
/*
List_l_R: 0 1
List_J_rho: 1
List_l_rho: -1 1
List_L_omega: 1
List_l_omega: -1 0 1
List_L_b1: 0 2
List_l_b1: -1 0 1
*/
// SUMMATION GUIDE:
// notation meant to resemble TeX symbols in derivation
// exception: pol = \epsilon_\gamma
// l -> lambda, indicating helicity
// u_[particle](q.n.) -> amplitude strength coefficient
int l_R_lim = J_X + 1;
//shortcut: CB(L_X, J_b1, 0, l_b1 ; J_X, l_b1) vanishes when
// = CB(1, 1, 0, 0 ; 1, 0), so omit l_b1=0 when J_X=L_X=1
int l_b1_inc = L_X==1 && J_X==1 ? 2 : 1;
// restrict omega decay to just p wave
int L_omega_lim = 1; // set to 3 to allow F wave
int L_Rsign_lim;
GDouble cosAlpha=G_COS(alpha), sinAlpha=G_SIN(alpha);
WCUComplex expFact = {cosAlpha, sinAlpha};
WCUComplex expFact_conj = {cosAlpha, -sinAlpha};
WCUComplex ThelSum = { 0 , 0 };
// Setup dependent loop limits
LOOP(l_gamma, -1, 1, 2) {
LOOP(l_R, 0, l_R_lim, 1) {
if(l_R==0 && epsilon_R==-1) continue;
// LOOP(l_R, (1-epsilon_R)/2, l_R_lim, 1) // if this still causes some GPU core
// misalignment, try setting lower bound back to zero and tacking on
// * !(l_R==0 && epsilon_R==-1)
// to the long list of factors multiplying Thelsum below -IS
//summing positive and negative helicity terms of R's reflectivity state
L_Rsign_lim = l_R > 0 ? -1 : +1;
// Switch order of loop, because LOOP can only handle increasing increments
// LOOP(l_Rsign, 1, L_Rsign_lim, -2)
LOOP(l_Rsign, L_Rsign_lim, 1, 2) {
m_X = l_gamma - l_Rsign * l_R;
if (m_X==0) {
//testing for cancelation in |J 0>+pol*P*epsilon_R*(-1)^J|J 0>
if(Par_X*pol*epsilon_R == (J_X % 2 ==0 ? -1:+1)) continue;
} else {
//enforcing that the selected projection <= vector magnitude
if( abs(m_X)>J_X) continue;
}
WCUComplex l_b1DepTerm = {0,0};
LOOP(l_b1, -1,1,l_b1_inc) {
WCUComplex L_b1DepTerm = {0,0};
LOOP(L_b1,0,2,2) {
WCUComplex l_omegaDepTerm = {0,0};
// 2012-05-19 JR Fix l_omega loop
// LOOP(l_omega,-1,0,1)
LOOP(l_omega,-1,1,1) {
WCUComplex L_omegaDepTerm = {0,0};
LOOP(L_omega, 1, L_omega_lim, 2) {
WCUComplex J_rhoDepTerm = {0,0};
LOOP(J_rho, 1, L_omega_lim, 2) {
                //enforces the triangle inequality between J_omega=1, J_rho and L_omega
// in effect, L_omega and J_rho take identical values
if( abs(J_rho-L_omega) > 1) continue;
WCUComplex l_rhoDepTerm = {0,0};
LOOP(l_rho,-1,1,1) {
//shortcut CB(1,1,0,0;1,0)=0
if(L_omega==1 && J_rho==1 && l_rho==0) continue;
l_rhoDepTerm +=
Conjugate(wignerD(1, l_omega, l_rho,
rho_omegaRF_cosTheta, rho_omegaRF_phi))
* clebsch(L_omega, 0, J_rho, l_rho, 1, l_rho)
* Y(J_rho, l_rho, rhos_pip_rhoRF_cosTheta, rhos_pip_rhoRF_phi);
}
u_rho = J_rho==1 ? u_rho_1 : (J_rho==3 ? u_rho_3 : 0);
J_rhoDepTerm += u_rho * l_rhoDepTerm *
BreitWigner_loc(m0_rho,G0_rho, J_rho,rhos_pip0,rhos_pim);
}
J_rhoDepTerm *= BreitWigner_loc(m0_omega, G0_omega, L_omega, omegas_pi,rho0);
u_omega = L_omega==1 ? u_omega_1 : (L_omega==3 ? u_omega_3 : 0);
L_omegaDepTerm += u_omega * J_rhoDepTerm * Nterm(L_omega);
}
l_omegaDepTerm += L_omegaDepTerm *
clebsch(L_b1, 0, 1, l_omega, 1, l_omega) *
Conjugate(wignerD(1, l_b1, l_omega,
omega_b1RF_cosTheta, omega_b1RF_phi));
}
l_omegaDepTerm *= BreitWigner_loc(m0_b1, G0_b1, L_b1, b1s_pi, omega0);
u_b1 = L_b1==0 ? u_b1_0 : (L_b1==2 ? u_b1_2 : 0);
L_b1DepTerm += u_b1 * l_omegaDepTerm * Nterm(L_b1);
}
//-- (_) understand why assignment here produces:
// KERNEL LAUNCH ERROR [b1piAngAmp]: the launch timed out and was terminated
// assigning/incrementing integers causes no problems
l_b1DepTerm += L_b1DepTerm *
Conjugate(wignerD(J_X, m_X, l_b1, b1_XRF_cosTheta, b1_XRF_phi)) *
clebsch(L_X, 0, 1, l_b1, J_X, l_b1);
}
ThelSum += l_b1DepTerm
//to account for |eps_g> ~ (|1,-1>exp(-ia)-pol|1,+1>exp(ia))
* (l_gamma==1 ? (-pol)*expFact : expFact_conj)
//Assemble reflectivity eigenvector with epsilon_X=pol*epslion_R
* (GDouble) (m_X<0 ? Par_X*pol*epsilon_R*((J_X-m_X) % 2 == 0 ? +1:-1) : 1)
* (GDouble) (m_X == 0 ? 1.0 : InvSqrt2 )
// to apply th(l_R) reflectivity state prefactor:
// m=0: 1/2 m>0: 1/sqrt(2) m<0: 0 (last just skipped in this sum)
* (GDouble) (l_R > 0 ? InvSqrt2 : 1.0 )
//apply coefficients to the reflectivity basis terms:
* (GDouble) (l_Rsign==1 ? 1 : epsilon_R)
; //v(*epsilon_R) *
}
}
}
ThelSum *= Nterm(L_X) *
// barrier factor
(GDouble)(L_X==0 ? 1.0 : (L_X==1 ? q : G_POW(q,L_X))) *
// to apply polarization fraction weights:
(GDouble)G_SQRT((1.0-pol*polFrac)*0.5) * //(1+g) for x-pol, (1-g) for y-pol
(pol==1 ? i : COne)*InvSqrt2 * //to account for |eps_g> ~ sqrt(-eps/2)
clebsch(1, Iz_b1, 1, Iz_pi, I_X, Iz_b1 + Iz_pi);
pcDevAmp[iEvent] = ThelSum;
}
#ifdef DEBUG
// This is for debugging
// It reads the amplitdues and momemta vectors from the CUDA device and prints them.
void
printCudaArrays(GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents, int cnt) {
// Read amplitudes from GPU to CPU
GDouble *amp = (GDouble *) malloc (iNEvents * 2 * sizeof(GDouble));
hipMemcpy (amp, pcDevAmp, iNEvents * 2 * sizeof(GDouble), hipMemcpyDeviceToHost);
// Copy 4momenta from GPU to CPU - make part() big enough to hold the entire set of momenta
GDouble *part = (GDouble *) malloc (iNEvents * 4 * iNParticles * sizeof(GDouble));
hipMemcpy (part, pfDevData, iNEvents * 4 * iNParticles * sizeof(GDouble), hipMemcpyDeviceToHost);
// Print arrays
int ievent, ipart, idim;
int ndim = 4;
for (ievent=0; ievent<iNEvents; ievent++) {
printf ("test: CUDA: %2d %6d ", cnt, ievent);
// Print amplitude
printf (" %12.4e %12.4e", amp[2*ievent], amp[2*ievent+1]);
for (ipart=0;ipart<iNParticles;ipart++) {
printf (" ");
for (idim=0;idim<4;idim++) {
printf ( " %8.4f", part[ievent + idim*iNEvents + ipart*ndim*iNEvents ] );
}
}
printf("\n");
}
// Free allocations from arrays allocated withing this function
if (amp) free(amp);
if (part) free(part);
}
#endif
void
GPUb1piAngAmp_exec(dim3 dimGrid, dim3 dimBlock,
// GPU_AMP_PROTO,
GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents,
int polBeam, GDouble polFrac,
int J_X, int Par_X, int L_X, int I_X, int epsilon_R,
int Iz_b1, int Iz_pi,
GDouble u_rho_1, GDouble u_rho_3,
GDouble u_omega_1, GDouble u_omega_3,
GDouble u_b1_0, GDouble u_b1_2,
GDouble G0_omega, GDouble G0_b1, bool orthocheck)
{
int ievent, ievent1, idim, ipart, i, j, k;
int nonZero = 0;
int static cnt = 0;
cnt++;
// printf("test: Call to GPUb1piAngAmp_exec: cnt %d\n", cnt);
// Identify amplitudes which are zero
hipLaunchKernelGGL(( Pretest_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0,
// GPU_AMP_ARGS,
pfDevData, pcDevAmp, piDevPerm, iNParticles, iNEvents,
polBeam, polFrac,
J_X, Par_X, L_X, I_X, epsilon_R, Iz_b1, Iz_pi,
u_rho_1, u_rho_3, u_omega_1, u_omega_3, u_b1_0, u_b1_2,
G0_omega, G0_b1, orthocheck );
// printf("test: after call to Pretest_kernel()\n");
// Copy pcDevAmp from device to host */
GDouble *hostAmp = (GDouble *) malloc(2*iNEvents*sizeof(GDouble));
hipMemcpy (hostAmp, pcDevAmp, 2*iNEvents*sizeof(GDouble), hipMemcpyDeviceToHost);
// Initialize all on-device amplitudes to zero
hipLaunchKernelGGL(( Setzero_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, pcDevAmp,iNEvents);
// printf("test: after call to Setzero_kernel()\n");
// Count number of nonZero amplitudes
for (i=0;i<iNEvents;i++) {
if (hostAmp[2*i]==1.0) nonZero++;
}
// Allocate array to hold indices of nonZero amplitudes
int *nonZeroIndices = (int *) malloc(nonZero * sizeof(int));
j = 0;
for (i=0;i<iNEvents;i++) {
if (hostAmp[2*i]==1.0) nonZeroIndices[j++] = i;
}
// Copy 4momenta from GPU to CPU - make part() big enough to hold the entire set of momenta
GDouble *part = (GDouble *) malloc (iNEvents * 4 * iNParticles * sizeof(GDouble));
hipMemcpy (part, pfDevData, iNEvents * 4 * iNParticles * sizeof(GDouble), hipMemcpyDeviceToHost);
// printf("test: after copy pfDevData to Device\n");
// Copy nonZero momenta in place to the start of the array part
// Make sure the order of copying moves continuously from lower to higher indices.
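  // Worked example with illustrative numbers: iNEvents = 6, nonZeroIndices = {1, 4}.
  // Within each (ipart, idim) slab this copies part[newBase+0] = part[oldBase+1]
  // and part[newBase+1] = part[oldBase+4]. The in-place pass is safe because the
  // write index j = ievent1 + idim*nonZero + ipart*4*nonZero never exceeds the
  // read index i = ievent + idim*iNEvents + ipart*4*iNEvents, and writes fill
  // positions in strictly ascending order, so every source element is read
  // before it can be overwritten.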
for (ipart=0;ipart<iNParticles;ipart++) {
for (idim=0;idim<4;idim++) {
for (ievent1=0;ievent1<nonZero;ievent1++) {
ievent = nonZeroIndices[ievent1];
// Index of nonZero event in original particle array
i = ievent + idim * iNEvents + ipart * 4 * iNEvents;
// Index of nonZero event in new particle array
j = ievent1 + idim * nonZero + ipart * 4 * nonZero;
part[j] = part[i];
}
}
}
// Copy new particles on CPU back to GPU, only need those momenta sets which were non-zero, not the size of the entire set.
GDouble *part_dev;
hipMalloc(&part_dev, nonZero * 4 * iNParticles * sizeof(GDouble) );
hipMemcpy( part_dev, part, nonZero * 4 * iNParticles * sizeof(GDouble), hipMemcpyHostToDevice );
// printf("test: after copy Part to Device\n");
// Reset dimGrid and dimBlock for the value of nonZero
int Nthreads = 32;
dim3 dimBlock1(Nthreads);
dim3 dimGrid1((nonZero-1)/Nthreads+1);
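  // (nonZero-1)/Nthreads + 1 is integer ceiling division: e.g. nonZero = 100 gives
  // 99/32 + 1 = 4 blocks (128 threads); the 28 surplus threads exit immediately at
  // the "if (iEvent >= iNEvents) return;" guard in the kernel.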
// Evaluate non-zero amplitudes
// iNEvents = nonZero;
hipLaunchKernelGGL(( GPUb1piAngAmp_kernel), dim3(dimGrid1), dim3(dimBlock1) , 0, 0,
cnt,
// GPU_AMP_ARGS,
// pfDevData, pcDevAmp, piDevPerm, iNParticles, nonZero,
part_dev, pcDevAmp, piDevPerm, iNParticles, nonZero,
polBeam, polFrac,
J_X, Par_X, L_X, I_X, epsilon_R, Iz_b1, Iz_pi,
u_rho_1, u_rho_3, u_omega_1, u_omega_3, u_b1_0, u_b1_2,
G0_omega, G0_b1, orthocheck );
// printf("test: after call to GUPb1piAngAmp_kernel()\n");
// Read amplitudes from GPU to CPU
GDouble *amp = (GDouble *) malloc (iNEvents * 2 * sizeof(GDouble));
hipMemcpy (amp, pcDevAmp, iNEvents * 2 * sizeof(GDouble), hipMemcpyDeviceToHost);
// printf("test: after copy Amp to Host\n");
// Re-arrange location of amplitudes on GPU to match original distribution of vectors
// Progress through the index array backward.
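  // Worked example with illustrative numbers: iNEvents = 6, nonZeroIndices = {1, 4},
  // compacted results C0, C1 in complex slots 0 and 1:
  //   i = 1: zero slot 5, then slot 4 <- C1 (read from slot 1)
  //   i = 0: zero slots 2..3, then slot 1 <- C0 (read from slot 0)
  //   final loop: zero slot 0
  // giving [0, C0, 0, 0, C1, 0]. Walking backward makes the in-place scatter safe:
  // nonZeroIndices[i] >= i, so slot i is never overwritten before it is read.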
k = iNEvents;
for (i=nonZero-1;i>=0;i--) {
// Zero those elements between this element and last.
for (j=nonZeroIndices[i]+1;j<k;j++) {
amp[2*j ] = 0.0;
amp[2*j+1] = 0.0;
}
k = nonZeroIndices[i];
amp[2*k ] = amp[2*i ];
amp[2*k+1] = amp[2*i+1];
}
// Zero remaining elements
for (j=0;j<nonZeroIndices[0];j++) {
amp[2*j ] = 0.0;
amp[2*j+1] = 0.0;
}
// Write values back to GPU so calling program will find them where they
// expect them.
hipMemcpy (pcDevAmp, amp, iNEvents * 2 * sizeof(GDouble), hipMemcpyHostToDevice);
// printf("test: after copy Amp to Device\n");
// Free allocations
if (part_dev) hipFree(part_dev);
if (amp) free(amp);
if (part) free(part);
// printf("test: after Free allocations\n");
// Print Particle and Amplitude CUDA arrays
#ifdef DEBUG
printCudaArrays(pfDevData, pcDevAmp, piDevPerm, iNParticles, iNEvents, cnt);
#endif
}
| cd7ed78023edb8c6b3363ab3f6ce19458407f956.cu | /*
* GPUb1piAngAmp_kernel.cu
*
*/
#include <stdio.h>
#include "cuda.h"
// Original headers were scattered around file system
#include "GPUManager/GPUCustomTypes.h"
#include "GPUManager/CUDA-Complex.cuh"
#include "GPUUtils/lorentzBoost.cuh"
#include "GPUUtils/threeVector.cuh"
#include "GPUUtils/wignerD.cuh"
#include "GPUUtils/clebsch.cuh"
#include "AMPTOOLS_AMPS/breakupMomentum.cuh"
#include "AMPTOOLS_AMPS/barrierFactor.cuh"
// Test headers
#if 0
#include "GPUCustomTypes.h"
#include "CUDA-Complex.cuh"
#include "lorentzBoost.cuh"
#include "threeVector.cuh"
#include "wignerD.cuh"
#include "clebsch.cuh"
#include "breakupMomentum.cuh"
#include "barrierFactor.cuh"
#endif
#define ADD4(a,b) { a[0]+b[0], a[1]+b[1], a[2]+b[2], a[3]+b[3] }
#define MASS(v) (G_SQRT(v[0]*v[0]-v[1]*v[1]-v[2]*v[2]-v[3]*v[3]))
#define Nterm(J) (G_SQRT((2*J+1)/(4*M_PI)))
// Macro to ease definition of loops
#define LOOP(INDEX,START,END,INC) for (int INDEX=START;INDEX<=END;INDEX+=INC)
static __device__ void //note: 4-vector input presumed
rotateZ( GDouble* v, GDouble phi ){
GDouble sinphi = G_SIN(phi);
GDouble cosphi = G_COS(phi);
GDouble tx;
tx = v[1] * cosphi - v[2] * sinphi;
v[2] = v[2] * cosphi + v[1] * sinphi;
v[1] = tx;
}
static __device__ void //note: 4-vector input presumed
rotateY ( GDouble* v, GDouble theta) {
double sinphi = G_SIN(theta);
double cosphi = G_COS(theta);
double tz;
tz = v[3] * cosphi - v[1] * sinphi;
v[1] = v[1] * cosphi + v[3] * sinphi;
v[3] = tz;
}
static __device__ GDouble //note: 3-vector input presumed
theta( GDouble* pv ){
GDouble r= G_SQRT(pv[0]*pv[0] + pv[1]*pv[1]);
return G_ATAN2( r , pv[2] );
}
static __device__ void
MoveToRF(GDouble *parent, GDouble *daughter)
{
GDouble *par3vec=parent+1;
rotateZ( daughter , -phi(par3vec) );
rotateY( daughter , -theta(par3vec) );
GDouble beta[]={0,0, -G_SQRT(dot(par3vec,par3vec))/parent[0]};
  //** (x) Might this be boostToRest???
// beta is defined to boost to parent's rest frame
// I just adapted GPUUtil boost fcn with vector beta input
boost( daughter , beta );
}
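// Note: as coded below, this appears to be a relativistic Breit-Wigner with a
// mass-dependent width (an inference from the expressions, not an original note):
//   BW(m) ~ sqrt(m0*Gamma0*(m0/m)) * q * F(q)^2 / ( m0^2 - m^2 - i*m0*Gamma(m) )
// with Gamma(m) = Gamma0 * (m0/m) * (q/q0) * (F(q)/F(q0))^2,
// where q, q0 are the breakup momenta at m and m0 and F is the barrier factor.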
static __device__ WCUComplex
BreitWigner_loc(GDouble m0, GDouble Gamma0, int L,
GDouble *P1, GDouble *P2)
{
GDouble Ptot[4] = ADD4(P1, P2);
GDouble m = MASS(Ptot);
GDouble mass1 = MASS(P1);
GDouble mass2 = MASS(P2);
// assert positive breakup momenta
GDouble q0 = fabs( breakupMomentum(m0, mass1, mass2) );
GDouble q = fabs( breakupMomentum(m, mass1, mass2) );
//printf("BW: (%5.3f, %5.3f, %d) m=%6.4f m1=%6.4f m2=%6.4f q=%6.4f q0=%6.4f\n",
// m0,Gamma0,L,m,mass1,mass2,q,q0);
GDouble F0 = L==0 ? 1.0 : barrierFactor(q0, L);
GDouble F = L==0 ? 1.0 : barrierFactor(q, L);
GDouble width_coef=Gamma0*(m0/m);
//GDouble qq0=q/q0;
//GDouble width_qdep = (L==0 ? qq0 : (L==1 ? qq0*qq0*qq0 : pow(qq0,2*L+1)))*((F*F)/(F0*F0));
GDouble width_qdep = q/q0 * (F*F)/(F0*F0);
//GDouble num_qdep = (L==0 ? q : (L==1 ? q*q*q : pow(q,2*L+1)))*(F*F);
GDouble num_qdep = q*(F*F);
GDouble width = width_coef * width_qdep;
//complex<GDouble> bwtop(m0 * width, 0.0 );
WCUComplex bwtop = { G_SQRT(m0*width_coef) * num_qdep, 0 };
WCUComplex bwbottom = { m0*m0 - m*m , -1.0 * ( m0 * width ) };
return ( bwtop / bwbottom );
}
// JR 2012-07-29
// Set all Amplitudes to 0 on the Device. This is needed now because we only
// calculate amplitudes for those momenta sets with non-zero amplitudes. If
// this function were not performed, amplitudes which are supposed to be zero will
// be undefined.
__global__ void Setzero_kernel(WCUComplex *pcDevAmp, int iNEvents) {
int iEvent = GPU_THIS_EVENT;
if (iEvent>=iNEvents) return;
pcDevAmp[iEvent].m_dRe = 0.0;
pcDevAmp[iEvent].m_dIm = 0.0;
}
// JR 2012-07-29
// Perform beginning of b1pi calculation, just enough to determine those
// amplitude which will be set to zero. Amplitudes are set to (1,0) if
// they are not zero. These amplitudes will need set to their correct
// values on the call to GPUb1piAngAmp_kernel().
__global__ void Pretest_kernel( GPU_AMP_PROTO , int polBeam, GDouble polFrac,
int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi,
GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3,
GDouble u_b1_0, GDouble u_b1_2,
GDouble G0_omega, GDouble G0_b1, bool orthocheck)
{
// Calculate event for this thread.
int iEvent = GPU_THIS_EVENT;
WCUComplex CZero = { 0, 0 };
WCUComplex COne = { 1, 0 };
int pol=(polBeam==1 ? +1 : -1); // y and x-pol. respectively
//** (x) This statement can be evaluated at top of function?
if (J_X==0 && Par_X*pol*epsilon_R==-1) {
pcDevAmp[iEvent] = CZero;
return;
}
GDouble m0_omega = 0.783;
GDouble m0_b1 = 1.223;
bool isZero;
// Copy four-vectors for this thread from global memory.
GDouble b1s_pi [4] = GPU_P4(3);
GDouble omegas_pi[4] = GPU_P4(4);
GDouble rhos_pim [4] = GPU_P4(5);
GDouble rhos_pip [4] = GPU_P4(6);
// Make four-vector sums
GDouble rho [4] = ADD4(rhos_pip, rhos_pim );
GDouble omega [4] = ADD4(rho, omegas_pi);
GDouble b1 [4] = ADD4(omega, b1s_pi);
// Store mass of b1; for other vectors we can calculate mass on the fly.
GDouble b1mass = MASS(b1);
// Is this term zero?
isZero = MASS(rho)+0.135 > m0_omega+3*G0_omega;
isZero |= fabs(MASS(omega)-m0_omega) > 3*G0_omega;
isZero |= fabs(b1mass-m0_b1) > 3*G0_b1;
isZero |= b1mass < (m0_omega - 3*G0_omega);
if (isZero) pcDevAmp[iEvent] = CZero;
else pcDevAmp[iEvent] = COne;
}
// JR 2012-07-29
// Calculate amplitudes only for those momenta sets with known non-zero
// amplitudes.
__global__ void
GPUb1piAngAmp_kernel(
int cnt,
// GPU_AMP_PROTO ,
GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents,
int polBeam, GDouble polFrac,
int J_X, int Par_X, int L_X, int I_X, int epsilon_R, int Iz_b1, int Iz_pi,
GDouble u_rho_1, GDouble u_rho_3, GDouble u_omega_1, GDouble u_omega_3,
GDouble u_b1_0, GDouble u_b1_2,
GDouble G0_omega, GDouble G0_b1, bool orthocheck)
{
// Calculate event for this thread.
// int iEvent = GPU_THIS_EVENT;
// JR 2012-07-29
  // NOTE: This version of this function is called with different settings
  // for threadIdx, blockIdx and blockDim than the original version.
  // The next line reflects that change.
int iEvent = threadIdx.x + blockIdx.x * blockDim.x;
// Skip this event index if it overruns number of events.
if (iEvent>=iNEvents) return;
WCUComplex CZero = { 0, 0 };
WCUComplex i = { 0, 1 };
WCUComplex COne = { 1, 0 };
int pol=(polBeam==1 ? +1 : -1); // y and x-pol. respectively
if (J_X==0 && Par_X*pol*epsilon_R==-1) {
pcDevAmp[iEvent] = CZero;
return;
}
int m_X;
GDouble u_rho, u_omega, u_b1;
GDouble InvSqrt2 = 1.0/G_SQRT(2.0);
GDouble m0_rho = 0.775;
GDouble G0_rho = 0.149;
GDouble m0_omega = 0.783;
GDouble m0_b1 = 1.223;
bool useCutoff = true;
bool isZero;
// Copy four-vectors for this thread from global memory.
// 2012-05-19 JR rhos_pip0,omega0,rho0 added for use
// in BreitWigner_loc() below.
GDouble beam [4] = GPU_P4(0);
GDouble recoil [4] = GPU_P4(1);
GDouble Xs_pi [4] = GPU_P4(2);
GDouble b1s_pi [4] = GPU_P4(3);
GDouble omegas_pi[4] = GPU_P4(4);
GDouble rhos_pim [4] = GPU_P4(5);
GDouble rhos_pip [4] = GPU_P4(6);
GDouble rhos_pip0[4] = GPU_P4(6);
// Make four-vector sums
GDouble rho [4] = ADD4(rhos_pip, rhos_pim );
GDouble rho0 [4] = ADD4(rhos_pip, rhos_pim );
GDouble omega [4] = ADD4(rho, omegas_pi);
GDouble omega0[4] = ADD4(rho, omegas_pi);
GDouble b1 [4] = ADD4(omega, b1s_pi);
// Store mass of b1; for other vectors we can calculate mass on the fly.
GDouble b1mass = MASS(b1);
// Is this term zero?
if (useCutoff) {
isZero = MASS(rho)+0.135 > m0_omega+3*G0_omega;
isZero |= fabs(MASS(omega)-m0_omega) > 3*G0_omega;
isZero |= fabs(b1mass-m0_b1) > 3*G0_b1;
isZero |= b1mass < (m0_omega - 3*G0_omega);
// Zero amplitude
if (isZero) {
pcDevAmp[iEvent] = CZero;
return;
}
}
// Continue to Calculate amplitude
GDouble X[4] = ADD4(b1, Xs_pi);
GDouble q = breakupMomentum( MASS(X), b1mass, MASS(Xs_pi) );
GDouble alpha = phi( &(recoil[1]) );
// NOTE: Values of beam and recoil are changed below.
boostToRest (beam, X);
boostToRest (recoil, X);
// Define new coordinate system with
// - beam parallel to z direction
// - recoil in the x,z plain (i.e., y is normal to recoil and beam)
// - y is normal to beam and recoil.
GDouble zGJ[3] = { beam[1], beam[2], beam[3] };
makeUnit( zGJ );
  //** (x) Be careful of the cross order, need to check this
// 2012-05-19 JR - Invert yGJ to make cross come out right.
// GDouble yGJ[3] = { recoil[1], recoil[2], recoil[3] };
GDouble yGJ[3] = { -recoil[1], -recoil[2], -recoil[3] };
cross( yGJ, zGJ );
makeUnit( yGJ );
GDouble xGJ[3] = { yGJ[0], yGJ[1], yGJ[2] };
cross( xGJ, zGJ );
//particles to rest frames of their parents
boostToRest (b1, X);
boostToRest (omega, X);
boostToRest (rho, X);
boostToRest (rhos_pip, X);
// Note that in this form of the cascade of boosts, we are not
// saving the 4-vecs in their intermediate RF, but going sequentially
// straight to their immediate parent's RF.
  // Make sure to verify that the intermediates were not in fact needed
// and that we didn't break anything with this simplification.
MoveToRF(b1,omega);
MoveToRF(b1,rho); MoveToRF(omega,rho);
MoveToRF(b1,rhos_pip); MoveToRF(omega,rhos_pip); MoveToRF(rho,rhos_pip);
GDouble *b1_3vec=b1+1;
GDouble ang_b1[]={dot(b1_3vec, xGJ),
dot(b1_3vec, yGJ),
dot(b1_3vec, zGJ)};
GDouble b1_XRF_cosTheta = cosTheta(ang_b1);
GDouble b1_XRF_phi = phi(ang_b1);
GDouble rho_omegaRF_cosTheta = cosTheta(rho+1);
GDouble rho_omegaRF_phi = phi(rho+1);
GDouble rhos_pip_rhoRF_cosTheta = cosTheta(rhos_pip+1);
GDouble rhos_pip_rhoRF_phi = phi(rhos_pip+1);
GDouble omega_b1RF_cosTheta = cosTheta(omega+1);
GDouble omega_b1RF_phi = phi(omega+1);
/*
List_l_R: 0 1
List_J_rho: 1
List_l_rho: -1 1
List_L_omega: 1
List_l_omega: -1 0 1
List_L_b1: 0 2
List_l_b1: -1 0 1
*/
// SUMMATION GUIDE:
// notation meant to resemble TeX symbols in derivation
// exception: pol = \epsilon_\gamma
// l -> lambda, indicating helicity
// u_[particle](q.n.) -> amplitude strength coefficient
int l_R_lim = J_X + 1;
//shortcut: CB(L_X, J_b1, 0, l_b1 ; J_X, l_b1) vanishes when
// = CB(1, 1, 0, 0 ; 1, 0), so omit l_b1=0 when J_X=L_X=1
int l_b1_inc = L_X==1 && J_X==1 ? 2 : 1;
// restrict omega decay to just p wave
int L_omega_lim = 1; // set to 3 to allow F wave
int L_Rsign_lim;
GDouble cosAlpha=G_COS(alpha), sinAlpha=G_SIN(alpha);
WCUComplex expFact = {cosAlpha, sinAlpha};
WCUComplex expFact_conj = {cosAlpha, -sinAlpha};
WCUComplex ThelSum = { 0 , 0 };
// Setup dependent loop limits
LOOP(l_gamma, -1, 1, 2) {
LOOP(l_R, 0, l_R_lim, 1) {
if(l_R==0 && epsilon_R==-1) continue;
// LOOP(l_R, (1-epsilon_R)/2, l_R_lim, 1) // if this still causes some GPU core
// misalignment, try setting lower bound back to zero and tacking on
// * !(l_R==0 && epsilon_R==-1)
// to the long list of factors multiplying Thelsum below -IS
//summing positive and negative helicity terms of R's reflectivity state
L_Rsign_lim = l_R > 0 ? -1 : +1;
// Switch order of loop, because LOOP can only handle increasing increments
// LOOP(l_Rsign, 1, L_Rsign_lim, -2)
LOOP(l_Rsign, L_Rsign_lim, 1, 2) {
m_X = l_gamma - l_Rsign * l_R;
if (m_X==0) {
        //testing for cancellation in |J 0>+pol*P*epsilon_R*(-1)^J|J 0>
if(Par_X*pol*epsilon_R == (J_X % 2 ==0 ? -1:+1)) continue;
} else {
//enforcing that the selected projection <= vector magnitude
if( abs(m_X)>J_X) continue;
}
WCUComplex l_b1DepTerm = {0,0};
LOOP(l_b1, -1,1,l_b1_inc) {
WCUComplex L_b1DepTerm = {0,0};
LOOP(L_b1,0,2,2) {
WCUComplex l_omegaDepTerm = {0,0};
// 2012-05-19 JR Fix l_omega loop
// LOOP(l_omega,-1,0,1)
LOOP(l_omega,-1,1,1) {
WCUComplex L_omegaDepTerm = {0,0};
LOOP(L_omega, 1, L_omega_lim, 2) {
WCUComplex J_rhoDepTerm = {0,0};
LOOP(J_rho, 1, L_omega_lim, 2) {
//enforces triang. ineq. betw. J_omega=1, J_rho and L_omega
// in effect, L_omega and J_rho take identical values
if( abs(J_rho-L_omega) > 1) continue;
WCUComplex l_rhoDepTerm = {0,0};
LOOP(l_rho,-1,1,1) {
//shortcut CB(1,1,0,0;1,0)=0
if(L_omega==1 && J_rho==1 && l_rho==0) continue;
l_rhoDepTerm +=
Conjugate(wignerD(1, l_omega, l_rho,
rho_omegaRF_cosTheta, rho_omegaRF_phi))
* clebsch(L_omega, 0, J_rho, l_rho, 1, l_rho)
* Y(J_rho, l_rho, rhos_pip_rhoRF_cosTheta, rhos_pip_rhoRF_phi);
}
u_rho = J_rho==1 ? u_rho_1 : (J_rho==3 ? u_rho_3 : 0);
J_rhoDepTerm += u_rho * l_rhoDepTerm *
BreitWigner_loc(m0_rho,G0_rho, J_rho,rhos_pip0,rhos_pim);
}
J_rhoDepTerm *= BreitWigner_loc(m0_omega, G0_omega, L_omega, omegas_pi,rho0);
u_omega = L_omega==1 ? u_omega_1 : (L_omega==3 ? u_omega_3 : 0);
L_omegaDepTerm += u_omega * J_rhoDepTerm * Nterm(L_omega);
}
l_omegaDepTerm += L_omegaDepTerm *
clebsch(L_b1, 0, 1, l_omega, 1, l_omega) *
Conjugate(wignerD(1, l_b1, l_omega,
omega_b1RF_cosTheta, omega_b1RF_phi));
}
l_omegaDepTerm *= BreitWigner_loc(m0_b1, G0_b1, L_b1, b1s_pi, omega0);
u_b1 = L_b1==0 ? u_b1_0 : (L_b1==2 ? u_b1_2 : 0);
L_b1DepTerm += u_b1 * l_omegaDepTerm * Nterm(L_b1);
}
//-- (_) understand why assignment here produces:
// KERNEL LAUNCH ERROR [b1piAngAmp]: the launch timed out and was terminated
// assigning/incrementing integers causes no problems
l_b1DepTerm += L_b1DepTerm *
Conjugate(wignerD(J_X, m_X, l_b1, b1_XRF_cosTheta, b1_XRF_phi)) *
clebsch(L_X, 0, 1, l_b1, J_X, l_b1);
}
ThelSum += l_b1DepTerm
//to account for |eps_g> ~ (|1,-1>exp(-ia)-pol|1,+1>exp(ia))
* (l_gamma==1 ? (-pol)*expFact : expFact_conj)
                 //Assemble reflectivity eigenvector with epsilon_X=pol*epsilon_R
* (GDouble) (m_X<0 ? Par_X*pol*epsilon_R*((J_X-m_X) % 2 == 0 ? +1:-1) : 1)
* (GDouble) (m_X == 0 ? 1.0 : InvSqrt2 )
                 // to apply the theta(l_R) reflectivity state prefactor:
// m=0: 1/2 m>0: 1/sqrt(2) m<0: 0 (last just skipped in this sum)
* (GDouble) (l_R > 0 ? InvSqrt2 : 1.0 )
//apply coefficients to the reflectivity basis terms:
* (GDouble) (l_Rsign==1 ? 1 : epsilon_R)
; //v(*epsilon_R) *
}
}
}
ThelSum *= Nterm(L_X) *
// barrier factor
(GDouble)(L_X==0 ? 1.0 : (L_X==1 ? q : G_POW(q,L_X))) *
// to apply polarization fraction weights:
(GDouble)G_SQRT((1.0-pol*polFrac)*0.5) * //(1+g) for x-pol, (1-g) for y-pol
(pol==1 ? i : COne)*InvSqrt2 * //to account for |eps_g> ~ sqrt(-eps/2)
clebsch(1, Iz_b1, 1, Iz_pi, I_X, Iz_b1 + Iz_pi);
pcDevAmp[iEvent] = ThelSum;
}
#ifdef DEBUG
// This is for debugging
// It reads the amplitudes and momenta vectors from the CUDA device and prints them.
void
printCudaArrays(GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents, int cnt) {
// Read amplitudes from GPU to CPU
GDouble *amp = (GDouble *) malloc (iNEvents * 2 * sizeof(GDouble));
cudaMemcpy (amp, pcDevAmp, iNEvents * 2 * sizeof(GDouble), cudaMemcpyDeviceToHost);
// Copy 4momenta from GPU to CPU - make part() big enough to hold the entire set of momenta
GDouble *part = (GDouble *) malloc (iNEvents * 4 * iNParticles * sizeof(GDouble));
cudaMemcpy (part, pfDevData, iNEvents * 4 * iNParticles * sizeof(GDouble), cudaMemcpyDeviceToHost);
// Print arrays
int ievent, ipart, idim;
int ndim = 4;
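  // Note on the device layout (inferred from the indexing below): momenta are
  // stored structure-of-arrays with the event index varying fastest,
  //   part[ievent + idim*iNEvents + ipart*ndim*iNEvents],
  // where idim = 0..3 is apparently (E, px, py, pz), judging by MASS().
  // Example: with iNEvents = 1000, the pz (idim = 3) of particle 2 in event 7
  // sits at offset 7 + 3*1000 + 2*4*1000 = 11007.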
for (ievent=0; ievent<iNEvents; ievent++) {
printf ("test: CUDA: %2d %6d ", cnt, ievent);
// Print amplitude
printf (" %12.4e %12.4e", amp[2*ievent], amp[2*ievent+1]);
for (ipart=0;ipart<iNParticles;ipart++) {
printf (" ");
for (idim=0;idim<4;idim++) {
printf ( " %8.4f", part[ievent + idim*iNEvents + ipart*ndim*iNEvents ] );
}
}
printf("\n");
}
// Free allocations from arrays allocated within this function
if (amp) free(amp);
if (part) free(part);
}
#endif
void
GPUb1piAngAmp_exec(dim3 dimGrid, dim3 dimBlock,
// GPU_AMP_PROTO,
GDouble* pfDevData, WCUComplex* pcDevAmp, int* piDevPerm, int iNParticles, int iNEvents,
int polBeam, GDouble polFrac,
int J_X, int Par_X, int L_X, int I_X, int epsilon_R,
int Iz_b1, int Iz_pi,
GDouble u_rho_1, GDouble u_rho_3,
GDouble u_omega_1, GDouble u_omega_3,
GDouble u_b1_0, GDouble u_b1_2,
GDouble G0_omega, GDouble G0_b1, bool orthocheck)
{
int ievent, ievent1, idim, ipart, i, j, k;
int nonZero = 0;
int static cnt = 0;
cnt++;
// printf("test: Call to GPUb1piAngAmp_exec: cnt %d\n", cnt);
// Identify amplitudes which are zero
Pretest_kernel<<< dimGrid, dimBlock >>>
(
// GPU_AMP_ARGS,
pfDevData, pcDevAmp, piDevPerm, iNParticles, iNEvents,
polBeam, polFrac,
J_X, Par_X, L_X, I_X, epsilon_R, Iz_b1, Iz_pi,
u_rho_1, u_rho_3, u_omega_1, u_omega_3, u_b1_0, u_b1_2,
G0_omega, G0_b1, orthocheck );
// printf("test: after call to Pretest_kernel()\n");
// Copy pcDevAmp from device to host
GDouble *hostAmp = (GDouble *) malloc(2*iNEvents*sizeof(GDouble));
cudaMemcpy (hostAmp, pcDevAmp, 2*iNEvents*sizeof(GDouble), cudaMemcpyDeviceToHost);
// Initialize all on-device amplitudes to zero
Setzero_kernel<<< dimGrid, dimBlock >>>(pcDevAmp,iNEvents);
// printf("test: after call to Setzero_kernel()\n");
// Count number of nonZero amplitudes
for (i=0;i<iNEvents;i++) {
if (hostAmp[2*i]==1.0) nonZero++;
}
// Allocate array to hold indices of nonZero amplitudes
int *nonZeroIndices = (int *) malloc(nonZero * sizeof(int));
j = 0;
for (i=0;i<iNEvents;i++) {
if (hostAmp[2*i]==1.0) nonZeroIndices[j++] = i;
}
// Copy 4momenta from GPU to CPU - make part() big enough to hold the entire set of momenta
GDouble *part = (GDouble *) malloc (iNEvents * 4 * iNParticles * sizeof(GDouble));
cudaMemcpy (part, pfDevData, iNEvents * 4 * iNParticles * sizeof(GDouble), cudaMemcpyDeviceToHost);
// printf("test: after copy pfDevData to Device\n");
// Copy nonZero momenta in place to the start of the array part
// Make sure the order of copying moves continuously from lower to higher indices.
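  // Worked example with illustrative numbers: iNEvents = 6, nonZeroIndices = {1, 4}.
  // Within each (ipart, idim) slab this copies part[newBase+0] = part[oldBase+1]
  // and part[newBase+1] = part[oldBase+4]. The in-place pass is safe because the
  // write index j = ievent1 + idim*nonZero + ipart*4*nonZero never exceeds the
  // read index i = ievent + idim*iNEvents + ipart*4*iNEvents, and writes fill
  // positions in strictly ascending order, so every source element is read
  // before it can be overwritten.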
for (ipart=0;ipart<iNParticles;ipart++) {
for (idim=0;idim<4;idim++) {
for (ievent1=0;ievent1<nonZero;ievent1++) {
ievent = nonZeroIndices[ievent1];
// Index of nonZero event in original particle array
i = ievent + idim * iNEvents + ipart * 4 * iNEvents;
// Index of nonZero event in new particle array
j = ievent1 + idim * nonZero + ipart * 4 * nonZero;
part[j] = part[i];
}
}
}
// Copy new particles on CPU back to GPU, only need those momenta sets which were non-zero, not the size of the entire set.
GDouble *part_dev;
cudaMalloc(&part_dev, nonZero * 4 * iNParticles * sizeof(GDouble) );
cudaMemcpy( part_dev, part, nonZero * 4 * iNParticles * sizeof(GDouble), cudaMemcpyHostToDevice );
// printf("test: after copy Part to Device\n");
// Reset dimGrid and dimBlock for the value of nonZero
int Nthreads = 32;
dim3 dimBlock1(Nthreads);
dim3 dimGrid1((nonZero-1)/Nthreads+1);
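  // (nonZero-1)/Nthreads + 1 is integer ceiling division: e.g. nonZero = 100 gives
  // 99/32 + 1 = 4 blocks (128 threads); the 28 surplus threads exit immediately at
  // the "if (iEvent >= iNEvents) return;" guard in the kernel.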
// Evaluate non-zero amplitudes
// iNEvents = nonZero;
GPUb1piAngAmp_kernel<<< dimGrid1, dimBlock1 >>>
(
cnt,
// GPU_AMP_ARGS,
// pfDevData, pcDevAmp, piDevPerm, iNParticles, nonZero,
part_dev, pcDevAmp, piDevPerm, iNParticles, nonZero,
polBeam, polFrac,
J_X, Par_X, L_X, I_X, epsilon_R, Iz_b1, Iz_pi,
u_rho_1, u_rho_3, u_omega_1, u_omega_3, u_b1_0, u_b1_2,
G0_omega, G0_b1, orthocheck );
// printf("test: after call to GUPb1piAngAmp_kernel()\n");
// Read amplitudes from GPU to CPU
GDouble *amp = (GDouble *) malloc (iNEvents * 2 * sizeof(GDouble));
cudaMemcpy (amp, pcDevAmp, iNEvents * 2 * sizeof(GDouble), cudaMemcpyDeviceToHost);
// printf("test: after copy Amp to Host\n");
// Re-arrange location of amplitudes on GPU to match original distribution of vectors
// Progress through the index array backward.
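  // Worked example with illustrative numbers: iNEvents = 6, nonZeroIndices = {1, 4},
  // compacted results C0, C1 in complex slots 0 and 1:
  //   i = 1: zero slot 5, then slot 4 <- C1 (read from slot 1)
  //   i = 0: zero slots 2..3, then slot 1 <- C0 (read from slot 0)
  //   final loop: zero slot 0
  // giving [0, C0, 0, 0, C1, 0]. Walking backward makes the in-place scatter safe:
  // nonZeroIndices[i] >= i, so slot i is never overwritten before it is read.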
k = iNEvents;
for (i=nonZero-1;i>=0;i--) {
// Zero those elements between this element and last.
for (j=nonZeroIndices[i]+1;j<k;j++) {
amp[2*j ] = 0.0;
amp[2*j+1] = 0.0;
}
k = nonZeroIndices[i];
amp[2*k ] = amp[2*i ];
amp[2*k+1] = amp[2*i+1];
}
// Zero remaining elements
for (j=0;j<nonZeroIndices[0];j++) {
amp[2*j ] = 0.0;
amp[2*j+1] = 0.0;
}
// Write values back to GPU so calling program will find them where they
// expect them.
cudaMemcpy (pcDevAmp, amp, iNEvents * 2 * sizeof(GDouble), cudaMemcpyHostToDevice);
// printf("test: after copy Amp to Device\n");
// Free allocations
if (part_dev) cudaFree(part_dev);
if (amp) free(amp);
if (part) free(part);
// printf("test: after Free allocations\n");
// Print Particle and Amplitude CUDA arrays
#ifdef DEBUG
printCudaArrays(pfDevData, pcDevAmp, piDevPerm, iNParticles, iNEvents, cnt);
#endif
}
|
495227e37be042d82d802942a0aca2a9862777d6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <gtest/gtest.h>
#include <iostream>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/label/classlabels.hpp>
#include <raft/random/make_blobs.hpp>
#include <raft/spatial/knn/knn.hpp>
#include <rmm/device_uvector.hpp>
#include <selection/knn.cuh>
#include <vector>
namespace MLCommon {
namespace Selection {
struct KNNClassifyInputs {
int rows;
int cols;
int n_labels;
float cluster_std;
int k;
};
class KNNClassifyTest : public ::testing::TestWithParam<KNNClassifyInputs> {
public:
KNNClassifyTest()
: params(::testing::TestWithParam<KNNClassifyInputs>::GetParam()),
stream(handle.get_stream()),
train_samples(params.rows * params.cols, stream),
train_labels(params.rows, stream),
pred_labels(params.rows, stream),
knn_indices(params.rows * params.k, stream),
knn_dists(params.rows * params.k, stream)
{
basicTest();
}
protected:
void basicTest()
{
raft::random::make_blobs<float, int>(train_samples.data(),
train_labels.data(),
params.rows,
params.cols,
params.n_labels,
stream,
true,
nullptr,
nullptr,
params.cluster_std);
rmm::device_uvector<int> unique_labels(0, stream);
auto n_classes =
raft::label::getUniquelabels(unique_labels, train_labels.data(), params.rows, stream);
std::vector<float*> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = train_samples.data();
sizes[0] = params.rows;
raft::spatial::knn::brute_force_knn(handle,
ptrs,
sizes,
params.cols,
train_samples.data(),
params.rows,
knn_indices.data(),
knn_dists.data(),
params.k);
std::vector<int*> y;
y.push_back(train_labels.data());
std::vector<int*> uniq_labels;
uniq_labels.push_back(unique_labels.data());
std::vector<int> n_unique;
n_unique.push_back(n_classes);
knn_classify(handle,
pred_labels.data(),
knn_indices.data(),
y,
params.rows,
params.rows,
params.k,
uniq_labels,
n_unique);
handle.sync_stream(stream);
}
protected:
KNNClassifyInputs params;
raft::handle_t handle;
hipStream_t stream;
rmm::device_uvector<float> train_samples;
rmm::device_uvector<int> train_labels;
rmm::device_uvector<int> pred_labels;
rmm::device_uvector<int64_t> knn_indices;
rmm::device_uvector<float> knn_dists;
};
typedef KNNClassifyTest KNNClassifyTestF;
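// Note on why Fit can expect exact recovery (an inference from the setup, not a
// documented guarantee): cluster_std = 0.01 keeps the blobs tightly separated,
// and the query set equals the training set, so each point finds its own row
// (and otherwise only same-cluster rows) among its k neighbors; a vote over
// those neighbor labels should reproduce train_labels exactly.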
TEST_P(KNNClassifyTestF, Fit)
{
ASSERT_TRUE(
devArrMatch(train_labels.data(), pred_labels.data(), params.rows, raft::Compare<int>()));
}
const std::vector<KNNClassifyInputs> inputsf = {{100, 10, 2, 0.01f, 2},
{1000, 10, 5, 0.01f, 2},
{10000, 10, 5, 0.01f, 2},
{100, 10, 2, 0.01f, 10},
{1000, 10, 5, 0.01f, 10},
{10000, 10, 5, 0.01f, 10},
{100, 10, 2, 0.01f, 50},
{1000, 10, 5, 0.01f, 50},
{10000, 10, 5, 0.01f, 50}};
INSTANTIATE_TEST_CASE_P(KNNClassifyTest, KNNClassifyTestF, ::testing::ValuesIn(inputsf));
}; // end namespace Selection
}; // namespace MLCommon
| 495227e37be042d82d802942a0aca2a9862777d6.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <gtest/gtest.h>
#include <iostream>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/label/classlabels.hpp>
#include <raft/random/make_blobs.hpp>
#include <raft/spatial/knn/knn.hpp>
#include <rmm/device_uvector.hpp>
#include <selection/knn.cuh>
#include <vector>
namespace MLCommon {
namespace Selection {
struct KNNClassifyInputs {
int rows;
int cols;
int n_labels;
float cluster_std;
int k;
};
class KNNClassifyTest : public ::testing::TestWithParam<KNNClassifyInputs> {
public:
KNNClassifyTest()
: params(::testing::TestWithParam<KNNClassifyInputs>::GetParam()),
stream(handle.get_stream()),
train_samples(params.rows * params.cols, stream),
train_labels(params.rows, stream),
pred_labels(params.rows, stream),
knn_indices(params.rows * params.k, stream),
knn_dists(params.rows * params.k, stream)
{
basicTest();
}
protected:
void basicTest()
{
raft::random::make_blobs<float, int>(train_samples.data(),
train_labels.data(),
params.rows,
params.cols,
params.n_labels,
stream,
true,
nullptr,
nullptr,
params.cluster_std);
rmm::device_uvector<int> unique_labels(0, stream);
auto n_classes =
raft::label::getUniquelabels(unique_labels, train_labels.data(), params.rows, stream);
std::vector<float*> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = train_samples.data();
sizes[0] = params.rows;
raft::spatial::knn::brute_force_knn(handle,
ptrs,
sizes,
params.cols,
train_samples.data(),
params.rows,
knn_indices.data(),
knn_dists.data(),
params.k);
std::vector<int*> y;
y.push_back(train_labels.data());
std::vector<int*> uniq_labels;
uniq_labels.push_back(unique_labels.data());
std::vector<int> n_unique;
n_unique.push_back(n_classes);
knn_classify(handle,
pred_labels.data(),
knn_indices.data(),
y,
params.rows,
params.rows,
params.k,
uniq_labels,
n_unique);
handle.sync_stream(stream);
}
protected:
KNNClassifyInputs params;
raft::handle_t handle;
cudaStream_t stream;
rmm::device_uvector<float> train_samples;
rmm::device_uvector<int> train_labels;
rmm::device_uvector<int> pred_labels;
rmm::device_uvector<int64_t> knn_indices;
rmm::device_uvector<float> knn_dists;
};
typedef KNNClassifyTest KNNClassifyTestF;
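// Note on why Fit can expect exact recovery (an inference from the setup, not a
// documented guarantee): cluster_std = 0.01 keeps the blobs tightly separated,
// and the query set equals the training set, so each point finds its own row
// (and otherwise only same-cluster rows) among its k neighbors; a vote over
// those neighbor labels should reproduce train_labels exactly.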
TEST_P(KNNClassifyTestF, Fit)
{
ASSERT_TRUE(
devArrMatch(train_labels.data(), pred_labels.data(), params.rows, raft::Compare<int>()));
}
const std::vector<KNNClassifyInputs> inputsf = {{100, 10, 2, 0.01f, 2},
{1000, 10, 5, 0.01f, 2},
{10000, 10, 5, 0.01f, 2},
{100, 10, 2, 0.01f, 10},
{1000, 10, 5, 0.01f, 10},
{10000, 10, 5, 0.01f, 10},
{100, 10, 2, 0.01f, 50},
{1000, 10, 5, 0.01f, 50},
{10000, 10, 5, 0.01f, 50}};
INSTANTIATE_TEST_CASE_P(KNNClassifyTest, KNNClassifyTestF, ::testing::ValuesIn(inputsf));
}; // end namespace Selection
}; // namespace MLCommon
|
e374884b7e5d2a508534679a37ea79bf2b59b737.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <iostream>
#include <string>
#include "routines.cuh"
#include "layer_kernels.cuh"
#include <cudaconv2.cuh>
using namespace std;
extern LayerOpt opt1, opt2, opt3, opt4, optTop;
extern FILE* pFile;
extern float perturbScale;
void finetune_rnorm_maxout() {
////assignOpt();
printf("starting finetune_rnorm_competeOut()!\nkeepProb = %f-%f-%f\nperturbScale = %f\n", opt1.keepStartRate, opt1.keepIncRate, opt1.keepEndRate, perturbScale);
printf("inputKeepProb = %f-%f-%f\n", opt1.keepInputStartRate, opt1.keepInputIncRate, opt1.keepInputEndRate);
printf("image sizes\nlayer1: %d\nlayer2: %d\nlayer3: %d\nlayer4: %d\n", opt1.imSize, opt2.imSize, opt3.imSize, opt4.imSize);
printf("image channels\nlayer1: %d\nlayer2: %d\nlayer3: %d\nlayer4: %d\nlayerTop: %d\n", opt1.numChannels, opt2.numChannels, opt3.numChannels, opt4.numChannels, opt4.numFilters);
printf("learning rates: layer1--%f, layer2--%f, layer3--%f, layer4--%f, layerTop--%f\n", opt1.lrW, opt2.lrW, opt3.lrW, opt4.lrW, optTop.lrW);
printf("bias rates: layer1--%f, layer2--%f, layer3--%f, layer4--%f, layerTop--%f\n", opt1.lrB, opt2.lrB, opt3.lrB, opt4.lrB, optTop.lrB);
printf("inputScale--%f\n", opt1.inputScale);
printf("lrStartRate = %f, momStartRate = %f\n", opt1.lrStartScale, opt1.momStartScale);
printf("lrEndRate = %f, momEndRate = %f\n", opt1.lrMinRate, opt1.momMaxRate);
printf("lrDecayFactor = %f, momIncFactor = %f\n", opt1.lrDecayFactor, opt1.momIncFactor);
printf("lr schedule = %s, mom schedule = %s\n", opt1.lrDecayType, opt1.momIncType);
printf("competeMax sizes: layer1 -- %d, layer2 -- %d, layer3 -- %d, layer4 -- %d\n", opt1.maxOutPoolSize, opt2.maxOutPoolSize, opt3.maxOutPoolSize, opt4.maxOutPoolSize);
printf("competeMax strides: layer1 -- %d, layer2 -- %d, layer3 -- %d, layer4 -- %d\n", opt1.maxOutPoolStride, opt2.maxOutPoolStride, opt3.maxOutPoolStride, opt4.maxOutPoolStride);
printf("labelSize = %d\n", opt1.labelSize);
printf("flip = %d\n", opt1.flip);
fprintf(pFile, "starting finetune_rnorm_competeOut()!\nkeepProb = %f-%f-%f\nperturbScale = %f\n", opt1.keepStartRate, opt1.keepIncRate, opt1.keepEndRate, perturbScale);
fprintf(pFile, "inputKeepProb = %f-%f-%f\n", opt1.keepInputStartRate, opt1.keepInputIncRate, opt1.keepInputEndRate);
fprintf(pFile, "image sizes\nlayer1: %d\nlayer2: %d\nlayer3: %d\nlayer4: %d\n", opt1.imSize, opt2.imSize, opt3.imSize, opt4.imSize);
fprintf(pFile, "image channels\nlayer1: %d\nlayer2: %d\nlayer3: %d\nlayer4: %d\nlayerTop: %d\n", opt1.numChannels, opt2.numChannels, opt3.numChannels, opt4.numChannels, opt4.numFilters);
fprintf(pFile, "learning rates: layer1--%f, layer2--%f, layer3--%f, layer4--%f, layerTop--%f\n", opt1.lrW, opt2.lrW, opt3.lrW, opt4.lrW, optTop.lrW);
fprintf(pFile, "bias rates: layer1--%f, layer2--%f, layer3--%f, layer4--%f, layerTop--%f\n", opt1.lrB, opt2.lrB, opt3.lrB, opt4.lrB, optTop.lrB);
fprintf(pFile, "inputScale--%f\n", opt1.inputScale);
fprintf(pFile, "lrStartRate = %f, momStartRate = %f\n", opt1.lrStartScale, opt1.momStartScale);
fprintf(pFile, "lrEndRate = %f, momEndRate = %f\n", opt1.lrMinRate, opt1.momMaxRate);
fprintf(pFile, "lrDecayFactor = %f, momIncFactor = %f\n", opt1.lrDecayFactor, opt1.momIncFactor);
fprintf(pFile, "lr schedule = %s, mom schedule = %s\n", opt1.lrDecayType, opt1.momIncType);
fprintf(pFile, "competeMax sizes: layer1 -- %d, layer2 -- %d, layer3 -- %d, layer4 -- %d\n", opt1.maxOutPoolSize, opt2.maxOutPoolSize, opt3.maxOutPoolSize, opt4.maxOutPoolSize);
fprintf(pFile, "competeMax strides: layer1 -- %d, layer2 -- %d, layer3 -- %d, layer4 -- %d\n", opt1.maxOutPoolStride, opt2.maxOutPoolStride, opt3.maxOutPoolStride, opt4.maxOutPoolStride);
fprintf(pFile, "labelSize = %d\n", opt1.labelSize);
fprintf(pFile, "flip = %d\n", opt1.flip);
// data and parameters storage
NVMatrix act0; // avatar of the input
NVMatrix act1, act1Pool, act1PoolMax;
NVMatrix act2, act2Pool, act2PoolMax;
NVMatrix act3, act3Pool, act3PoolMax;
NVMatrix act4, act4Max;
NVMatrix actTop;
NVMatrix act1Grad, act1PoolGrad, act1PoolMaxGrad;
NVMatrix act2Grad, act2PoolGrad, act2PoolMaxGrad;
NVMatrix act3Grad, act3PoolGrad, act3PoolMaxGrad;
NVMatrix act4Grad, act4MaxGrad;
NVMatrix actTopGrad;
NVMatrix weight1, weight2, weight3, weight4, weightTop;
NVMatrix weight1Grad, weight2Grad, weight3Grad, weight4Grad, weightTopGrad;
NVMatrix weight1Inc, weight2Inc, weight3Inc, weight4Inc, weightTopInc;
NVMatrix weight1GradTmp, weight2GradTmp, weight3GradTmp, weight4GradTmp, weightTopGradTmp;
NVMatrix bias1, bias2, bias3, bias4, biasTop; // bias4 is just an all-zero dummy vector
NVMatrix bias1Grad, bias2Grad, bias3Grad, bias4Grad, biasTopGrad;
NVMatrix bias1Inc, bias2Inc, bias3Inc, bias4Inc, biasTopInc;
// initialize parameters
//if (opt1.loadParam) {
weight1.resize(opt1.numVis, opt1.numFilters);
weight2.resize(opt2.numVis, opt2.numFilters);
if (strcmp(opt3.layerType, "local") == 0)
weight3.resize(opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
else if (strcmp(opt3.layerType, "conv") == 0)
weight3.resize(opt3.numVis, opt3.numFilters);
weight4.resize(opt4.numVis, opt4.numFilters);
weightTop.resize(optTop.numVis, optTop.numFilters);
bias1.resize(opt1.numFilters, 1);
bias2.resize(opt2.numFilters, 1);
if (strcmp(opt3.layerType, "local") == 0)
bias3.resize(opt3.numFilters * opt3.outX * opt3.outX, 1);
else if (strcmp(opt3.layerType, "conv") == 0)
bias3.resize(opt3.numFilters, 1);
bias4.resize(1, opt4.numFilters);
bias4.setTrans(true);
biasTop.resize(1, optTop.numFilters);
biasTop.setTrans(true);
if (opt1.loadParam) {
NVReadFromFile(weight1, opt1.weightPath + "/weight1.bin");
NVReadFromFile(weight2, opt1.weightPath + "/weight2.bin");
NVReadFromFile(weight3, opt1.weightPath + "/weight3.bin");
NVReadFromFile(weight4, opt1.weightPath + "/weight4.bin");
NVReadFromFile(weightTop, opt1.weightPath + "/weightTop.bin");
NVReadFromFile(bias1, opt1.weightPath + "/bias1.bin");
NVReadFromFile(bias2, opt1.weightPath + "/bias2.bin");
NVReadFromFile(bias3, opt1.weightPath + "/bias3.bin");
NVReadFromFile(bias4, opt1.weightPath + "/bias4.bin");
NVReadFromFile(biasTop, opt1.weightPath + "/biasTop.bin");
}
else {
weight1.randomizeUniform(); weight1.scale(2*opt1.initstv); weight1.addScalar(-opt1.initstv);
weight2.randomizeUniform(); weight2.scale(2*opt2.initstv); weight2.addScalar(-opt2.initstv);
weight3.randomizeUniform(); weight3.scale(2*opt3.initstv); weight3.addScalar(-opt3.initstv);
weight4.randomizeUniform(); weight4.scale(2*opt4.initstv); weight4.addScalar(-opt4.initstv);
weightTop.randomizeUniform(); weightTop.scale(2*optTop.initstv); weightTop.addScalar(-optTop.initstv);
bias1.apply(NVMatrixOps::Zero());
bias2.apply(NVMatrixOps::Zero());
bias3.apply(NVMatrixOps::Zero());
bias4.apply(NVMatrixOps::Zero());
biasTop.apply(NVMatrixOps::Zero());
}
initWeights(weight1Inc, opt1.numVis, opt1.numFilters, false, 0.0); initWeights(weight1Grad, opt1.numVis, opt1.numFilters, false, 0.0);
initWeights(weight2Inc, opt2.numVis, opt2.numFilters, false, 0.0); initWeights(weight2Grad, opt2.numVis, opt2.numFilters, false, 0.0);
if (strcmp(opt3.layerType, "local") == 0) {
initWeights(weight3Inc, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, 0.0);
initWeights(weight3Grad, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, 0.0);
}
else if (strcmp(opt3.layerType, "conv") == 0) {
initWeights(weight3Inc, opt3.numVis, opt3.numFilters, false, 0.0);
initWeights(weight3Grad, opt3.numVis, opt3.numFilters, false, 0.0);
}
initWeights(weight4Inc, opt4.numVis, opt4.numFilters, false, 0.0); initWeights(weight4Grad, opt4.numVis, opt4.numFilters, false, 0.0);
initWeights(weightTopInc, optTop.numVis, optTop.numFilters, false, 0.0); initWeights(weightTopGrad, optTop.numVis, optTop.numFilters, false, 0.0);
initWeights(bias1Inc, opt1.numFilters, 1, false, 0.0); initWeights(bias1Grad, opt1.numFilters, 1, false, 0.0);
initWeights(bias2Inc, opt2.numFilters, 1, false, 0.0); initWeights(bias2Grad, opt2.numFilters, 1, false, 0.0);
if (strcmp(opt3.layerType, "local") == 0) {
initWeights(bias3Inc, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0);
initWeights(bias3Grad, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0);
}
else if (strcmp(opt3.layerType, "conv") == 0) {
initWeights(bias3Inc, opt3.numFilters, 1, false, 0.0);
initWeights(bias3Grad, opt3.numFilters, 1, false, 0.0);
}
initWeights(bias4Inc, 1, opt4.numFilters, false, 0.0); initWeights(bias4Grad, 1, opt4.numFilters, false, 0.0);
initWeights(biasTopInc, 1, optTop.numFilters, true, 0.0); initWeights(biasTopGrad, 1, optTop.numFilters, true, 0.0);
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int trainBatchNum = opt1.numTrain / batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTrain(trainBatchNum), CPUTest(testBatchNum);
vector<NVMatrix*> GPUTrain(trainBatchNum), GPUTest(testBatchNum);
vector<NVMatrix*> GPURawLabelTrain(trainBatchNum), GPURawLabelTest(testBatchNum);
for (int batch = 0; batch < trainBatchNum; batch++) {
CPUTrain[batch] = new Matrix(imPixels, batchSize);
CPUTrain[batch]->setTrans(false);
GPUTrain[batch] = new NVMatrix();
GPURawLabelTrain[batch] = new NVMatrix(1, batchSize);
GPURawLabelTrain[batch]->setTrans(false);
if (strcmp(opt1.exp.c_str(), "cifar10") == 0) {
hmReadFromFile(*CPUTrain[batch], opt1.dataPath + "/cifar_whitened.bin", batch*batchSize);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], opt1.dataPath + "/cifar_labels.bin", batch*batchSize);
}
else if (strcmp(opt1.exp.c_str(), "cifar100") == 0) {
hmReadFromFile(*CPUTrain[batch], opt1.dataPath + "/cifar100_whitened.bin", batch*batchSize);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], opt1.dataPath + "/cifar100_fine_labels.bin", batch*batchSize);
}
}
batchSize = opt1.numTrain % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTrain.push_back(new Matrix(imPixels, batchSize));
CPUTrain.back()->setTrans(false);
GPUTrain.push_back(new NVMatrix());
GPURawLabelTrain.push_back(new NVMatrix(1, batchSize));
GPURawLabelTrain.back()->setTrans(false);
if (strcmp(opt1.exp.c_str(), "cifar10") == 0) {
			hmReadFromFile(*CPUTrain.back(), opt1.dataPath + "/cifar_whitened.bin", trainBatchNum*opt1.batchSize); // offset counts full-size batches
			NVRawLabelReadFromFile(*GPURawLabelTrain.back(), opt1.dataPath + "/cifar_labels.bin", trainBatchNum*opt1.batchSize);
}
else if (strcmp(opt1.exp.c_str(), "cifar100") == 0) {
			hmReadFromFile(*CPUTrain.back(), opt1.dataPath + "/cifar100_whitened.bin", trainBatchNum*opt1.batchSize); // offset counts full-size batches
			NVRawLabelReadFromFile(*GPURawLabelTrain.back(), opt1.dataPath + "/cifar100_fine_labels.bin", trainBatchNum*opt1.batchSize);
}
}
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
GPUTest[batch] = new NVMatrix();
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
if (strcmp(opt1.exp.c_str(), "cifar10") == 0) {
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar_whitened.bin", opt1.numTrain+batch*batchSize);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
else if (strcmp(opt1.exp.c_str(), "cifar100") == 0) {
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar100_whitened.bin", opt1.numTrain+batch*batchSize);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar100_fine_labels.bin", opt1.numTrain+batch*batchSize);
}
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
GPUTest.push_back(new NVMatrix());
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
if (strcmp(opt1.exp.c_str(), "cifar10") == 0) {
			hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar_whitened.bin", opt1.numTrain+testBatchNum*opt1.batchSize); // offset counts full-size batches
			NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+testBatchNum*opt1.batchSize);
}
else if (strcmp(opt1.exp.c_str(), "cifar100") == 0) {
			hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar100_whitened.bin", opt1.numTrain+testBatchNum*opt1.batchSize); // offset counts full-size batches
			NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar100_fine_labels.bin", opt1.numTrain+testBatchNum*opt1.batchSize);
}
}
NVMatrix trueLabelLogProbs;
NVMatrix correctProbs;
MTYPE cost; // as before, we trace the performance using the cost variable
MTYPE cost1;
NVMatrix absM;
MTYPE weightAbs1, weightAbs2, weightAbs3, weightAbs4, weightAbsTop;
MTYPE biasAbs1, biasAbs2, biasAbs3, biasAbs4, biasAbsTop;
MTYPE weightGradAbs1, weightGradAbs2, weightGradAbs3, weightGradAbs4, weightGradAbsTop;
MTYPE biasGradAbs1, biasGradAbs2, biasGradAbs3, biasGradAbs4, biasGradAbsTop;
clock_t startClock;
clock_t tick;
cropDataProvider(CPUTest, GPUTest, opt1, true, opt1.whitened); // test data is fixed
NVMask maxMask1, maxMask2, maxMask3, maxMask4;
NVMatrix dropMask0, dropMask1, dropMask2, dropMask3, dropMask4;
// normalization
NVMatrix normCol1, normCol2, normCol3, normCol4, normColTop;
NVMatrix tmp1, tmp2, tmp3, tmp4, tmpTop;
float lr_scale = opt1.lrStartScale, mom_scale = opt1.momStartScale;
float keepProb = opt1.keepStartRate;
float keepInputProb = opt1.keepInputStartRate;
for (int epoch = 0; epoch < opt1.numEpochs; epoch++) {
cost = 0;
cost1 = 0;
cropDataProvider(CPUTrain, GPUTrain, opt1, false, opt1.whitened); // copy data to the GPU side
hipDeviceSynchronize();
startClock = clock();
for (int batch = 0; batch < GPUTrain.size(); batch++) {
batchSize = GPUTrain[batch]->getNumCols();
// ====forward pass====
// 0->1
// cout << "0->1\n";
//original
//act0.resize()
GPUTrain[batch]->copy(act0);
dropMask0.resize(act0);
dropMask0.randomizeBinary(keepInputProb); // input part
act0.eltwiseMult(dropMask0);
act0.scale(opt1.inputScale);
			act0.scale(1.0/keepInputProb); // rescale by the keep probability used for dropMask0
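			// The 1/keep-probability factor above is "inverted dropout": for a mask
			// m ~ Bernoulli(p), E[m*x/p] = x, so activations keep their expected
			// scale during training and the test pass below needs no compensation.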
activateConv(act0, act1, weight1, bias1, opt1);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convMaxOut(act1Pool, act1PoolMax, maxMask1, opt1.numFilters, opt1.maxOutPoolSize, opt1.maxOutPoolStride, opt1.poolOutX, batchSize);
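			// convMaxOut appears to implement maxout units (Goodfellow et al., 2013):
			// each group of maxOutPoolSize consecutive feature maps is reduced to one
			// map by an elementwise max, with maxMask1 recording the argmax so that
			// convMaxOutUndo can route gradients in the backward pass.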
act1PoolMax.transpose(false);
// 1->2
// cout << "1->2\n";
//original
dropMask1.resize(act1PoolMax);
dropMask1.randomizeBinary(keepProb);
act1PoolMax.eltwiseMult(dropMask1);
act1PoolMax.scale(1.0/keepProb);
activateConv(act1PoolMax, act2, weight2, bias2, opt2);
convLocalPool(act2, act2Pool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
convMaxOut(act2Pool, act2PoolMax, maxMask2, opt2.numFilters, opt2.maxOutPoolSize, opt2.maxOutPoolStride, opt2.poolOutX, batchSize);
act2PoolMax.transpose(false);
// 2->3
// cout << "2->3\n";
// original
dropMask2.resize(act2PoolMax);
dropMask2.randomizeBinary(keepProb);
act2PoolMax.eltwiseMult(dropMask2);
act2PoolMax.scale(1.0/keepProb);
if (strcmp(opt3.layerType, "local") == 0)
activateLocal(act2PoolMax, act3, weight3, bias3, opt3);
else if (strcmp(opt3.layerType, "conv") == 0)
activateConv(act2PoolMax, act3, weight3, bias3, opt3);
convLocalPool(act3, act3Pool, opt3.numFilters, opt3.poolSize, opt3.poolStartX, opt3.poolStride, opt3.poolOutX, MaxPooler());
convMaxOut(act3Pool, act3PoolMax, maxMask3, opt3.numFilters, opt3.maxOutPoolSize, opt3.maxOutPoolStride, opt3.poolOutX, batchSize);
// 3->4
// cout << "3->4\n";
// original
dropMask3.resize(act3PoolMax);
dropMask3.randomizeBinary(keepProb);
act3PoolMax.eltwiseMult(dropMask3);
act3PoolMax.scale(1.0/keepProb);
act3PoolMax.transpose(true);
activate(act3PoolMax, act4, weight4, bias4, 0, 1);
act3PoolMax.transpose(false);
act4.transpose(false);
convMaxOut(act4, act4Max, maxMask4, opt4.numFilters/opt4.outX/opt4.outX, opt4.maxOutPoolSize, opt4.maxOutPoolStride, opt4.outX, batchSize);
// 4->top
// cout << "4->top\n";
dropMask4.resize(act4Max);
dropMask4.randomizeBinary(keepProb);
act4Max.eltwiseMult(dropMask4);
act4Max.scale(1.0/keepProb);
act4Max.transpose(true);
activate(act4Max, actTop, weightTop, biasTop, 0, 1);
act4Max.transpose(false);
//softmax layer
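			// Subtracting the per-row max before exponentiating is the usual
			// log-sum-exp stabilization: softmax(x)_j = exp(x_j - max_k x_k) / sum_l exp(x_l - max_k x_k)
			// is mathematically identical to the naive form but cannot overflow.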
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
			delete &sum;
// compute cost
computeLogregSoftmaxGrad(*GPURawLabelTrain[batch], actTop, actTopGrad, false, 1);
actTop.transpose(false);
computeLogregCost(*GPURawLabelTrain[batch], actTop, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum();
cost1 += trueLabelLogProbs.sum();
// ====== back pass ======
// top -> 4, 3, 2, 1
// cout << "top -> 4, 3, 2, 1\n";
// weight update
act4Max.transpose(false); actTopGrad.transpose(true);
weightTopGrad.addProduct(act4Max, actTopGrad, 0, 1);
biasTopGrad.addSum(actTopGrad, 0, 0, 1);
// bp
weightTop.transpose(true);
act4MaxGrad.transpose(true);
act4MaxGrad.addProduct(actTopGrad, weightTop, 0, 1);
weightTop.transpose(false);
// 4->3
// cout << "4->3\n";
act4MaxGrad.transpose(false); // convert back to row-major
act4Max.transpose(false);
act4MaxGrad.eltwiseMult(dropMask4);
act4MaxGrad.scale(1.0/keepProb);
convMaxOutUndo(act4MaxGrad, act4Grad, maxMask4, opt4.numFilters/opt4.outX/opt4.outX, opt4.maxOutPoolStride, opt4.outX, batchSize);
act3PoolMax.transpose(false); act4Grad.transpose(true);
weight4Grad.addProduct(act3PoolMax, act4Grad, 0, 1);
bias4Grad.addSum(act4Grad, 0, 0, 1);
// bp
weight4.transpose(true);
act3PoolMaxGrad.transpose(true);
act3PoolMaxGrad.addProduct(act4Grad, weight4, 0, 1);
weight4.transpose(false);
// 3->2
// cout << "3->2\n";
act3PoolMaxGrad.transpose(false);
act3PoolMax.transpose(false);
act3PoolMaxGrad.eltwiseMult(dropMask3);
act3PoolMaxGrad.scale(1.0/keepProb);
convMaxOutUndo(act3PoolMaxGrad, act3PoolGrad, maxMask3, opt3.numFilters, opt3.maxOutPoolStride, opt3.poolOutX, batchSize);
convLocalMaxUndo(act3, act3PoolGrad, act3Pool, act3Grad, opt3.poolSize, opt3.poolStartX, opt3.poolStride, opt3.poolOutX);
if (strcmp(opt3.layerType, "local") == 0) {
localWeightActs(act2PoolMax, act3Grad, weight3Grad, opt3.imSize, opt3.outX, opt3.outX, opt3.patchSize, opt3.paddingStart, 1, opt3.numChannels, 1);
bias3Grad.addSum(act3Grad, 1, 0, 1);
localImgActs(act3Grad, weight3, act2PoolMaxGrad, opt3.imSize, opt3.imSize, opt3.outX, opt3.paddingStart, 1, opt3.numChannels, 1);
}
else if (strcmp(opt3.layerType, "conv") == 0) {
convWeightActs(act2PoolMax, act3Grad, weight3GradTmp, opt3.imSize, opt3.outX, opt3.outX, opt3.patchSize, opt3.paddingStart, 1, opt3.numChannels, 1, opt3.partialSum);
weight3GradTmp.reshape(opt3.outX * opt3.outX / opt3.partialSum, opt3.numChannels * opt3.patchSize * opt3.patchSize * opt3.numFilters);
weight3Grad.addSum(weight3GradTmp, 0, 0, 1);
weight3Grad.reshape(opt3.numChannels * opt3.patchSize * opt3.patchSize, opt3.numFilters);
act3Grad.reshape(opt3.numFilters, opt3.outX * opt3.outX * batchSize);
bias3Grad.addSum(act3Grad, 1, 0, 1);
act3Grad.reshape(opt3.numFilters * opt3.outX * opt3.outX, batchSize);
convImgActs(act3Grad, weight3, act2PoolMaxGrad, opt3.imSize, opt3.imSize, opt3.outX, opt3.paddingStart, 1, opt3.numChannels, 1);
}
// 2->1
// cout << "2->1\n";
// original part
act2PoolMaxGrad.transpose(false);
act2PoolMax.transpose(false);
act2PoolMaxGrad.eltwiseMult(dropMask2);
act2PoolMaxGrad.scale(1.0/keepProb);
convMaxOutUndo(act2PoolMaxGrad, act2PoolGrad, maxMask2, opt2.numFilters, opt2.maxOutPoolStride, opt2.poolOutX, batchSize);
convLocalMaxUndo(act2, act2PoolGrad, act2Pool, act2Grad, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX);
convWeightActs(act1PoolMax, act2Grad, weight2GradTmp, opt2.imSize, opt2.outX, opt2.outX, opt2.patchSize, opt2.paddingStart, 1, opt2.numChannels, 1, opt2.partialSum);
weight2GradTmp.reshape(opt2.outX * opt2.outX / opt2.partialSum, opt2.numChannels * opt2.patchSize * opt2.patchSize * opt2.numFilters);
weight2Grad.addSum(weight2GradTmp, 0, 0, 1);
weight2Grad.reshape(opt2.numChannels * opt2.patchSize * opt2.patchSize, opt2.numFilters);
act2Grad.reshape(opt2.numFilters, opt2.outX * opt2.outX * batchSize);
bias2Grad.addSum(act2Grad, 1, 0, 1);
act2Grad.reshape(opt2.numFilters * opt2.outX * opt2.outX, batchSize);
convImgActs(act2Grad, weight2, act1PoolMaxGrad, opt2.imSize, opt2.imSize, opt2.outX, opt2.paddingStart, 1, opt2.numChannels, 1);
// 1->0
// cout << "1->0\n";
// original part
act1PoolMaxGrad.transpose(false);
act1PoolMax.transpose(false);
act1PoolMaxGrad.eltwiseMult(dropMask1);
act1PoolMaxGrad.scale(1.0/keepProb);
convMaxOutUndo(act1PoolMaxGrad, act1PoolGrad, maxMask1, opt1.numFilters, opt1.maxOutPoolStride, opt1.poolOutX, batchSize);
convLocalMaxUndo(act1, act1PoolGrad, act1Pool, act1Grad, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX);
convWeightActs(act0, act1Grad, weight1GradTmp, opt1.imSize, opt1.outX, opt1.outX, opt1.patchSize, opt1.paddingStart, 1, opt1.numChannels, 1, opt1.partialSum);
weight1GradTmp.reshape(opt1.outX * opt1.outX / opt1.partialSum, opt1.numChannels * opt1.patchSize * opt1.patchSize * opt1.numFilters);
weight1Grad.addSum(weight1GradTmp, 0, 0, 1);
weight1Grad.reshape(opt1.numChannels * opt1.patchSize * opt1.patchSize, opt1.numFilters);
act1Grad.reshape(opt1.numFilters, opt1.outX * opt1.outX * batchSize);
bias1Grad.addSum(act1Grad, 1, 0, 1);
act1Grad.reshape(opt1.numFilters * opt1.outX * opt1.outX, batchSize);
// update
updateWeight(weight1Grad, weight1Inc, weight1, opt1, batchSize, lr_scale, mom_scale);
updateWeight(weight2Grad, weight2Inc, weight2, opt2, batchSize, lr_scale, mom_scale);
updateWeight(weight3Grad, weight3Inc, weight3, opt3, batchSize, lr_scale, mom_scale);
updateWeight(weight4Grad, weight4Inc, weight4, opt4, batchSize, lr_scale, mom_scale);
updateWeight(weightTopGrad, weightTopInc, weightTop, optTop, batchSize, lr_scale, mom_scale);
updateBias(bias1Grad, bias1Inc, bias1, opt1, batchSize, lr_scale, mom_scale);
updateBias(bias2Grad, bias2Inc, bias2, opt2, batchSize, lr_scale, mom_scale);
updateBias(bias3Grad, bias3Inc, bias3, opt3, batchSize, lr_scale, mom_scale);
updateBias(bias4Grad, bias4Inc, bias4, opt4, batchSize, lr_scale, mom_scale);
updateBias(biasTopGrad, biasTopInc, biasTop, optTop, batchSize, lr_scale, mom_scale);
// normalize weights
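			// Renormalizing each weight column to at most maxNorm is, presumably, the
			// max-norm constraint commonly paired with dropout/maxout training: it
			// bounds weight growth in place of an explicit L2 penalty.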
NVNormalizeCol2(weight1, bias1, normCol1, tmp1, opt1.maxNorm);
NVNormalizeCol2(weight2, bias2, normCol2, tmp2, opt2.maxNorm);
NVNormalizeCol2(weight3, bias3, normCol3, tmp3, opt3.maxNorm);
NVNormalizeCol2(weight4, bias4, normCol4, tmp4, opt4.maxNorm);
NVNormalizeCol2(weightTop, biasTop, normColTop, tmpTop, optTop.maxNorm);
		} // for (int batch = 0; batch < GPUTrain.size(); batch++)
// compute cost
cost /= opt1.numTrain;
cost1 /= opt1.numTrain;
printf("\nfinished epoch %d of %d; classify precision = %f; objective = %f; elapsed time = %f seconds\n", epoch, opt1.numEpochs,
cost, cost1, (float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "\nfinished epoch %d of %d; classify precision = %f; objective = %f; elapsed time = %f seconds\n", epoch, opt1.numEpochs,
cost, cost1, (float)(clock() - startClock)/CLOCKS_PER_SEC);
printf("weight norms\nweight1 = %f\nweight2 = %f\nweight3 = %f\nweight4 = %f\nweightTop = %f\n"
"bias1 = %f\nbias2 = %f\nbias3 = %f\nbias4 = %f\nbiasTop = %f\n",
normCol1.max(), normCol2.max(), normCol3.max(), normCol4.max(), normColTop.max(),
bias1.norm(), bias2.norm(), bias3.norm(), bias4.norm(), biasTop.norm());
fprintf(pFile, "weight norms\nweight1 = %f\nweight2 = %f\nweight3 = %f\nweight4 = %f\nweightTop = %f\n"
"bias1 = %f\nbias2 = %f\nbias3 = %f\nbias4 = %f\nbiasTop = %f\n",
normCol1.max(), normCol2.max(), normCol3.max(), normCol4.max(), normColTop.max(),
bias1.norm(), bias2.norm(), bias3.norm(), bias4.norm(), biasTop.norm());
printf("lr_scale = %f, mom_scale = %f, keepProb = %f, keepInputProb = %f\n", lr_scale, mom_scale, keepProb, keepInputProb);
fprintf(pFile, "lr_scale = %f, mom_scale = %f, keepProb = %f, keepInputProb = %f\n", lr_scale, mom_scale, keepProb, keepInputProb);
// decay learning rate
lr_scale = lrDecay(lr_scale, opt1.lrDecayType, opt1.lrDecayFactor, opt1.lrMinRate);
mom_scale = momInc(mom_scale, opt1.momIncType, opt1.momIncFactor, opt1.momMaxRate);
// decay dropout
keepProb = keepProb + opt1.keepIncRate;
keepProb = keepProb > opt1.keepEndRate ? opt1.keepEndRate : keepProb;
keepInputProb = keepInputProb + opt1.keepInputIncRate;
keepInputProb = keepInputProb > opt1.keepInputEndRate ? opt1.keepInputEndRate : keepInputProb;
// process the test set every 3 epochs
if (epoch % 3 == 2) {
hipDeviceSynchronize();
startClock = clock();
cost = 0;
cost1 = 0;
for (int batch = 0; batch < GPUTest.size(); batch++) {
batchSize = GPUTest[batch]->getNumCols();
// ====forward pass====
// 0->1
// cout << "0->1\n";
//original
GPUTest[batch]->copy(act0);
act0.scale(opt1.inputScale);
activateConv(act0, act1, weight1, bias1, opt1);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convMaxOut(act1Pool, act1PoolMax, maxMask1, opt1.numFilters, opt1.maxOutPoolSize, opt1.maxOutPoolStride, opt1.poolOutX, batchSize);
act1PoolMax.transpose(false);
// 1->2
// cout << "1->2\n";
//original
activateConv(act1PoolMax, act2, weight2, bias2, opt2);
convLocalPool(act2, act2Pool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
convMaxOut(act2Pool, act2PoolMax, maxMask2, opt2.numFilters, opt2.maxOutPoolSize, opt2.maxOutPoolStride, opt2.poolOutX, batchSize);
act2PoolMax.transpose(false);
// 2->3
// cout << "2->3\n";
// original
if (strcmp(opt3.layerType, "local") == 0)
activateLocal(act2PoolMax, act3, weight3, bias3, opt3);
else if (strcmp(opt3.layerType, "conv") == 0)
activateConv(act2PoolMax, act3, weight3, bias3, opt3);
convLocalPool(act3, act3Pool, opt3.numFilters, opt3.poolSize, opt3.poolStartX, opt3.poolStride, opt3.poolOutX, MaxPooler());
convMaxOut(act3Pool, act3PoolMax, maxMask3, opt3.numFilters, opt3.maxOutPoolSize, opt3.maxOutPoolStride, opt3.poolOutX, batchSize);
// 3->4
// cout << "3->4\n";
// original
act3PoolMax.transpose(true);
activate(act3PoolMax, act4, weight4, bias4, 0, 1);
act3PoolMax.transpose(false);
act4.transpose(false);
convMaxOut(act4, act4Max, maxMask4, opt4.numFilters/opt4.outX/opt4.outX, opt4.maxOutPoolSize, opt4.maxOutPoolStride, opt4.outX, batchSize);
// 4->top
// cout << "4->top\n";
act4Max.transpose(true);
activate(act4Max, actTop, weightTop, biasTop, 0, 1);
act4Max.transpose(false);
//softmax layer
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
				delete &sum;
// compute cost
computeLogregSoftmaxGrad(*GPURawLabelTest[batch], actTop, actTopGrad, false, 1);
actTop.transpose(false);
computeLogregCost(*GPURawLabelTest[batch], actTop, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum();
cost1 += trueLabelLogProbs.sum();
} //for (int batch = opt1.batchNum; batch < opt1.batchNum+opt1.testBatchNum; batch++)
hipDeviceSynchronize();
cost /= opt1.numTest;
cost1 /= opt1.numTest;
printf("\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
// save checkpoint
NVSaveToFile(weight1, opt1.weightPath + "/weight1.bin");
NVSaveToFile(weight2, opt1.weightPath + "/weight2.bin");
NVSaveToFile(weight3, opt1.weightPath + "/weight3.bin");
NVSaveToFile(weight4, opt1.weightPath + "/weight4.bin");
NVSaveToFile(weightTop, opt1.weightPath + "/weightTop.bin");
NVSaveToFile(bias1, opt1.weightPath + "/bias1.bin");
NVSaveToFile(bias2, opt1.weightPath + "/bias2.bin");
NVSaveToFile(bias3, opt1.weightPath + "/bias3.bin");
NVSaveToFile(bias4, opt1.weightPath + "/bias4.bin");
NVSaveToFile(biasTop, opt1.weightPath + "/biasTop.bin");
printf("Checkpoint saved!\n\n");
fprintf(pFile, "Checkpoint saved!\n\n");
} //if (epoch % 3 == 2)
} // for (int epoch = 0; epoch < opt1.numEpochs; epoch++)
printf("finetuning_rnorm_maxout() complete!\n");
fprintf(pFile, "finetuning_rnorm_maxout() complete!\n");
CPUTrain.clear();
GPUTrain.clear();
CPUTest.clear();
GPUTest.clear();
GPURawLabelTrain.clear();
GPURawLabelTest.clear();
} // void finetune_rnorm_maxout()
void multiViewTest_maxout() {
////assignOpt();
printf("starting multiViewTest_maxout()!\n");
fprintf(pFile, "starting multiViewTest_maxout()!\n");
// initialize cublas
hipSetDevice(cutGetMaxGflopsDeviceId());
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipblasInit();
// data and parameters storage
NVMatrix act1, act1Max, act1MaxPool, act1Denom;
NVMatrix act2, act2Max, act2MaxPool, act2Denom;
NVMatrix act3, act3Max, act3MaxPool;
NVMatrix act4, act4Max;
NVMatrix actTop;
NVMatrix softMaxAct;
NVMask maxMask1, maxMask2, maxMask3, maxMask4;
NVMatrix weight1, weight2, weight3, weight4, weightTop;
NVMatrix bias1, bias2, bias3, bias4, biasTop; // bias4 is just an all-zero dummy vector
// initialize parameters
weight1.resize(opt1.numVis, opt1.numFilters);
weight2.resize(opt2.numVis, opt2.numFilters);
if (strcmp(opt3.layerType, "local") == 0)
weight3.resize(opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
else if (strcmp(opt3.layerType, "conv") == 0)
weight3.resize(opt3.numVis, opt3.numFilters);
weight4.resize(opt4.numVis, opt4.numFilters);
weightTop.resize(optTop.numVis, optTop.numFilters);
bias1.resize(opt1.numFilters, 1);
bias2.resize(opt2.numFilters, 1);
if (strcmp(opt3.layerType, "local") == 0)
bias3.resize(opt3.numFilters * opt3.outX * opt3.outX, 1);
else if (strcmp(opt3.layerType, "conv") == 0)
bias3.resize(opt3.numFilters, 1);
bias4.resize(1, opt4.numFilters);
bias4.setTrans(true);
biasTop.resize(1, optTop.numFilters);
biasTop.setTrans(true);
NVReadFromFile(weight1, opt1.dataPath + "/weight1.bin");
NVReadFromFile(weight2, opt1.dataPath + "/weight2.bin");
NVReadFromFile(weight3, opt1.dataPath + "/weight3.bin");
NVReadFromFile(weight4, opt1.dataPath + "/weight4.bin");
NVReadFromFile(weightTop, opt1.dataPath + "/weightTop.bin");
NVReadFromFile(bias1, opt1.dataPath + "/bias1.bin");
NVReadFromFile(bias2, opt1.dataPath + "/bias2.bin");
NVReadFromFile(bias3, opt1.dataPath + "/bias3.bin");
NVReadFromFile(bias4, opt1.dataPath + "/bias4.bin");
NVReadFromFile(biasTop, opt1.dataPath + "/biasTop.bin");
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTest(testBatchNum);
vector<NVMatrix*> GPUTest(testBatchNum*opt1.numViews);
vector<NVMatrix*> GPURawLabelTest(testBatchNum);
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
for (int r = 0; r < opt1.numViews; r++)
GPUTest[batch*opt1.numViews+r] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
for (int r = 0; r < opt1.numViews; r++)
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+testBatchNum*opt1.batchSize); // offset counts full-size batches, not the remainder
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+testBatchNum*opt1.batchSize);
}
multiViewDataProvider(CPUTest, GPUTest, opt1, opt1.numViews, opt1.whitened); // copy data to the GPU side
NVMatrix trueLabelLogProbs;
NVMatrix correctProbs;
MTYPE cost; // as before, we trace the performance using the cost variable
MTYPE cost1;
clock_t startClock;
clock_t tick;
cost = 0;
cost1 = 0;
hipDeviceSynchronize();
startClock = clock();
for (int batch = 0; batch < CPUTest.size(); batch++) {
batchSize = CPUTest[batch]->getNumCols();
for (int r = 0; r < opt1.numViews; r++) {
// ====forward pass====
// 0->1
//cout << "0->1\n";
//original
activateConv(*GPUTest[batch*opt1.numViews+r], act1, weight1, bias1, opt1);
convMaxOut(act1, act1Max, maxMask1, opt1.numFilters, opt1.maxOutPoolSize, opt1.maxOutPoolStride, opt1.outX, batchSize);
act1MaxPool.transpose(false);
convLocalPool(act1Max, act1MaxPool, opt1.numGroups, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
// 1->2
//cout << "1->2\n";
//original
activateConv(act1MaxPool, act2, weight2, bias2, opt2);
convMaxOut(act2, act2Max, maxMask2, opt2.numFilters, opt2.maxOutPoolSize, opt2.maxOutPoolStride, opt2.outX, batchSize);
act2MaxPool.transpose(false);
convLocalPool(act2Max, act2MaxPool, opt2.numGroups, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
// 2->3
//cout << "2->3\n";
// original
if (strcmp(opt3.layerType, "local") == 0)
activateLocal(act2MaxPool, act3, weight3, bias3, opt3);
else if (strcmp(opt3.layerType, "conv") == 0)
activateConv(act2MaxPool, act3, weight3, bias3, opt3);
convMaxOut(act3, act3Max, maxMask3, opt3.numFilters, opt3.maxOutPoolSize, opt3.maxOutPoolStride, opt3.outX, batchSize);
convLocalPool(act3Max, act3MaxPool, opt3.numGroups, opt3.poolSize, opt3.poolStartX, opt3.poolStride, opt3.poolOutX, MaxPooler());
// 3->4
// cout << "3->4\n";
// original
act3MaxPool.transpose(true);
activate(act3MaxPool, act4, weight4, bias4, 0, 1);
act3MaxPool.transpose(false);
act4.transpose(false);
convMaxOut(act4, act4Max, maxMask4, opt4.numFilters/opt4.outX/opt4.outX, opt4.maxOutPoolSize, opt4.maxOutPoolStride, opt4.outX, batchSize);
// 4->top
// cout << "4->top\n";
act4Max.transpose(true);
activate(act4Max, actTop, weightTop, biasTop, 0, 1);
act4Max.transpose(false);
//softmax layer
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
delete &sum;
actTop.transpose(false);
if (r == 0)
actTop.copy(softMaxAct);
else
softMaxAct.add(actTop);
}// for (r = 0:9)
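// average the per-view softmax outputs; the 0.1 factor hard-codes opt1.numViews == 10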
softMaxAct.scale(0.1);
computeLogregCost(*GPURawLabelTest[batch], softMaxAct, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum();
cost1 += trueLabelLogProbs.sum();
}//for (batches)
hipDeviceSynchronize();
cost /= opt1.numTest;
cost1 /= opt1.numTest;
printf("\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
printf("multiViewTest_maxout() complete!\n");
fprintf(pFile, "\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "multiViewTest_maxout() complete!\n");
CPUTest.clear();
GPUTest.clear();
GPURawLabelTest.clear();
} // void multiViewTest_maxout()
| e374884b7e5d2a508534679a37ea79bf2b59b737.cu | #include <vector>
#include <iostream>
#include <string>
#include "routines.cuh"
#include "layer_kernels.cuh"
#include <cudaconv2.cuh>
using namespace std;
extern LayerOpt opt1, opt2, opt3, opt4, optTop;
extern FILE* pFile;
extern float perturbScale;
void finetune_rnorm_maxout() {
////assignOpt();
printf("starting finetune_rnorm_competeOut()!\nkeepProb = %f-%f-%f\nperturbScale = %f\n", opt1.keepStartRate, opt1.keepIncRate, opt1.keepEndRate, perturbScale);
printf("inputKeepProb = %f-%f-%f\n", opt1.keepInputStartRate, opt1.keepInputIncRate, opt1.keepInputEndRate);
printf("image sizes\nlayer1: %d\nlayer2: %d\nlayer3: %d\nlayer4: %d\n", opt1.imSize, opt2.imSize, opt3.imSize, opt4.imSize);
printf("image channels\nlayer1: %d\nlayer2: %d\nlayer3: %d\nlayer4: %d\nlayerTop: %d\n", opt1.numChannels, opt2.numChannels, opt3.numChannels, opt4.numChannels, opt4.numFilters);
printf("learning rates: layer1--%f, layer2--%f, layer3--%f, layer4--%f, layerTop--%f\n", opt1.lrW, opt2.lrW, opt3.lrW, opt4.lrW, optTop.lrW);
printf("bias rates: layer1--%f, layer2--%f, layer3--%f, layer4--%f, layerTop--%f\n", opt1.lrB, opt2.lrB, opt3.lrB, opt4.lrB, optTop.lrB);
printf("inputScale--%f\n", opt1.inputScale);
printf("lrStartRate = %f, momStartRate = %f\n", opt1.lrStartScale, opt1.momStartScale);
printf("lrEndRate = %f, momEndRate = %f\n", opt1.lrMinRate, opt1.momMaxRate);
printf("lrDecayFactor = %f, momIncFactor = %f\n", opt1.lrDecayFactor, opt1.momIncFactor);
printf("lr schedule = %s, mom schedule = %s\n", opt1.lrDecayType, opt1.momIncType);
printf("competeMax sizes: layer1 -- %d, layer2 -- %d, layer3 -- %d, layer4 -- %d\n", opt1.maxOutPoolSize, opt2.maxOutPoolSize, opt3.maxOutPoolSize, opt4.maxOutPoolSize);
printf("competeMax strides: layer1 -- %d, layer2 -- %d, layer3 -- %d, layer4 -- %d\n", opt1.maxOutPoolStride, opt2.maxOutPoolStride, opt3.maxOutPoolStride, opt4.maxOutPoolStride);
printf("labelSize = %d\n", opt1.labelSize);
printf("flip = %d\n", opt1.flip);
fprintf(pFile, "starting finetune_rnorm_competeOut()!\nkeepProb = %f-%f-%f\nperturbScale = %f\n", opt1.keepStartRate, opt1.keepIncRate, opt1.keepEndRate, perturbScale);
fprintf(pFile, "inputKeepProb = %f-%f-%f\n", opt1.keepInputStartRate, opt1.keepInputIncRate, opt1.keepInputEndRate);
fprintf(pFile, "image sizes\nlayer1: %d\nlayer2: %d\nlayer3: %d\nlayer4: %d\n", opt1.imSize, opt2.imSize, opt3.imSize, opt4.imSize);
fprintf(pFile, "image channels\nlayer1: %d\nlayer2: %d\nlayer3: %d\nlayer4: %d\nlayerTop: %d\n", opt1.numChannels, opt2.numChannels, opt3.numChannels, opt4.numChannels, opt4.numFilters);
fprintf(pFile, "learning rates: layer1--%f, layer2--%f, layer3--%f, layer4--%f, layerTop--%f\n", opt1.lrW, opt2.lrW, opt3.lrW, opt4.lrW, optTop.lrW);
fprintf(pFile, "bias rates: layer1--%f, layer2--%f, layer3--%f, layer4--%f, layerTop--%f\n", opt1.lrB, opt2.lrB, opt3.lrB, opt4.lrB, optTop.lrB);
fprintf(pFile, "inputScale--%f\n", opt1.inputScale);
fprintf(pFile, "lrStartRate = %f, momStartRate = %f\n", opt1.lrStartScale, opt1.momStartScale);
fprintf(pFile, "lrEndRate = %f, momEndRate = %f\n", opt1.lrMinRate, opt1.momMaxRate);
fprintf(pFile, "lrDecayFactor = %f, momIncFactor = %f\n", opt1.lrDecayFactor, opt1.momIncFactor);
fprintf(pFile, "lr schedule = %s, mom schedule = %s\n", opt1.lrDecayType, opt1.momIncType);
fprintf(pFile, "competeMax sizes: layer1 -- %d, layer2 -- %d, layer3 -- %d, layer4 -- %d\n", opt1.maxOutPoolSize, opt2.maxOutPoolSize, opt3.maxOutPoolSize, opt4.maxOutPoolSize);
fprintf(pFile, "competeMax strides: layer1 -- %d, layer2 -- %d, layer3 -- %d, layer4 -- %d\n", opt1.maxOutPoolStride, opt2.maxOutPoolStride, opt3.maxOutPoolStride, opt4.maxOutPoolStride);
fprintf(pFile, "labelSize = %d\n", opt1.labelSize);
fprintf(pFile, "flip = %d\n", opt1.flip);
// data and parameters storage
NVMatrix act0; // avatar of the input
NVMatrix act1, act1Pool, act1PoolMax;
NVMatrix act2, act2Pool, act2PoolMax;
NVMatrix act3, act3Pool, act3PoolMax;
NVMatrix act4, act4Max;
NVMatrix actTop;
NVMatrix act1Grad, act1PoolGrad, act1PoolMaxGrad;
NVMatrix act2Grad, act2PoolGrad, act2PoolMaxGrad;
NVMatrix act3Grad, act3PoolGrad, act3PoolMaxGrad;
NVMatrix act4Grad, act4MaxGrad;
NVMatrix actTopGrad;
NVMatrix weight1, weight2, weight3, weight4, weightTop;
NVMatrix weight1Grad, weight2Grad, weight3Grad, weight4Grad, weightTopGrad;
NVMatrix weight1Inc, weight2Inc, weight3Inc, weight4Inc, weightTopInc;
NVMatrix weight1GradTmp, weight2GradTmp, weight3GradTmp, weight4GradTmp, weightTopGradTmp;
NVMatrix bias1, bias2, bias3, bias4, biasTop; // bias4 is just an all-zero dummy vector
NVMatrix bias1Grad, bias2Grad, bias3Grad, bias4Grad, biasTopGrad;
NVMatrix bias1Inc, bias2Inc, bias3Inc, bias4Inc, biasTopInc;
// initialize parameters
//if (opt1.loadParam) {
weight1.resize(opt1.numVis, opt1.numFilters);
weight2.resize(opt2.numVis, opt2.numFilters);
if (strcmp(opt3.layerType, "local") == 0)
weight3.resize(opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
else if (strcmp(opt3.layerType, "conv") == 0)
weight3.resize(opt3.numVis, opt3.numFilters);
weight4.resize(opt4.numVis, opt4.numFilters);
weightTop.resize(optTop.numVis, optTop.numFilters);
bias1.resize(opt1.numFilters, 1);
bias2.resize(opt2.numFilters, 1);
if (strcmp(opt3.layerType, "local") == 0)
bias3.resize(opt3.numFilters * opt3.outX * opt3.outX, 1);
else if (strcmp(opt3.layerType, "conv") == 0)
bias3.resize(opt3.numFilters, 1);
bias4.resize(1, opt4.numFilters);
bias4.setTrans(true);
biasTop.resize(1, optTop.numFilters);
biasTop.setTrans(true);
if (opt1.loadParam) {
NVReadFromFile(weight1, opt1.weightPath + "/weight1.bin");
NVReadFromFile(weight2, opt1.weightPath + "/weight2.bin");
NVReadFromFile(weight3, opt1.weightPath + "/weight3.bin");
NVReadFromFile(weight4, opt1.weightPath + "/weight4.bin");
NVReadFromFile(weightTop, opt1.weightPath + "/weightTop.bin");
NVReadFromFile(bias1, opt1.weightPath + "/bias1.bin");
NVReadFromFile(bias2, opt1.weightPath + "/bias2.bin");
NVReadFromFile(bias3, opt1.weightPath + "/bias3.bin");
NVReadFromFile(bias4, opt1.weightPath + "/bias4.bin");
NVReadFromFile(biasTop, opt1.weightPath + "/biasTop.bin");
}
else {
weight1.randomizeUniform(); weight1.scale(2*opt1.initstv); weight1.addScalar(-opt1.initstv);
weight2.randomizeUniform(); weight2.scale(2*opt2.initstv); weight2.addScalar(-opt2.initstv);
weight3.randomizeUniform(); weight3.scale(2*opt3.initstv); weight3.addScalar(-opt3.initstv);
weight4.randomizeUniform(); weight4.scale(2*opt4.initstv); weight4.addScalar(-opt4.initstv);
weightTop.randomizeUniform(); weightTop.scale(2*optTop.initstv); weightTop.addScalar(-optTop.initstv);
bias1.apply(NVMatrixOps::Zero());
bias2.apply(NVMatrixOps::Zero());
bias3.apply(NVMatrixOps::Zero());
bias4.apply(NVMatrixOps::Zero());
biasTop.apply(NVMatrixOps::Zero());
}
initWeights(weight1Inc, opt1.numVis, opt1.numFilters, false, 0.0); initWeights(weight1Grad, opt1.numVis, opt1.numFilters, false, 0.0);
initWeights(weight2Inc, opt2.numVis, opt2.numFilters, false, 0.0); initWeights(weight2Grad, opt2.numVis, opt2.numFilters, false, 0.0);
if (strcmp(opt3.layerType, "local") == 0) {
initWeights(weight3Inc, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, 0.0);
initWeights(weight3Grad, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, 0.0);
}
else if (strcmp(opt3.layerType, "conv") == 0) {
initWeights(weight3Inc, opt3.numVis, opt3.numFilters, false, 0.0);
initWeights(weight3Grad, opt3.numVis, opt3.numFilters, false, 0.0);
}
initWeights(weight4Inc, opt4.numVis, opt4.numFilters, false, 0.0); initWeights(weight4Grad, opt4.numVis, opt4.numFilters, false, 0.0);
initWeights(weightTopInc, optTop.numVis, optTop.numFilters, false, 0.0); initWeights(weightTopGrad, optTop.numVis, optTop.numFilters, false, 0.0);
initWeights(bias1Inc, opt1.numFilters, 1, false, 0.0); initWeights(bias1Grad, opt1.numFilters, 1, false, 0.0);
initWeights(bias2Inc, opt2.numFilters, 1, false, 0.0); initWeights(bias2Grad, opt2.numFilters, 1, false, 0.0);
if (strcmp(opt3.layerType, "local") == 0) {
initWeights(bias3Inc, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0);
initWeights(bias3Grad, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0);
}
else if (strcmp(opt3.layerType, "conv") == 0) {
initWeights(bias3Inc, opt3.numFilters, 1, false, 0.0);
initWeights(bias3Grad, opt3.numFilters, 1, false, 0.0);
}
initWeights(bias4Inc, 1, opt4.numFilters, false, 0.0); initWeights(bias4Grad, 1, opt4.numFilters, false, 0.0);
initWeights(biasTopInc, 1, optTop.numFilters, true, 0.0); initWeights(biasTopGrad, 1, optTop.numFilters, true, 0.0);
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int trainBatchNum = opt1.numTrain / batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTrain(trainBatchNum), CPUTest(testBatchNum);
vector<NVMatrix*> GPUTrain(trainBatchNum), GPUTest(testBatchNum);
vector<NVMatrix*> GPURawLabelTrain(trainBatchNum), GPURawLabelTest(testBatchNum);
for (int batch = 0; batch < trainBatchNum; batch++) {
CPUTrain[batch] = new Matrix(imPixels, batchSize);
CPUTrain[batch]->setTrans(false);
GPUTrain[batch] = new NVMatrix();
GPURawLabelTrain[batch] = new NVMatrix(1, batchSize);
GPURawLabelTrain[batch]->setTrans(false);
if (strcmp(opt1.exp.c_str(), "cifar10") == 0) {
hmReadFromFile(*CPUTrain[batch], opt1.dataPath + "/cifar_whitened.bin", batch*batchSize);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], opt1.dataPath + "/cifar_labels.bin", batch*batchSize);
}
else if (strcmp(opt1.exp.c_str(), "cifar100") == 0) {
hmReadFromFile(*CPUTrain[batch], opt1.dataPath + "/cifar100_whitened.bin", batch*batchSize);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], opt1.dataPath + "/cifar100_fine_labels.bin", batch*batchSize);
}
}
batchSize = opt1.numTrain % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTrain.push_back(new Matrix(imPixels, batchSize));
CPUTrain.back()->setTrans(false);
GPUTrain.push_back(new NVMatrix());
GPURawLabelTrain.push_back(new NVMatrix(1, batchSize));
GPURawLabelTrain.back()->setTrans(false);
if (strcmp(opt1.exp.c_str(), "cifar10") == 0) {
hmReadFromFile(*CPUTrain.back(), opt1.dataPath + "/cifar_whitened.bin", trainBatchNum*opt1.batchSize); // offset counts full-size batches, not the remainder
NVRawLabelReadFromFile(*GPURawLabelTrain.back(), opt1.dataPath + "/cifar_labels.bin", trainBatchNum*opt1.batchSize);
}
else if (strcmp(opt1.exp.c_str(), "cifar100") == 0) {
hmReadFromFile(*CPUTrain.back(), opt1.dataPath + "/cifar100_whitened.bin", trainBatchNum*opt1.batchSize);
NVRawLabelReadFromFile(*GPURawLabelTrain.back(), opt1.dataPath + "/cifar100_fine_labels.bin", trainBatchNum*opt1.batchSize);
}
}
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
GPUTest[batch] = new NVMatrix();
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
if (strcmp(opt1.exp.c_str(), "cifar10") == 0) {
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar_whitened.bin", opt1.numTrain+batch*batchSize);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
else if (strcmp(opt1.exp.c_str(), "cifar100") == 0) {
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar100_whitened.bin", opt1.numTrain+batch*batchSize);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar100_fine_labels.bin", opt1.numTrain+batch*batchSize);
}
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
GPUTest.push_back(new NVMatrix());
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
if (strcmp(opt1.exp.c_str(), "cifar10") == 0) {
hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar_whitened.bin", opt1.numTrain+testBatchNum*opt1.batchSize); // offset counts full-size batches, not the remainder
NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+testBatchNum*opt1.batchSize);
}
else if (strcmp(opt1.exp.c_str(), "cifar100") == 0) {
hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar100_whitened.bin", opt1.numTrain+testBatchNum*opt1.batchSize);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar100_fine_labels.bin", opt1.numTrain+testBatchNum*opt1.batchSize);
}
}
NVMatrix trueLabelLogProbs;
NVMatrix correctProbs;
MTYPE cost; // as before, we trace the performance using the cost variable
MTYPE cost1;
NVMatrix absM;
MTYPE weightAbs1, weightAbs2, weightAbs3, weightAbs4, weightAbsTop;
MTYPE biasAbs1, biasAbs2, biasAbs3, biasAbs4, biasAbsTop;
MTYPE weightGradAbs1, weightGradAbs2, weightGradAbs3, weightGradAbs4, weightGradAbsTop;
MTYPE biasGradAbs1, biasGradAbs2, biasGradAbs3, biasGradAbs4, biasGradAbsTop;
clock_t startClock;
clock_t tick;
cropDataProvider(CPUTest, GPUTest, opt1, true, opt1.whitened); // test data is fixed
NVMask maxMask1, maxMask2, maxMask3, maxMask4;
NVMatrix dropMask0, dropMask1, dropMask2, dropMask3, dropMask4;
// normalization
NVMatrix normCol1, normCol2, normCol3, normCol4, normColTop;
NVMatrix tmp1, tmp2, tmp3, tmp4, tmpTop;
float lr_scale = opt1.lrStartScale, mom_scale = opt1.momStartScale;
float keepProb = opt1.keepStartRate;
float keepInputProb = opt1.keepInputStartRate;
for (int epoch = 0; epoch < opt1.numEpochs; epoch++) {
cost = 0;
cost1 = 0;
cropDataProvider(CPUTrain, GPUTrain, opt1, false, opt1.whitened); // copy data to the GPU side
cudaThreadSynchronize();
startClock = clock();
for (int batch = 0; batch < GPUTrain.size(); batch++) {
batchSize = GPUTrain[batch]->getNumCols();
// ====forward pass====
// 0->1
// cout << "0->1\n";
//original
//act0.resize()
GPUTrain[batch]->copy(act0);
dropMask0.resize(act0);
dropMask0.randomizeBinary(keepInputProb); // input part
act0.eltwiseMult(dropMask0);
act0.scale(opt1.inputScale);
act0.scale(1.0/keepInputProb); // rescale by the keep probability actually used for dropMask0 (was 1.0/keepProb)
activateConv(act0, act1, weight1, bias1, opt1);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convMaxOut(act1Pool, act1PoolMax, maxMask1, opt1.numFilters, opt1.maxOutPoolSize, opt1.maxOutPoolStride, opt1.poolOutX, batchSize);
act1PoolMax.transpose(false);
// 1->2
// cout << "1->2\n";
//original
dropMask1.resize(act1PoolMax);
dropMask1.randomizeBinary(keepProb);
act1PoolMax.eltwiseMult(dropMask1);
act1PoolMax.scale(1.0/keepProb);
activateConv(act1PoolMax, act2, weight2, bias2, opt2);
convLocalPool(act2, act2Pool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
convMaxOut(act2Pool, act2PoolMax, maxMask2, opt2.numFilters, opt2.maxOutPoolSize, opt2.maxOutPoolStride, opt2.poolOutX, batchSize);
act2PoolMax.transpose(false);
// 2->3
// cout << "2->3\n";
// original
dropMask2.resize(act2PoolMax);
dropMask2.randomizeBinary(keepProb);
act2PoolMax.eltwiseMult(dropMask2);
act2PoolMax.scale(1.0/keepProb);
if (strcmp(opt3.layerType, "local") == 0)
activateLocal(act2PoolMax, act3, weight3, bias3, opt3);
else if (strcmp(opt3.layerType, "conv") == 0)
activateConv(act2PoolMax, act3, weight3, bias3, opt3);
convLocalPool(act3, act3Pool, opt3.numFilters, opt3.poolSize, opt3.poolStartX, opt3.poolStride, opt3.poolOutX, MaxPooler());
convMaxOut(act3Pool, act3PoolMax, maxMask3, opt3.numFilters, opt3.maxOutPoolSize, opt3.maxOutPoolStride, opt3.poolOutX, batchSize);
// 3->4
// cout << "3->4\n";
// original
dropMask3.resize(act3PoolMax);
dropMask3.randomizeBinary(keepProb);
act3PoolMax.eltwiseMult(dropMask3);
act3PoolMax.scale(1.0/keepProb);
act3PoolMax.transpose(true);
activate(act3PoolMax, act4, weight4, bias4, 0, 1);
act3PoolMax.transpose(false);
act4.transpose(false);
convMaxOut(act4, act4Max, maxMask4, opt4.numFilters/opt4.outX/opt4.outX, opt4.maxOutPoolSize, opt4.maxOutPoolStride, opt4.outX, batchSize);
// 4->top
// cout << "4->top\n";
dropMask4.resize(act4Max);
dropMask4.randomizeBinary(keepProb);
act4Max.eltwiseMult(dropMask4);
act4Max.scale(1.0/keepProb);
act4Max.transpose(true);
activate(act4Max, actTop, weightTop, biasTop, 0, 1);
act4Max.transpose(false);
//softmax layer
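// subtract the per-case max before exponentiating so the softmax is numerically stable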
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
delete &sum;
// compute cost
computeLogregSoftmaxGrad(*GPURawLabelTrain[batch], actTop, actTopGrad, false, 1);
actTop.transpose(false);
computeLogregCost(*GPURawLabelTrain[batch], actTop, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum();
cost1 += trueLabelLogProbs.sum();
// ====== back pass ======
// top -> 4, 3, 2, 1
// cout << "top -> 4, 3, 2, 1\n";
// weight update
act4Max.transpose(false); actTopGrad.transpose(true);
weightTopGrad.addProduct(act4Max, actTopGrad, 0, 1);
biasTopGrad.addSum(actTopGrad, 0, 0, 1);
// bp
weightTop.transpose(true);
act4MaxGrad.transpose(true);
act4MaxGrad.addProduct(actTopGrad, weightTop, 0, 1);
weightTop.transpose(false);
// 4->3
// cout << "4->3\n";
act4MaxGrad.transpose(false); // convert back to row-major
act4Max.transpose(false);
act4MaxGrad.eltwiseMult(dropMask4);
act4MaxGrad.scale(1.0/keepProb);
convMaxOutUndo(act4MaxGrad, act4Grad, maxMask4, opt4.numFilters/opt4.outX/opt4.outX, opt4.maxOutPoolStride, opt4.outX, batchSize);
act3PoolMax.transpose(false); act4Grad.transpose(true);
weight4Grad.addProduct(act3PoolMax, act4Grad, 0, 1);
bias4Grad.addSum(act4Grad, 0, 0, 1);
// bp
weight4.transpose(true);
act3PoolMaxGrad.transpose(true);
act3PoolMaxGrad.addProduct(act4Grad, weight4, 0, 1);
weight4.transpose(false);
// 3->2
// cout << "3->2\n";
act3PoolMaxGrad.transpose(false);
act3PoolMax.transpose(false);
act3PoolMaxGrad.eltwiseMult(dropMask3);
act3PoolMaxGrad.scale(1.0/keepProb);
convMaxOutUndo(act3PoolMaxGrad, act3PoolGrad, maxMask3, opt3.numFilters, opt3.maxOutPoolStride, opt3.poolOutX, batchSize);
convLocalMaxUndo(act3, act3PoolGrad, act3Pool, act3Grad, opt3.poolSize, opt3.poolStartX, opt3.poolStride, opt3.poolOutX);
if (strcmp(opt3.layerType, "local") == 0) {
localWeightActs(act2PoolMax, act3Grad, weight3Grad, opt3.imSize, opt3.outX, opt3.outX, opt3.patchSize, opt3.paddingStart, 1, opt3.numChannels, 1);
bias3Grad.addSum(act3Grad, 1, 0, 1);
localImgActs(act3Grad, weight3, act2PoolMaxGrad, opt3.imSize, opt3.imSize, opt3.outX, opt3.paddingStart, 1, opt3.numChannels, 1);
}
else if (strcmp(opt3.layerType, "conv") == 0) {
convWeightActs(act2PoolMax, act3Grad, weight3GradTmp, opt3.imSize, opt3.outX, opt3.outX, opt3.patchSize, opt3.paddingStart, 1, opt3.numChannels, 1, opt3.partialSum);
weight3GradTmp.reshape(opt3.outX * opt3.outX / opt3.partialSum, opt3.numChannels * opt3.patchSize * opt3.patchSize * opt3.numFilters);
weight3Grad.addSum(weight3GradTmp, 0, 0, 1);
weight3Grad.reshape(opt3.numChannels * opt3.patchSize * opt3.patchSize, opt3.numFilters);
act3Grad.reshape(opt3.numFilters, opt3.outX * opt3.outX * batchSize);
bias3Grad.addSum(act3Grad, 1, 0, 1);
act3Grad.reshape(opt3.numFilters * opt3.outX * opt3.outX, batchSize);
convImgActs(act3Grad, weight3, act2PoolMaxGrad, opt3.imSize, opt3.imSize, opt3.outX, opt3.paddingStart, 1, opt3.numChannels, 1);
}
// 2->1
// cout << "2->1\n";
// original part
act2PoolMaxGrad.transpose(false);
act2PoolMax.transpose(false);
act2PoolMaxGrad.eltwiseMult(dropMask2);
act2PoolMaxGrad.scale(1.0/keepProb);
convMaxOutUndo(act2PoolMaxGrad, act2PoolGrad, maxMask2, opt2.numFilters, opt2.maxOutPoolStride, opt2.poolOutX, batchSize);
convLocalMaxUndo(act2, act2PoolGrad, act2Pool, act2Grad, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX);
convWeightActs(act1PoolMax, act2Grad, weight2GradTmp, opt2.imSize, opt2.outX, opt2.outX, opt2.patchSize, opt2.paddingStart, 1, opt2.numChannels, 1, opt2.partialSum);
weight2GradTmp.reshape(opt2.outX * opt2.outX / opt2.partialSum, opt2.numChannels * opt2.patchSize * opt2.patchSize * opt2.numFilters);
weight2Grad.addSum(weight2GradTmp, 0, 0, 1);
weight2Grad.reshape(opt2.numChannels * opt2.patchSize * opt2.patchSize, opt2.numFilters);
act2Grad.reshape(opt2.numFilters, opt2.outX * opt2.outX * batchSize);
bias2Grad.addSum(act2Grad, 1, 0, 1);
act2Grad.reshape(opt2.numFilters * opt2.outX * opt2.outX, batchSize);
convImgActs(act2Grad, weight2, act1PoolMaxGrad, opt2.imSize, opt2.imSize, opt2.outX, opt2.paddingStart, 1, opt2.numChannels, 1);
// 1->0
// cout << "1->0\n";
// original part
act1PoolMaxGrad.transpose(false);
act1PoolMax.transpose(false);
act1PoolMaxGrad.eltwiseMult(dropMask1);
act1PoolMaxGrad.scale(1.0/keepProb);
convMaxOutUndo(act1PoolMaxGrad, act1PoolGrad, maxMask1, opt1.numFilters, opt1.maxOutPoolStride, opt1.poolOutX, batchSize);
convLocalMaxUndo(act1, act1PoolGrad, act1Pool, act1Grad, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX);
convWeightActs(act0, act1Grad, weight1GradTmp, opt1.imSize, opt1.outX, opt1.outX, opt1.patchSize, opt1.paddingStart, 1, opt1.numChannels, 1, opt1.partialSum);
weight1GradTmp.reshape(opt1.outX * opt1.outX / opt1.partialSum, opt1.numChannels * opt1.patchSize * opt1.patchSize * opt1.numFilters);
weight1Grad.addSum(weight1GradTmp, 0, 0, 1);
weight1Grad.reshape(opt1.numChannels * opt1.patchSize * opt1.patchSize, opt1.numFilters);
act1Grad.reshape(opt1.numFilters, opt1.outX * opt1.outX * batchSize);
bias1Grad.addSum(act1Grad, 1, 0, 1);
act1Grad.reshape(opt1.numFilters * opt1.outX * opt1.outX, batchSize);
// update
updateWeight(weight1Grad, weight1Inc, weight1, opt1, batchSize, lr_scale, mom_scale);
updateWeight(weight2Grad, weight2Inc, weight2, opt2, batchSize, lr_scale, mom_scale);
updateWeight(weight3Grad, weight3Inc, weight3, opt3, batchSize, lr_scale, mom_scale);
updateWeight(weight4Grad, weight4Inc, weight4, opt4, batchSize, lr_scale, mom_scale);
updateWeight(weightTopGrad, weightTopInc, weightTop, optTop, batchSize, lr_scale, mom_scale);
updateBias(bias1Grad, bias1Inc, bias1, opt1, batchSize, lr_scale, mom_scale);
updateBias(bias2Grad, bias2Inc, bias2, opt2, batchSize, lr_scale, mom_scale);
updateBias(bias3Grad, bias3Inc, bias3, opt3, batchSize, lr_scale, mom_scale);
updateBias(bias4Grad, bias4Inc, bias4, opt4, batchSize, lr_scale, mom_scale);
updateBias(biasTopGrad, biasTopInc, biasTop, optTop, batchSize, lr_scale, mom_scale);
// normalize weights
NVNormalizeCol2(weight1, bias1, normCol1, tmp1, opt1.maxNorm);
NVNormalizeCol2(weight2, bias2, normCol2, tmp2, opt2.maxNorm);
NVNormalizeCol2(weight3, bias3, normCol3, tmp3, opt3.maxNorm);
NVNormalizeCol2(weight4, bias4, normCol4, tmp4, opt4.maxNorm);
NVNormalizeCol2(weightTop, biasTop, normColTop, tmpTop, optTop.maxNorm);
} // for (int batch = 0; batch < GPUTrain.size(); batch++)
// compute cost
cost /= opt1.numTrain;
cost1 /= opt1.numTrain;
printf("\nfinished epoch %d of %d; classify precision = %f; objective = %f; elapsed time = %f seconds\n", epoch, opt1.numEpochs,
cost, cost1, (float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "\nfinished epoch %d of %d; classify precision = %f; objective = %f; elapsed time = %f seconds\n", epoch, opt1.numEpochs,
cost, cost1, (float)(clock() - startClock)/CLOCKS_PER_SEC);
printf("weight norms\nweight1 = %f\nweight2 = %f\nweight3 = %f\nweight4 = %f\nweightTop = %f\n"
"bias1 = %f\nbias2 = %f\nbias3 = %f\nbias4 = %f\nbiasTop = %f\n",
normCol1.max(), normCol2.max(), normCol3.max(), normCol4.max(), normColTop.max(),
bias1.norm(), bias2.norm(), bias3.norm(), bias4.norm(), biasTop.norm());
fprintf(pFile, "weight norms\nweight1 = %f\nweight2 = %f\nweight3 = %f\nweight4 = %f\nweightTop = %f\n"
"bias1 = %f\nbias2 = %f\nbias3 = %f\nbias4 = %f\nbiasTop = %f\n",
normCol1.max(), normCol2.max(), normCol3.max(), normCol4.max(), normColTop.max(),
bias1.norm(), bias2.norm(), bias3.norm(), bias4.norm(), biasTop.norm());
printf("lr_scale = %f, mom_scale = %f, keepProb = %f, keepInputProb = %f\n", lr_scale, mom_scale, keepProb, keepInputProb);
fprintf(pFile, "lr_scale = %f, mom_scale = %f, keepProb = %f, keepInputProb = %f\n", lr_scale, mom_scale, keepProb, keepInputProb);
// decay learning rate, increase momentum
lr_scale = lrDecay(lr_scale, opt1.lrDecayType, opt1.lrDecayFactor, opt1.lrMinRate);
mom_scale = momInc(mom_scale, opt1.momIncType, opt1.momIncFactor, opt1.momMaxRate);
// anneal dropout: raise the keep probabilities toward their end rates
keepProb = keepProb + opt1.keepIncRate;
keepProb = keepProb > opt1.keepEndRate ? opt1.keepEndRate : keepProb;
keepInputProb = keepInputProb + opt1.keepInputIncRate;
keepInputProb = keepInputProb > opt1.keepInputEndRate ? opt1.keepInputEndRate : keepInputProb;
// process the test set every 3 epochs
if (epoch % 3 == 2) {
cudaThreadSynchronize();
startClock = clock();
cost = 0;
cost1 = 0;
for (int batch = 0; batch < GPUTest.size(); batch++) {
batchSize = GPUTest[batch]->getNumCols();
// ====forward pass====
// 0->1
// cout << "0->1\n";
//original
GPUTest[batch]->copy(act0);
act0.scale(opt1.inputScale);
activateConv(act0, act1, weight1, bias1, opt1);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convMaxOut(act1Pool, act1PoolMax, maxMask1, opt1.numFilters, opt1.maxOutPoolSize, opt1.maxOutPoolStride, opt1.poolOutX, batchSize);
act1PoolMax.transpose(false);
// 1->2
// cout << "1->2\n";
//original
activateConv(act1PoolMax, act2, weight2, bias2, opt2);
convLocalPool(act2, act2Pool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
convMaxOut(act2Pool, act2PoolMax, maxMask2, opt2.numFilters, opt2.maxOutPoolSize, opt2.maxOutPoolStride, opt2.poolOutX, batchSize);
act2PoolMax.transpose(false);
// 2->3
// cout << "2->3\n";
// original
if (strcmp(opt3.layerType, "local") == 0)
activateLocal(act2PoolMax, act3, weight3, bias3, opt3);
else if (strcmp(opt3.layerType, "conv") == 0)
activateConv(act2PoolMax, act3, weight3, bias3, opt3);
convLocalPool(act3, act3Pool, opt3.numFilters, opt3.poolSize, opt3.poolStartX, opt3.poolStride, opt3.poolOutX, MaxPooler());
convMaxOut(act3Pool, act3PoolMax, maxMask3, opt3.numFilters, opt3.maxOutPoolSize, opt3.maxOutPoolStride, opt3.poolOutX, batchSize);
// 3->4
// cout << "3->4\n";
// original
act3PoolMax.transpose(true);
activate(act3PoolMax, act4, weight4, bias4, 0, 1);
act3PoolMax.transpose(false);
act4.transpose(false);
convMaxOut(act4, act4Max, maxMask4, opt4.numFilters/opt4.outX/opt4.outX, opt4.maxOutPoolSize, opt4.maxOutPoolStride, opt4.outX, batchSize);
// 4->top
// cout << "4->top\n";
act4Max.transpose(true);
activate(act4Max, actTop, weightTop, biasTop, 0, 1);
act4Max.transpose(false);
//softmax layer
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
delete &sum;
// compute cost
computeLogregSoftmaxGrad(*GPURawLabelTest[batch], actTop, actTopGrad, false, 1);
actTop.transpose(false);
computeLogregCost(*GPURawLabelTest[batch], actTop, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum();
cost1 += trueLabelLogProbs.sum();
} //for (int batch = 0; batch < GPUTest.size(); batch++)
cudaThreadSynchronize();
cost /= opt1.numTest;
cost1 /= opt1.numTest;
printf("\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
// save checkpoint
NVSaveToFile(weight1, opt1.weightPath + "/weight1.bin");
NVSaveToFile(weight2, opt1.weightPath + "/weight2.bin");
NVSaveToFile(weight3, opt1.weightPath + "/weight3.bin");
NVSaveToFile(weight4, opt1.weightPath + "/weight4.bin");
NVSaveToFile(weightTop, opt1.weightPath + "/weightTop.bin");
NVSaveToFile(bias1, opt1.weightPath + "/bias1.bin");
NVSaveToFile(bias2, opt1.weightPath + "/bias2.bin");
NVSaveToFile(bias3, opt1.weightPath + "/bias3.bin");
NVSaveToFile(bias4, opt1.weightPath + "/bias4.bin");
NVSaveToFile(biasTop, opt1.weightPath + "/biasTop.bin");
printf("Checkpoint saved!\n\n");
fprintf(pFile, "Checkpoint saved!\n\n");
} //if (epoch % 3 == 2)
} // for (int epoch = 0; epoch < opt1.numEpochs; epoch++)
printf("finetuning_rnorm_maxout() complete!\n");
fprintf(pFile, "finetuning_rnorm_maxout() complete!\n");
CPUTrain.clear();
GPUTrain.clear();
CPUTest.clear();
GPUTest.clear();
GPURawLabelTrain.clear();
GPURawLabelTest.clear();
} // void finetune_rnorm_maxout()
void multiViewTest_maxout() {
////assignOpt();
printf("starting multiViewTest_maxout()!\n");
fprintf(pFile, "starting multiViewTest_maxout()!\n");
// initialize cublas
cudaSetDevice(cutGetMaxGflopsDeviceId());
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
cublasInit();
// data and parameters storage
NVMatrix act1, act1Max, act1MaxPool, act1Denom;
NVMatrix act2, act2Max, act2MaxPool, act2Denom;
NVMatrix act3, act3Max, act3MaxPool;
NVMatrix act4, act4Max;
NVMatrix actTop;
NVMatrix softMaxAct;
NVMask maxMask1, maxMask2, maxMask3, maxMask4;
NVMatrix weight1, weight2, weight3, weight4, weightTop;
NVMatrix bias1, bias2, bias3, bias4, biasTop; // bias4 is just an all-zero dummy vector
// initialize parameters
weight1.resize(opt1.numVis, opt1.numFilters);
weight2.resize(opt2.numVis, opt2.numFilters);
if (strcmp(opt3.layerType, "local") == 0)
weight3.resize(opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
else if (strcmp(opt3.layerType, "conv") == 0)
weight3.resize(opt3.numVis, opt3.numFilters);
weight4.resize(opt4.numVis, opt4.numFilters);
weightTop.resize(optTop.numVis, optTop.numFilters);
bias1.resize(opt1.numFilters, 1);
bias2.resize(opt2.numFilters, 1);
if (strcmp(opt3.layerType, "local") == 0)
bias3.resize(opt3.numFilters * opt3.outX * opt3.outX, 1);
else if (strcmp(opt3.layerType, "conv") == 0)
bias3.resize(opt3.numFilters, 1);
bias4.resize(1, opt4.numFilters);
bias4.setTrans(true);
biasTop.resize(1, optTop.numFilters);
biasTop.setTrans(true);
NVReadFromFile(weight1, opt1.dataPath + "/weight1.bin");
NVReadFromFile(weight2, opt1.dataPath + "/weight2.bin");
NVReadFromFile(weight3, opt1.dataPath + "/weight3.bin");
NVReadFromFile(weight4, opt1.dataPath + "/weight4.bin");
NVReadFromFile(weightTop, opt1.dataPath + "/weightTop.bin");
NVReadFromFile(bias1, opt1.dataPath + "/bias1.bin");
NVReadFromFile(bias2, opt1.dataPath + "/bias2.bin");
NVReadFromFile(bias3, opt1.dataPath + "/bias3.bin");
NVReadFromFile(bias4, opt1.dataPath + "/bias4.bin");
NVReadFromFile(biasTop, opt1.dataPath + "/biasTop.bin");
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTest(testBatchNum);
vector<NVMatrix*> GPUTest(testBatchNum*opt1.numViews);
vector<NVMatrix*> GPURawLabelTest(testBatchNum);
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
for (int r = 0; r < opt1.numViews; r++)
GPUTest[batch*opt1.numViews+r] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
for (int r = 0; r < opt1.numViews; r++)
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+testBatchNum*opt1.batchSize); // offset counts full-size batches, not the remainder
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+testBatchNum*opt1.batchSize);
}
multiViewDataProvider(CPUTest, GPUTest, opt1, opt1.numViews, opt1.whitened); // copy data to the GPU side
NVMatrix trueLabelLogProbs;
NVMatrix correctProbs;
MTYPE cost; // as before, we trace the performance using the cost variable
MTYPE cost1;
clock_t startClock;
clock_t tick;
cost = 0;
cost1 = 0;
cudaThreadSynchronize();
startClock = clock();
for (int batch = 0; batch < CPUTest.size(); batch++) {
batchSize = CPUTest[batch]->getNumCols();
for (int r = 0; r < opt1.numViews; r++) {
// ====forward pass====
// 0->1
//cout << "0->1\n";
//original
activateConv(*GPUTest[batch*opt1.numViews+r], act1, weight1, bias1, opt1);
convMaxOut(act1, act1Max, maxMask1, opt1.numFilters, opt1.maxOutPoolSize, opt1.maxOutPoolStride, opt1.outX, batchSize);
act1MaxPool.transpose(false);
convLocalPool(act1Max, act1MaxPool, opt1.numGroups, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
// 1->2
//cout << "1->2\n";
//original
activateConv(act1MaxPool, act2, weight2, bias2, opt2);
convMaxOut(act2, act2Max, maxMask2, opt2.numFilters, opt2.maxOutPoolSize, opt2.maxOutPoolStride, opt2.outX, batchSize);
act2MaxPool.transpose(false);
convLocalPool(act2Max, act2MaxPool, opt2.numGroups, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
// 2->3
//cout << "2->3\n";
// original
if (strcmp(opt3.layerType, "local") == 0)
activateLocal(act2MaxPool, act3, weight3, bias3, opt3);
else if (strcmp(opt3.layerType, "conv") == 0)
activateConv(act2MaxPool, act3, weight3, bias3, opt3);
convMaxOut(act3, act3Max, maxMask3, opt3.numFilters, opt3.maxOutPoolSize, opt3.maxOutPoolStride, opt3.outX, batchSize);
convLocalPool(act3Max, act3MaxPool, opt3.numGroups, opt3.poolSize, opt3.poolStartX, opt3.poolStride, opt3.poolOutX, MaxPooler());
// 3->4
// cout << "3->4\n";
// original
act3MaxPool.transpose(true);
activate(act3MaxPool, act4, weight4, bias4, 0, 1);
act3MaxPool.transpose(false);
act4.transpose(false);
convMaxOut(act4, act4Max, maxMask4, opt4.numFilters/opt4.outX/opt4.outX, opt4.maxOutPoolSize, opt4.maxOutPoolStride, opt4.outX, batchSize);
// 4->top
// cout << "4->top\n";
act4Max.transpose(true);
activate(act4Max, actTop, weightTop, biasTop, 0, 1);
act4Max.transpose(false);
//softmax layer
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
delete &sum;
actTop.transpose(false);
if (r == 0)
actTop.copy(softMaxAct);
else
softMaxAct.add(actTop);
}// for (r = 0:9)
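// average the per-view softmax outputs; the 0.1 factor hard-codes opt1.numViews == 10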
softMaxAct.scale(0.1);
computeLogregCost(*GPURawLabelTest[batch], softMaxAct, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum();
cost1 += trueLabelLogProbs.sum();
}//for (batches)
cudaThreadSynchronize();
cost /= opt1.numTest;
cost1 /= opt1.numTest;
printf("\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
printf("multiViewTest_maxout() complete!\n");
fprintf(pFile, "\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "multiViewTest_maxout() complete!\n");
CPUTest.clear();
GPUTest.clear();
GPURawLabelTest.clear();
} // void multiViewTest_maxout()
|
df1775081465b1fccbb3cfde396f6c29204978d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nms_kernel.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
static const int64_t threadsPerBlock = sizeof(int64_t) * 8;
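// = 64 boxes per chunk: each uint64_t mask word carries one overlap bit per box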
namespace phi {
template <typename T>
static __global__ void NMS(const T* boxes_data,
float threshold,
int64_t num_boxes,
uint64_t* masks) {
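// Each block compares one 64-box row chunk against one 64-box column chunk;
// chunks strictly below the diagonal are skipped because IoU is symmetric.
// Bit i of a thread's mask word is set when column-chunk box i overlaps the
// thread's own box beyond the IoU threshold.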
auto raw_start = blockIdx.y;
auto col_start = blockIdx.x;
if (raw_start > col_start) return;
const int raw_last_storage =
min(num_boxes - raw_start * threadsPerBlock, threadsPerBlock);
const int col_last_storage =
min(num_boxes - col_start * threadsPerBlock, threadsPerBlock);
if (threadIdx.x < raw_last_storage) {
uint64_t mask = 0;
auto current_box_idx = raw_start * threadsPerBlock + threadIdx.x;
const T* current_box = boxes_data + current_box_idx * 4;
for (int i = 0; i < col_last_storage; ++i) {
const T* target_box = boxes_data + (col_start * threadsPerBlock + i) * 4;
if (CalculateIoU<T>(current_box, target_box, threshold)) {
mask |= 1ULL << i;
}
}
const int blocks_per_line = CeilDivide(num_boxes, threadsPerBlock);
masks[current_box_idx * blocks_per_line + col_start] = mask;
}
}
template <typename T, typename Context>
void NMSKernel(const Context& dev_ctx,
const DenseTensor& boxes,
float threshold,
DenseTensor* output) {
PADDLE_ENFORCE_EQ(
boxes.dims().size(),
2,
phi::errors::InvalidArgument("The shape [%s] of boxes must be (N, 4).",
boxes.dims()));
PADDLE_ENFORCE_EQ(
boxes.dims()[1],
4,
phi::errors::InvalidArgument("The shape [%s] of boxes must be (N, 4).",
boxes.dims()));
const int64_t num_boxes = boxes.dims()[0];
const auto blocks_per_line = CeilDivide(num_boxes, threadsPerBlock);
dim3 block(threadsPerBlock);
dim3 grid(blocks_per_line, blocks_per_line);
auto mask_data = phi::memory_utils::Alloc(
dev_ctx.GetPlace(),
num_boxes * blocks_per_line * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
uint64_t* mask_dev = reinterpret_cast<uint64_t*>(mask_data->ptr());
hipLaunchKernelGGL(( NMS<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
boxes.data<T>(), threshold, num_boxes, mask_dev);
std::vector<uint64_t> mask_host(num_boxes * blocks_per_line);
paddle::memory::Copy(phi::CPUPlace(),
mask_host.data(),
dev_ctx.GetPlace(),
mask_dev,
num_boxes * blocks_per_line * sizeof(uint64_t),
dev_ctx.stream());
std::vector<int64_t> remv(blocks_per_line);
std::vector<int64_t> keep_boxes_idxs(num_boxes);
int64_t* output_host = keep_boxes_idxs.data();
int64_t last_box_num = 0;
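// Greedy suppression on the host (boxes are assumed to arrive pre-sorted by
// score): a box is kept iff no earlier kept box has set its bit, and its own
// mask row then flags every later box it suppresses.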
for (int64_t i = 0; i < num_boxes; ++i) {
auto remv_element_id = i / threadsPerBlock;
auto remv_bit_id = i % threadsPerBlock;
if (!(remv[remv_element_id] & 1ULL << remv_bit_id)) {
output_host[last_box_num++] = i;
uint64_t* current_mask = mask_host.data() + i * blocks_per_line;
for (auto j = remv_element_id; j < blocks_per_line; ++j) {
remv[j] |= current_mask[j];
}
}
}
output->Resize(phi::make_ddim({last_box_num}));
auto* output_data = dev_ctx.template Alloc<int64_t>(output);
paddle::memory::Copy(dev_ctx.GetPlace(),
output_data,
phi::CPUPlace(),
output_host,
sizeof(int64_t) * last_box_num,
dev_ctx.stream());
}
} // namespace phi
PD_REGISTER_KERNEL(nms, GPU, ALL_LAYOUT, phi::NMSKernel, float, double) {}
| df1775081465b1fccbb3cfde396f6c29204978d4.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nms_kernel.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
static const int64_t threadsPerBlock = sizeof(int64_t) * 8;
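// = 64 boxes per chunk: each uint64_t mask word carries one overlap bit per box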
namespace phi {
template <typename T>
static __global__ void NMS(const T* boxes_data,
float threshold,
int64_t num_boxes,
uint64_t* masks) {
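// Each block compares one 64-box row chunk against one 64-box column chunk;
// chunks strictly below the diagonal are skipped because IoU is symmetric.
// Bit i of a thread's mask word is set when column-chunk box i overlaps the
// thread's own box beyond the IoU threshold.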
auto raw_start = blockIdx.y;
auto col_start = blockIdx.x;
if (raw_start > col_start) return;
const int raw_last_storage =
min(num_boxes - raw_start * threadsPerBlock, threadsPerBlock);
const int col_last_storage =
min(num_boxes - col_start * threadsPerBlock, threadsPerBlock);
if (threadIdx.x < raw_last_storage) {
uint64_t mask = 0;
auto current_box_idx = raw_start * threadsPerBlock + threadIdx.x;
const T* current_box = boxes_data + current_box_idx * 4;
for (int i = 0; i < col_last_storage; ++i) {
const T* target_box = boxes_data + (col_start * threadsPerBlock + i) * 4;
if (CalculateIoU<T>(current_box, target_box, threshold)) {
mask |= 1ULL << i;
}
}
const int blocks_per_line = CeilDivide(num_boxes, threadsPerBlock);
masks[current_box_idx * blocks_per_line + col_start] = mask;
}
}
template <typename T, typename Context>
void NMSKernel(const Context& dev_ctx,
const DenseTensor& boxes,
float threshold,
DenseTensor* output) {
PADDLE_ENFORCE_EQ(
boxes.dims().size(),
2,
phi::errors::InvalidArgument("The shape [%s] of boxes must be (N, 4).",
boxes.dims()));
PADDLE_ENFORCE_EQ(
boxes.dims()[1],
4,
phi::errors::InvalidArgument("The shape [%s] of boxes must be (N, 4).",
boxes.dims()));
const int64_t num_boxes = boxes.dims()[0];
const auto blocks_per_line = CeilDivide(num_boxes, threadsPerBlock);
dim3 block(threadsPerBlock);
dim3 grid(blocks_per_line, blocks_per_line);
auto mask_data = phi::memory_utils::Alloc(
dev_ctx.GetPlace(),
num_boxes * blocks_per_line * sizeof(uint64_t),
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
uint64_t* mask_dev = reinterpret_cast<uint64_t*>(mask_data->ptr());
NMS<T><<<grid, block, 0, dev_ctx.stream()>>>(
boxes.data<T>(), threshold, num_boxes, mask_dev);
std::vector<uint64_t> mask_host(num_boxes * blocks_per_line);
paddle::memory::Copy(phi::CPUPlace(),
mask_host.data(),
dev_ctx.GetPlace(),
mask_dev,
num_boxes * blocks_per_line * sizeof(uint64_t),
dev_ctx.stream());
std::vector<int64_t> remv(blocks_per_line);
std::vector<int64_t> keep_boxes_idxs(num_boxes);
int64_t* output_host = keep_boxes_idxs.data();
int64_t last_box_num = 0;
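// Greedy suppression on the host (boxes are assumed to arrive pre-sorted by
// score): a box is kept iff no earlier kept box has set its bit, and its own
// mask row then flags every later box it suppresses.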
for (int64_t i = 0; i < num_boxes; ++i) {
auto remv_element_id = i / threadsPerBlock;
auto remv_bit_id = i % threadsPerBlock;
if (!(remv[remv_element_id] & 1ULL << remv_bit_id)) {
output_host[last_box_num++] = i;
uint64_t* current_mask = mask_host.data() + i * blocks_per_line;
for (auto j = remv_element_id; j < blocks_per_line; ++j) {
remv[j] |= current_mask[j];
}
}
}
output->Resize(phi::make_ddim({last_box_num}));
auto* output_data = dev_ctx.template Alloc<int64_t>(output);
paddle::memory::Copy(dev_ctx.GetPlace(),
output_data,
phi::CPUPlace(),
output_host,
sizeof(int64_t) * last_box_num,
dev_ctx.stream());
}
} // namespace phi
PD_REGISTER_KERNEL(nms, GPU, ALL_LAYOUT, phi::NMSKernel, float, double) {}
|
798f7bc1391f0e3febdcec81e9114a754e17aeae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pq.cuh"
#include <thrust/reduce.h>
#include <float.h>
extern hipStream_t streamAttr;
extern int Blocks;
extern int Threads;
template <class dataPoint>
void AttractiveEstimation(int *row, int *col, dataPoint *data, dataPoint *Fattr,
dataPoint *y, int n, int d, int blockSize,
int blockRows, int num_blocks, int nnz, int format) {
if (format == 0) {
hipLaunchKernelGGL(( pq_csr_naive), dim3(Blocks), dim3(Threads), 0, 0, row, col, data, n, d, y, Fattr);
} else if (format == 1) {
// bsr_col_pq(data, row, col, y, Fattr, blockRows, blockSize, num_blocks, n,
// d);
} else if (format == 2) {
hipLaunchKernelGGL(( coo_pq_kernel), dim3(Blocks), dim3(Threads), 0, 0, nnz, col, row, data, y, Fattr, d, n);
} else if (format == 3) {
printf("hybrid\n");
}
}
template <class dataPoint>
void AttractiveEstimation(sparse_matrix<dataPoint> P, int d, dataPoint *y,
dataPoint *Fattr) {
if (P.format == 0) {
hipLaunchKernelGGL(( pq_csr_naive<dataPoint, int>)
, dim3(Blocks), dim3(Threads), 0, streamAttr, P.row, P.col, P.val, P.n, d, y, Fattr);
} else if (P.format == 1) {
// bsr_col_pq<dataPoint>(P.val, P.row, P.col, y, Fattr, P.blockRows,
// P.blockSize, P.nnzb,
// P.n, d);
} else if (P.format == 2) {
switch (d) {
case 1:
hipLaunchKernelGGL(( coo_pq_kernel<dataPoint, 1>), dim3(Blocks), dim3(Threads), 0, streamAttr,
P.nnz, P.col, P.row, P.val, y, Fattr, P.n);
break;
case 2:
hipLaunchKernelGGL(( coo_pq_kernel<dataPoint, 2>), dim3(Blocks), dim3(Threads), 0, streamAttr,
P.nnz, P.col, P.row, P.val, y, Fattr, P.n);
break;
case 3:
hipLaunchKernelGGL(( coo_pq_kernel<dataPoint, 3>), dim3(Blocks), dim3(Threads), 0, streamAttr,
P.nnz, P.col, P.row, P.val, y, Fattr, P.n);
break;
}
} else if (P.format == 3) {
gpu_hybrid_spmv<dataPoint>(P.elements_in_rows, P.coo_size, y, P.n, Fattr,
P.ell_cols, P.ell_data, P.coo_data,
P.coo_row_ids, P.coo_col_ids, d);
}
}
template <typename data_type,int d>
__global__ void
ComputeKLkernel(const int n_elements, const matidx *__restrict__ col_ids,
const matidx *__restrict__ row_ids,
const data_type *__restrict__ data,
const data_type *__restrict__ Y, data_type *__restrict__ Cij, const int n,data_type zeta,data_type alpha) {
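// Per-edge KL term: with p = alpha * P_ij and the Student-t affinity
// q = 1 / (zeta * (1 + ||y_i - y_j||^2)), each element stores p * log(p / q);
// FLT_MIN keeps the logarithm finite when p or q underflows.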
register matidx row, column;
register data_type dist, p,q;
register unsigned int element;
register data_type dx, dy, dz;
for (element = blockIdx.x * blockDim.x + threadIdx.x; element < n_elements;
element += blockDim.x * gridDim.x) {
row = row_ids[element];
column = col_ids[element];
switch (d) {
case 1:
dx = (Y[row] - Y[column]);
dist = dx * dx;
p = data[element]*alpha;
q= 1/(zeta* (1 + dist));
Cij[element]=p*log((p+FLT_MIN)/(q+FLT_MIN));
break;
case 2:
dx = (Y[row] - Y[column]);
dy = (Y[row + n] - Y[column + n]);
dist = dx * dx + dy * dy;
p = data[element]*alpha;
q= 1/(zeta* (1 + dist));
Cij[element]=p*log((p+FLT_MIN)/(q+FLT_MIN));
break;
case 3:
dx = (Y[row] - Y[column]);
dy = (Y[row + n] - Y[column + n]);
dz = (Y[row + 2 * n] - Y[column + 2 * n]);
dist = dx * dx + dy * dy + dz * dz;
p = data[element]*alpha;
q= 1/(zeta* (1 + dist));
Cij[element]=p*log((p+FLT_MIN)/(q+FLT_MIN));
break;
}
}
}
template<class dataPoint>
dataPoint tsneCost(sparse_matrix<dataPoint>P,dataPoint* y, int n,int d,dataPoint alpha,dataPoint zeta ){
dataPoint* Cij;
gpuErrchk(hipMallocManaged(&Cij, P.nnz* sizeof(dataPoint)));
switch(d){
case 1:
hipLaunchKernelGGL(( ComputeKLkernel<dataPoint,1>), dim3(Blocks), dim3(Threads), 0, 0, P.nnz, P.col, P.row, P.val, y, Cij, P.n,zeta,alpha);
break;
case 2:
hipLaunchKernelGGL(( ComputeKLkernel<dataPoint,2>), dim3(Blocks), dim3(Threads), 0, 0, P.nnz, P.col, P.row, P.val, y, Cij, P.n,zeta,alpha);
break;
case 3:
hipLaunchKernelGGL(( ComputeKLkernel<dataPoint,3>), dim3(Blocks), dim3(Threads), 0, 0, P.nnz, P.col, P.row, P.val, y, Cij, P.n,zeta,alpha);
break;
}
hipDeviceSynchronize();
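// thrust::reduce over raw pointers executes on the host, so the managed buffer must be synchronized first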
dataPoint C= thrust::reduce(Cij, Cij+P.nnz);
gpuErrchk(hipFree(Cij));
return C;
}
template
float tsneCost(sparse_matrix<float> P,float* y, int n,int d,float alpha,float zeta );
template
double tsneCost(sparse_matrix<double> P,double* y, int n,int d,double alpha,double zeta );
template void AttractiveEstimation(sparse_matrix<float> P, int d, float *y,
float *Fattr);
template void AttractiveEstimation(sparse_matrix<double> P, int d, double *y,
double *Fattr);
| 798f7bc1391f0e3febdcec81e9114a754e17aeae.cu | #include "pq.cuh"
#include <thrust/reduce.h>
#include <float.h>
extern cudaStream_t streamAttr;
extern int Blocks;
extern int Threads;
template <class dataPoint>
void AttractiveEstimation(int *row, int *col, dataPoint *data, dataPoint *Fattr,
dataPoint *y, int n, int d, int blockSize,
int blockRows, int num_blocks, int nnz, int format) {
if (format == 0) {
pq_csr_naive<<<Blocks, Threads>>>(row, col, data, n, d, y, Fattr);
} else if (format == 1) {
// bsr_col_pq(data, row, col, y, Fattr, blockRows, blockSize, num_blocks, n,
// d);
} else if (format == 2) {
coo_pq_kernel<<<Blocks, Threads>>>(nnz, col, row, data, y, Fattr, d, n);
} else if (format == 3) {
printf("hybrid\n");
}
}
template <class dataPoint>
void AttractiveEstimation(sparse_matrix<dataPoint> P, int d, dataPoint *y,
dataPoint *Fattr) {
if (P.format == 0) {
pq_csr_naive<dataPoint, int>
<<<Blocks, Threads, 0, streamAttr>>>(P.row, P.col, P.val, P.n, d, y, Fattr);
} else if (P.format == 1) {
// bsr_col_pq<dataPoint>(P.val, P.row, P.col, y, Fattr, P.blockRows,
// P.blockSize, P.nnzb,
// P.n, d);
} else if (P.format == 2) {
switch (d) {
case 1:
coo_pq_kernel<dataPoint, 1><<<Blocks, Threads, 0, streamAttr>>>(
P.nnz, P.col, P.row, P.val, y, Fattr, P.n);
break;
case 2:
coo_pq_kernel<dataPoint, 2><<<Blocks, Threads, 0, streamAttr>>>(
P.nnz, P.col, P.row, P.val, y, Fattr, P.n);
break;
case 3:
coo_pq_kernel<dataPoint, 3><<<Blocks, Threads, 0, streamAttr>>>(
P.nnz, P.col, P.row, P.val, y, Fattr, P.n);
break;
}
} else if (P.format == 3) {
gpu_hybrid_spmv<dataPoint>(P.elements_in_rows, P.coo_size, y, P.n, Fattr,
P.ell_cols, P.ell_data, P.coo_data,
P.coo_row_ids, P.coo_col_ids, d);
}
}
template <typename data_type,int d>
__global__ void
ComputeKLkernel(const int n_elements, const matidx *__restrict__ col_ids,
const matidx *__restrict__ row_ids,
const data_type *__restrict__ data,
const data_type *__restrict__ Y, data_type *__restrict__ Cij, const int n,data_type zeta,data_type alpha) {
register matidx row, column;
register data_type dist, p,q;
register unsigned int element;
register data_type dx, dy, dz;
for (element = blockIdx.x * blockDim.x + threadIdx.x; element < n_elements;
element += blockDim.x * gridDim.x) {
row = row_ids[element];
column = col_ids[element];
switch (d) {
case 1:
dx = (Y[row] - Y[column]);
dist = dx * dx;
p = data[element]*alpha;
q= 1/(zeta* (1 + dist));
Cij[element]=p*log((p+FLT_MIN)/(q+FLT_MIN));
break;
case 2:
dx = (Y[row] - Y[column]);
dy = (Y[row + n] - Y[column + n]);
dist = dx * dx + dy * dy;
p = data[element]*alpha;
q= 1/(zeta* (1 + dist));
Cij[element]=p*log((p+FLT_MIN)/(q+FLT_MIN));
break;
case 3:
dx = (Y[row] - Y[column]);
dy = (Y[row + n] - Y[column + n]);
dz = (Y[row + 2 * n] - Y[column + 2 * n]);
dist = dx * dx + dy * dy + dz * dz;
p = data[element]*alpha;
q= 1/(zeta* (1 + dist));
Cij[element]=p*log((p+FLT_MIN)/(q+FLT_MIN));
break;
}
}
}
template<class dataPoint>
dataPoint tsneCost(sparse_matrix<dataPoint> P, dataPoint *y, int n, int d, dataPoint alpha, dataPoint zeta) {
dataPoint* Cij;
gpuErrchk(cudaMallocManaged(&Cij, P.nnz* sizeof(dataPoint)));
switch(d){
case 1:
ComputeKLkernel<dataPoint,1><<<Blocks, Threads>>>(P.nnz, P.col, P.row, P.val, y, Cij, P.n,zeta,alpha);
break;
case 2:
ComputeKLkernel<dataPoint,2><<<Blocks, Threads>>>(P.nnz, P.col, P.row, P.val, y, Cij, P.n,zeta,alpha);
break;
case 3:
ComputeKLkernel<dataPoint,3><<<Blocks, Threads>>>(P.nnz, P.col, P.row, P.val, y, Cij, P.n,zeta,alpha);
break;
}
cudaDeviceSynchronize();
dataPoint C= thrust::reduce(Cij, Cij+P.nnz);
gpuErrchk(cudaFree(Cij));
return C;
}
template float tsneCost(sparse_matrix<float> P, float *y, int n, int d, float alpha, float zeta);
template double tsneCost(sparse_matrix<double> P, double *y, int n, int d, double alpha, double zeta);
template void AttractiveEstimation(sparse_matrix<float> P, int d, float *y,
float *Fattr);
template void AttractiveEstimation(sparse_matrix<double> P, int d, double *y,
double *Fattr);
|
fb4470b31334c5b16d3b7905d6c2abd724ba1f07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Zhe Chen
#include <iostream>
#include <cmath>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "utils.h"
using namespace std;
#define BLOCK_DIM 32
#define BLOCK_DIM_IN 30
double Residual(int N, double *U, double *F){
double h=1.0/(N+1.0);
double res=0.0, res_1=0.0;
#pragma omp parallel for shared(U,F) private(res_1)\
reduction(+:res)
for (int j=1;j<=N;j++){
for (int i=1;i<=N;i++){
res_1=(-U[(N+2)*j+i-1]-U[(N+2)*(j-1)+i]-U[(N+2)*j+i+1]-U[(N+2)*(j+1)+i]+4.0*U[(N+2)*j+i])/h/h-F[(N+2)*j+i];
res+=res_1*res_1;
}
}
res=sqrt(res);
return res;
}
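// The residual above is the 2-norm of A*u - f for the standard 5-point
// Laplacian stencil on the unit square with spacing h = 1/(N+1):
//   r_ij = (4*u_ij - u_{i-1,j} - u_{i+1,j} - u_{i,j-1} - u_{i,j+1}) / h^2 - f_ij.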
void Jacobi_cpu(int N, double *U, double *F, int maxit, int num_threads){
#if defined(_OPENMP)
int threads_all = omp_get_num_procs();
cout << "Number of cpus in this machine: " << threads_all << endl;
omp_set_num_threads(num_threads);
cout << "Use " << num_threads << " threads" << endl;
#endif
double h = 1.0/(N+1.0);
double res=0.0;
double tol=1e-8;
double rel_res=0.0;
int iter=0;
double *U_new=(double*) malloc((N+2)*(N+2) * sizeof(double));
double res0=Residual(N,U,F);
cout << "Initail residual is " << res0 << endl;
rel_res=tol+1.0;
while (rel_res>tol){
#pragma omp parallel shared(U_new, U)
{
#pragma omp for
for (int j = 1; j <= N; j++) {
for (int i = 1; i <= N; i++) {
//rows first, in the inner loop since it's stored in row order.
U_new[(N+2)*j+i] = 0.25 *
(h * h * F[(N+2)*j+i] + U[(N+2)*j+i-1] + U[(N+2)*(j-1)+i]
+ U[(N+2)*j+i+1]+ U[(N+2)*(j+1)+i]);
}
}
#pragma omp for
for (int j=1; j<=N; j++){
for (int i=1; i<=N; i++){
U[(N+2)*j+i]=U_new[(N+2)*j+i];
}
}
}
res=Residual(N,U,F);
rel_res=res/res0;
// if (iter%(maxit/10)==0){
// std::cout << "Relative residual is " << rel_res << std::endl;
// }
iter++;
if (iter>maxit){
cout << "Max iteration reached: " << maxit <<endl;
cout << "Remaining res: " << rel_res <<endl;
break;
}
}
cout << "Remaining res: " << rel_res <<endl;
free(U_new);
}
__global__ void Jacobi_gpu_kernel(int N, double h, double *U_new, double *U, double *F) {
__shared__ double smem[BLOCK_DIM][BLOCK_DIM];
smem[threadIdx.x][threadIdx.y]=0.0;
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+2 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+2){
smem[threadIdx.x][threadIdx.y]=U[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x];
}
__syncthreads();
if (threadIdx.x<=BLOCK_DIM_IN && threadIdx.x>=1 &&
threadIdx.y<=BLOCK_DIM_IN && threadIdx.y>=1){
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+1 &&
blockIdx.x*BLOCK_DIM_IN+threadIdx.x>0 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+1 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y>0){
U_new[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x]=
0.25 *
(h * h * F[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x] + smem[threadIdx.x-1][threadIdx.y] + smem[threadIdx.x+1][threadIdx.y]
+ smem[threadIdx.x][threadIdx.y-1]+ smem[threadIdx.x][threadIdx.y+1]);
}
}
}
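// Tiling note for the kernel above: each block stages a BLOCK_DIM x
// BLOCK_DIM (32 x 32) tile of U in shared memory but updates only the
// inner BLOCK_DIM_IN x BLOCK_DIM_IN (30 x 30) cells, so the grid advances
// in steps of BLOCK_DIM_IN and the one-cell border of each tile acts as
// the halo supplying the four stencil neighbors. For example, thread
// (1,1) of block (0,0) updates global cell (1,1) from smem[0][1],
// smem[2][1], smem[1][0], and smem[1][2], all loaded by its neighbors.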
void Jacobi_gpu(int N, double *U, double *F, int maxit){
double h = 1.0/(N+1.0);
double res=0.0;
double tol=1e-8;
double rel_res=0.0;
int iter=0;
double *U_new, *U_d, *F_d;
hipMalloc(&U_d, (N+2)*(N+2)*sizeof(double));
hipMalloc(&F_d, (N+2)*(N+2)*sizeof(double));
hipMemcpy(U_d, U, (N+2)*(N+2)*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(F_d, F, (N+2)*(N+2)*sizeof(double),hipMemcpyHostToDevice);
hipMalloc(&U_new, (N+2)*(N+2)*sizeof(double));
hipMemcpy(U_new, U_d, (N+2)*(N+2)*sizeof(double),hipMemcpyDeviceToDevice);
// memset(U_new, 0, sizeof(double) * (N+2)*(N+2));
// __shared__ double smem[BLOCK_DIM][BLOCK_DIM];
double res0=Residual(N,U,F);
cout << "Initail residual is " << res0 << endl;
rel_res=tol+1.0;
dim3 blockDim(BLOCK_DIM, BLOCK_DIM);
dim3 gridDim((N-1)/(BLOCK_DIM_IN)+1, (N-1)/(BLOCK_DIM_IN)+1);
while (rel_res>tol){
hipLaunchKernelGGL(( Jacobi_gpu_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N, h, U_new, U_d, F_d);
hipMemcpy(U_d, U_new, (N+2)*(N+2)*sizeof(double),hipMemcpyDeviceToDevice);
hipMemcpy(U,U_d,(N+2)*(N+2)*sizeof(double),hipMemcpyDeviceToHost);
res=Residual(N,U,F);
rel_res=res/res0;
// if (iter%(maxit/10)==0){
// std::cout << "Relative residual is " << rel_res << std::endl;
// }
iter++;
if (iter>maxit){
cout << "Max iteration reached: " << maxit <<endl;
cout << "Remaining res: " << rel_res <<endl;
break;
}
}
cout << "Remaining res: " << rel_res <<endl;
hipFree(U_new);
hipFree(U_d);
hipFree(F_d);
}
int main(int argc, char **argv) {
cout << "Please input N(default=10): " << endl;
int N = 10;
cin >> N;
cout << "Please input num of threads(default=1): " << endl;
int num_threads = 1;
cin >> num_threads;
int maxit=10000;
//allocate
double *U = (double*) malloc ((N+2)*(N+2)*sizeof(double));
double *F = (double*) malloc ((N+2)*(N+2)*sizeof(double));
//initialize
memset(U,0,(N+2)*(N+2)*sizeof(double));
memset(F,0,(N+2)*(N+2)*sizeof(double));
for (int i=0;i<(N+2)*(N+2);i++){
F[i]=1.0;
}
Timer t;
t.tic();
Jacobi_cpu(N, U, F, maxit,num_threads);
printf("CPU Bandwidth = %f GB/s\n", maxit*10*(N+2)*(N+2)*sizeof(double) / (t.toc())/1e9);
cout << "CPU Elapse time=" << t.toc() << "s" <<endl;
memset(U,0,(N+2)*(N+2)*sizeof(double));
t.tic();
Jacobi_gpu(N, U, F, maxit);
printf("GPU Bandwidth = %f GB/s\n", maxit*10*(N+2)*(N+2)*sizeof(double) / (t.toc())/1e9);
cout << "GPU Elapse time=" << t.toc() << "s" <<endl;
free(U);
free(F);
return 0;
}
| fb4470b31334c5b16d3b7905d6c2abd724ba1f07.cu | //Zhe Chen
#include <iostream>
#include <cmath>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "utils.h"
using namespace std;
#define BLOCK_DIM 32
#define BLOCK_DIM_IN 30
double Residual(int N, double *U, double *F){
double h=1.0/(N+1.0);
double res=0.0, res_1=0.0;
#pragma omp parallel for shared(U,F) private(res_1)\
reduction(+:res)
for (int j=1;j<=N;j++){
for (int i=1;i<=N;i++){
res_1=(-U[(N+2)*j+i-1]-U[(N+2)*(j-1)+i]-U[(N+2)*j+i+1]-U[(N+2)*(j+1)+i]+4.0*U[(N+2)*j+i])/h/h-F[(N+2)*j+i];
res+=res_1*res_1;
}
}
res=sqrt(res);
return res;
}
void Jacobi_cpu(int N, double *U, double *F, int maxit, int num_threads){
#if defined(_OPENMP)
int threads_all = omp_get_num_procs();
cout << "Number of cpus in this machine: " << threads_all << endl;
omp_set_num_threads(num_threads);
cout << "Use " << num_threads << " threads" << endl;
#endif
double h = 1.0/(N+1.0);
double res=0.0;
double tol=1e-8;
double rel_res=0.0;
int iter=0;
double *U_new=(double*) malloc((N+2)*(N+2) * sizeof(double));
double res0=Residual(N,U,F);
cout << "Initail residual is " << res0 << endl;
rel_res=tol+1.0;
while (rel_res>tol){
#pragma omp parallel shared(U_new, U)
{
#pragma omp for
for (int j = 1; j <= N; j++) {
for (int i = 1; i <= N; i++) {
//rows first, in the inner loop since it's stored in row order.
U_new[(N+2)*j+i] = 0.25 *
(h * h * F[(N+2)*j+i] + U[(N+2)*j+i-1] + U[(N+2)*(j-1)+i]
+ U[(N+2)*j+i+1]+ U[(N+2)*(j+1)+i]);
}
}
#pragma omp for
for (int j=1; j<=N; j++){
for (int i=1; i<=N; i++){
U[(N+2)*j+i]=U_new[(N+2)*j+i];
}
}
}
res=Residual(N,U,F);
rel_res=res/res0;
// if (iter%(maxit/10)==0){
// std::cout << "Relative residual is " << rel_res << std::endl;
// }
iter++;
if (iter>maxit){
cout << "Max iteration reached: " << maxit <<endl;
cout << "Remaining res: " << rel_res <<endl;
break;
}
}
cout << "Remaining res: " << rel_res <<endl;
free(U_new);
}
__global__ void Jacobi_gpu_kernel(int N, double h, double *U_new, double *U, double *F) {
__shared__ double smem[BLOCK_DIM][BLOCK_DIM];
smem[threadIdx.x][threadIdx.y]=0.0;
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+2 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+2){
smem[threadIdx.x][threadIdx.y]=U[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x];
}
__syncthreads();
if (threadIdx.x<=BLOCK_DIM_IN && threadIdx.x>=1 &&
threadIdx.y<=BLOCK_DIM_IN && threadIdx.y>=1){
if (blockIdx.x*BLOCK_DIM_IN+threadIdx.x<N+1 &&
blockIdx.x*BLOCK_DIM_IN+threadIdx.x>0 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y<N+1 &&
blockIdx.y*BLOCK_DIM_IN+threadIdx.y>0){
U_new[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x]=
0.25 *
(h * h * F[(blockIdx.y*BLOCK_DIM_IN+threadIdx.y)*(N+2)+blockIdx.x*BLOCK_DIM_IN+threadIdx.x] + smem[threadIdx.x-1][threadIdx.y] + smem[threadIdx.x+1][threadIdx.y]
+ smem[threadIdx.x][threadIdx.y-1]+ smem[threadIdx.x][threadIdx.y+1]);
}
}
}
void Jacobi_gpu(int N, double *U, double *F, int maxit){
double h = 1.0/(N+1.0);
double res=0.0;
double tol=1e-8;
double rel_res=0.0;
int iter=0;
double *U_new, *U_d, *F_d;
cudaMalloc(&U_d, (N+2)*(N+2)*sizeof(double));
cudaMalloc(&F_d, (N+2)*(N+2)*sizeof(double));
cudaMemcpy(U_d, U, (N+2)*(N+2)*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(F_d, F, (N+2)*(N+2)*sizeof(double),cudaMemcpyHostToDevice);
cudaMalloc(&U_new, (N+2)*(N+2)*sizeof(double));
cudaMemcpy(U_new, U_d, (N+2)*(N+2)*sizeof(double),cudaMemcpyDeviceToDevice);
// memset(U_new, 0, sizeof(double) * (N+2)*(N+2));
// __shared__ double smem[BLOCK_DIM][BLOCK_DIM];
double res0=Residual(N,U,F);
cout << "Initail residual is " << res0 << endl;
rel_res=tol+1.0;
dim3 blockDim(BLOCK_DIM, BLOCK_DIM);
dim3 gridDim((N-1)/(BLOCK_DIM_IN)+1, (N-1)/(BLOCK_DIM_IN)+1);
while (rel_res>tol){
Jacobi_gpu_kernel<<<gridDim,blockDim>>>(N, h, U_new, U_d, F_d);
cudaMemcpy(U_d, U_new, (N+2)*(N+2)*sizeof(double),cudaMemcpyDeviceToDevice);
cudaMemcpy(U,U_d,(N+2)*(N+2)*sizeof(double),cudaMemcpyDeviceToHost);
res=Residual(N,U,F);
rel_res=res/res0;
// if (iter%(maxit/10)==0){
// std::cout << "Relative residual is " << rel_res << std::endl;
// }
iter++;
if (iter>maxit){
cout << "Max iteration reached: " << maxit <<endl;
cout << "Remaining res: " << rel_res <<endl;
break;
}
}
cout << "Remaining res: " << rel_res <<endl;
cudaFree(U_new);
cudaFree(U_d);
cudaFree(F_d);
}
int main(int argc, char **argv) {
cout << "Please input N(default=10): " << endl;
int N = 10;
cin >> N;
cout << "Please input num of threads(default=1): " << endl;
int num_threads = 1;
cin >> num_threads;
int maxit=10000;
//allocate
double *U = (double*) malloc ((N+2)*(N+2)*sizeof(double));
double *F = (double*) malloc ((N+2)*(N+2)*sizeof(double));
//initialize
memset(U,0,(N+2)*(N+2)*sizeof(double));
memset(F,0,(N+2)*(N+2)*sizeof(double));
for (int i=0;i<(N+2)*(N+2);i++){
F[i]=1.0;
}
Timer t;
t.tic();
Jacobi_cpu(N, U, F, maxit,num_threads);
printf("CPU Bandwidth = %f GB/s\n", maxit*10*(N+2)*(N+2)*sizeof(double) / (t.toc())/1e9);
cout << "CPU Elapse time=" << t.toc() << "s" <<endl;
memset(U,0,(N+2)*(N+2)*sizeof(double));
t.tic();
Jacobi_gpu(N, U, F, maxit);
printf("GPU Bandwidth = %f GB/s\n", maxit*10*(N+2)*(N+2)*sizeof(double) / (t.toc())/1e9);
cout << "GPU Elapse time=" << t.toc() << "s" <<endl;
free(U);
free(F);
return 0;
}
|
4de2ee576b1b5b497d4580674173514de2357388.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_update_velocities(float4* d_uv, float4* d_velocities_buffer, int numel) {
size_t col = threadIdx.x + blockIdx.x * blockDim.x;
// Guard the store rather than returning early: calling __syncthreads()
// after a divergent early return is undefined behavior, and nothing after
// the store needs a barrier anyway.
if (col < (size_t)numel) {
d_velocities_buffer[col] = make_float4(d_uv[col].x, d_uv[col].y, 0, 0);
}
} | 4de2ee576b1b5b497d4580674173514de2357388.cu | #include "includes.h"
__global__ void kernel_update_velocities(float4* d_uv, float4* d_velocities_buffer, int numel) {
size_t col = threadIdx.x + blockIdx.x * blockDim.x;
// Guard the store rather than returning early: calling __syncthreads()
// after a divergent early return is undefined behavior, and nothing after
// the store needs a barrier anyway.
if (col < (size_t)numel) {
d_velocities_buffer[col] = make_float4(d_uv[col].x, d_uv[col].y, 0, 0);
}
} |
ab4eda307beb6b148e0ba03fbf4939a3776dce39.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
struct logadd_functor
{
__device__ void operator()(float* output, const float* input1, const float* input2) const
{
if (*input1 < *input2) {
*output = log(1 + exp(*input1 - *input2)) + *input2;
} else {
*output = log(1 + exp(*input2 - *input1)) + *input1;
}
}
};
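// Numerically stable log-add: log(e^a + e^b) = max(a,b) + log(1 + e^(min-max)),
// which never exponentiates a positive number. Worked example with
// a = 1000, b = 999: exp(1000) overflows a float, but the identity gives
// 1000 + log(1 + e^-1) ~= 1000.3133.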
TH_API void THNN_CudaLogSpace_add(
THCState *state, THCudaTensor *output,
THCudaTensor *input1, THCudaTensor *input2) {
THC_pointwiseApply3(state, output, input1, input2, logadd_functor());
}
struct logadd_inplace_functor
{
__device__ void operator()(float* input1, const float* input2) const
{
if (*input1 < *input2) {
*input1 = log(1 + exp(*input1 - *input2)) + *input2;
} else {
*input1 = log(1 + exp(*input2 - *input1)) + *input1;
}
}
};
TH_API void THNN_CudaLogSpace_add_inplace(
THCState *state, THCudaTensor *input1, THCudaTensor *input2) {
THC_pointwiseApply2(state, input1, input2, logadd_inplace_functor());
}
struct pointwisemod_functor
{
__device__ void operator()(float* output, const float* input) const
{
unsigned int* inta = reinterpret_cast<unsigned int*>(output);
// Set the last bit to the sign value;
*inta = *inta ^ ((-((*input) == 1.0) ^ (*inta)) & (1 << 0));
*output = *(reinterpret_cast<float*>(inta));
}
};
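// Bit idiom used above: for a condition c and mask m = (1 << 0),
// x ^ ((-c ^ x) & m) copies c into the masked bit of x, because -c is
// all-ones when c is true and all-zeros otherwise. Worked example with
// c = 1, x = 0b10: -c = 0xFFFFFFFF, (-c ^ x) & 1 = 1, x ^ 1 = 0b11.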
struct signedAdd_functor
{
__device__ void operator()(float* output, const float* input1, const float* input2) const
{
// Get back the signs
float t1_sign = (((*reinterpret_cast<const unsigned int*>(input1)) >> 0) & 1) ? 1.0 : -1.0;
float t2_sign = (((*reinterpret_cast<const unsigned int*>(input2)) >> 0) & 1) ? 1.0 : -1.0;
// Do the add.
float mx = max(*input1, *input2);
float mn = min(*input1, *input2);
float mn_mx = mn - mx;
*output = log1p(exp(mn_mx) * t1_sign * t2_sign) + mx;
// Change sign bit of output.
float sign = (*input1 > *input2) ? t1_sign : t2_sign;
unsigned int* inta = reinterpret_cast<unsigned int*>(output);
*inta = *inta ^ ((-((sign) == 1.0) ^ (*inta)) & (1 << 0));
}
};
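// Derivation of the signed log-space add above: writing each operand as
// s * e^v with sign s and log-magnitude v, and taking a = max(v1, v2),
// b = min(v1, v2),
//   s1*e^v1 + s2*e^v2 = s_a * e^a * (1 + s1*s2*e^(b-a)),
// so log|sum| = a + log1p(s1*s2*e^(b-a)) and the sum carries the sign of
// the larger-magnitude operand, which the final XOR stores back in bit 0.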
struct getsign_functor
{
__device__ void operator()(float* output, float* input) const
{
*output = (((*reinterpret_cast<const unsigned int*>(input) >> 0) & 1)) ? 1.0 : -1.0;
// Reset the last bit
unsigned int* inta = reinterpret_cast<unsigned int*>(input);
*inta &= ~(1 << 0);
// This is the nan trick.
if (*output != *output) {
*output = -CUDART_INF_F;
}
}
};
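// The "nan trick" above covers exact cancellation in log space: when the
// operands have equal magnitude and opposite sign, log1p(-1) yields -inf
// (and related edge cases can propagate NaN), so a NaN output is remapped
// to -CUDART_INF_F, the log-space representation of zero.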
TH_API void THNN_CudaModSign(
THCState *state, THCudaTensor *output, THCudaTensor *output_sign)
{
THC_pointwiseApply2(state, output, output_sign, pointwisemod_functor());
}
TH_API void THNN_CudaGetSign(
THCState *state, THCudaTensor *output, THCudaTensor *output_sign)
{
THC_pointwiseApply2(state, output_sign, output, getsign_functor());
}
TH_API void THNN_CudaSignedLogSpace_add(
THCState *state, THCudaTensor *output, THCudaTensor *output_sign,
THCudaTensor *input1, THCudaTensor *input2,
THCudaTensor *tensor1_sign, THCudaTensor *tensor2_sign) {
THC_pointwiseApply2(state, input1, tensor1_sign, pointwisemod_functor());
THC_pointwiseApply2(state, input2, tensor2_sign, pointwisemod_functor());
THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
THC_pointwiseApply2(state, output_sign, output, getsign_functor());
}
struct signedAdd_inplace_functor
{
__device__ void operator()(float* input1, const float* input2, const float* t1t2_sign_prod) const
{
float mx = max(*input1, *input2);
float mn = min(*input1, *input2);
float mn_mx = mn - mx;
*input1 = log1p(exp(mn_mx) * *t1t2_sign_prod) + mx;
if (*input1 != *input1) {
*input1 = -CUDART_INF_F;
}
}
};
struct prod_functor
{
__device__ void operator()(float* output, const float* input1, const float* input2) const
{
*output = *input1 * *input2;
}
};
struct ge_functor
{
__device__ void operator()(float* tensor1_sign, const float* ge, const float* tensor2_sign) const
{
if (*ge < 1) {
*tensor1_sign = *tensor2_sign;
}
}
};
struct fixnan_functor
{
__device__ void operator()(float* output) const
{
if (isnan(*output)) {
*output = -CUDART_INF_F;
}
}
};
TH_API void THNN_CudaSignedLogSpace_add_inplace(
THCState *state, THCudaTensor *input1, THCudaTensor *input2,
THCudaTensor *tensor1_sign, THCudaTensor *tensor2_sign,
THCudaTensor *t1t2_sign_prod, THCudaTensor *ge) {
THC_pointwiseApply3(state, t1t2_sign_prod, tensor1_sign, tensor2_sign, prod_functor());
THC_pointwiseApply3(state, tensor1_sign, ge, tensor2_sign, ge_functor());
THC_pointwiseApply3(state, input1, input2, t1t2_sign_prod, signedAdd_inplace_functor());
}
TH_API void THNN_CudaFixNaN(
THCState *state, THCudaTensor *input) {
THC_pointwiseApply1(state, input, fixnan_functor());
}
// void THNN_CudaLogSpace_abs(THCState *state, THCudaTensor *output,
// THCudaTensor *input)
// {
// THC_pointwiseApply2(state, output, input, abs_functor());
// }
// struct addexpOutput_functor
// {
// const float max_;
// addexpOutput_functor(float max)
// : max_(max)
// {}
// __device__ void operator()(float* output, const float* input) const
// {
// *output = exp(*input - max_);
// }
// };
// struct addexpOutputSign_functor
// {
// const float max_;
// addexpOutputSign_functor(float max)
// : max_(max)
// {}
// __device__ void operator()(float* output, const float* input, const float* input_sign) const
// {
// *output = exp(*input - max_) * (*input_sign);
// }
// };
// struct logaddOutput_functor
// {
// const float max_;
// logaddOutput_functor(float max)
// : max_(max)
// {}
// __device__ void operator()(float* output, const float* input) const
// {
// *output = log(fabs(*input)) + max_;
// if (*output != *output)
// *output = -CUDART_INF_F;
// }
// };
// void THNN_CudaLogSpace_bmm(THCState *state, THCudaTensor *output,
// THCudaTensor *input1, THCudaTensor *input2,
// THCudaTensor *tmp1, THCudaTensor *tmp2)
// {
// THCUNN_assertSameGPU(state, 2, input1, output);
// THCUNN_assertSameGPU(state, 2, input2, output);
// THCUNN_assertSameGPU(state, 2, tmp1, output);
// THCUNN_assertSameGPU(state, 2, tmp2, output);
// // THCudaTensor_resizeAs(state, output, input1);
// //find maxes
// float max1 = THCudaTensor_maxall(state, input1);
// float max2 = THCudaTensor_maxall(state, input2);
// THC_pointwiseApply2(state, tmp1, input1, addexpOutput_functor(max1));
// THC_pointwiseApply2(state, tmp2, input2, addexpOutput_functor(max2));
// // call bmm
// THCudaTensor_baddbmm(state, output, 0.0, output, 1.0, tmp1, tmp2);
// THC_pointwiseApply2(state, output, output, logaddOutput_functor(max1 + max2));
// }
// void THNN_CudaSignedLogSpace_bmm(THCState *state, THCudaTensor *output, THCudaTensor *output_sign,
// THCudaTensor *input1, THCudaTensor *input2,
// THCudaTensor *tensor1_sign, THCudaTensor *tensor2_sign,
// THCudaTensor *tmp1, THCudaTensor *tmp2)
// {
// float max1 = THCudaTensor_maxall(state, input1);
// float max2 = THCudaTensor_maxall(state, input2);
// THC_pointwiseApply3(state, tmp1, input1, tensor1_sign, addexpOutputSign_functor(max1));
// THC_pointwiseApply3(state, tmp2, input2, tensor2_sign, addexpOutputSign_functor(max2));
// // call bmm
// THCudaTensor_baddbmm(state, output, 0.0, output, 1.0, tmp1, tmp2);
// THCudaTensor_sign(state, output_sign, output);
// THC_pointwiseApply2(state, output, output, logaddOutput_functor(max1 + max2));
// }
// struct signedAdd_functor
// {
// __device__ void operator()(float* output, const float* input1, const float* input2) const
// {
// float mx = max(*input1, *input2);
// float mn = min(*input1, *input2);
// float t1_sign = (((*reinterpret_cast<const unsigned int*>(input1)) >> 0) & 1) ? 1.0 : -1.0;
// float t2_sign = (((*reinterpret_cast<const unsigned int*>(input2)) >> 0) & 1) ? 1.0 : -1.0;
// float mn_mx = mn - mx;
// *output = log1p(exp(mn_mx) * t1_sign * t2_sign) + mx;
// float sign = (*input1 > *input2) ? t1_sign : t2_sign;
// // Change sign bit of output.
// unsigned int* inta = reinterpret_cast<unsigned int*>(output);
// *inta = *inta ^ ((-((sign) == 1.0) ^ (*inta)) & (1 << 0));
// // *output = *(reinterpret_cast<float*>(inta));
// }
// };
// struct getsign_functor
// {
// __device__ void operator()(float* output, float* input) const
// {
// *output = (((*reinterpret_cast<const unsigned int*>(input) >> 0) & 1)) ? 1.0 : -1.0;
// // Reset the last bit
// unsigned int* inta = reinterpret_cast<unsigned int*>(input);
// *inta &= ~(1 << 0);
// if (*output != *output) {
// *output = -CUDART_INF_F;
// }
// }
// };
// TH_API void THNN_CudaSignedLogSpace_add(
// THCState *state, THCudaTensor *output, THCudaTensor *output_sign,
// THCudaTensor *input1, THCudaTensor *input2,
// THCudaTensor *tensor1_sign, THCudaTensor *tensor2_sign) {
// THC_pointwiseApply2(state, input1, tensor1_sign, pointwisemod_functor());
// THC_pointwiseApply2(state, input2, tensor2_sign, pointwisemod_functor());
// THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
// THC_pointwiseApply2(state, output_sign, output, getsign_functor());
// }
// TH_API void THNN_CudaSignedLogSpace_addSimple(
// THCState *state, THCudaTensor *output,
// THCudaTensor *input1, THCudaTensor *input2) {
// THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
// }
// TH_API void THNN_CudaSignedLogSpace_sum(
// THCState *state, THCudaTensor *input, int dim) {
// THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
// }
// struct sum_functor
// {
// const float max_;
// logaddOutput_functor(float max)
// : max_(max)
// {}
// __device__ void operator()(float* output, float* input) const
// {
// float sign = (((*reinterpret_cast<const unsigned int*>(input)) >> 0) & 1) ? 1.0 : -1.0;
// *output = exp(tensor - max_) * sign;
// }
// };
// TH_API void THNN_CudaSignedLogSpace_sumNumber(
// THCState *state, THCudaTensor *input) {
// float max1 = THCudaTensor_maxall(state, input1);
// THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
// // float sum =
// }
| ab4eda307beb6b148e0ba03fbf4939a3776dce39.cu | #include "THCUNN.h"
#include "common.h"
struct logadd_functor
{
__device__ void operator()(float* output, const float* input1, const float* input2) const
{
if (*input1 < *input2) {
*output = log(1 + exp(*input1 - *input2)) + *input2;
} else {
*output = log(1 + exp(*input2 - *input1)) + *input1;
}
}
};
TH_API void THNN_CudaLogSpace_add(
THCState *state, THCudaTensor *output,
THCudaTensor *input1, THCudaTensor *input2) {
THC_pointwiseApply3(state, output, input1, input2, logadd_functor());
}
struct logadd_inplace_functor
{
__device__ void operator()(float* input1, const float* input2) const
{
if (*input1 < *input2) {
*input1 = log(1 + exp(*input1 - *input2)) + *input2;
} else {
*input1 = log(1 + exp(*input2 - *input1)) + *input1;
}
}
};
TH_API void THNN_CudaLogSpace_add_inplace(
THCState *state, THCudaTensor *input1, THCudaTensor *input2) {
THC_pointwiseApply2(state, input1, input2, logadd_inplace_functor());
}
struct pointwisemod_functor
{
__device__ void operator()(float* output, const float* input) const
{
unsigned int* inta = reinterpret_cast<unsigned int*>(output);
// Set the last bit to the sign value;
*inta = *inta ^ ((-((*input) == 1.0) ^ (*inta)) & (1 << 0));
*output = *(reinterpret_cast<float*>(inta));
}
};
struct signedAdd_functor
{
__device__ void operator()(float* output, const float* input1, const float* input2) const
{
// Get back the signs
float t1_sign = (((*reinterpret_cast<const unsigned int*>(input1)) >> 0) & 1) ? 1.0 : -1.0;
float t2_sign = (((*reinterpret_cast<const unsigned int*>(input2)) >> 0) & 1) ? 1.0 : -1.0;
// Do the add.
float mx = max(*input1, *input2);
float mn = min(*input1, *input2);
float mn_mx = mn - mx;
*output = log1p(exp(mn_mx) * t1_sign * t2_sign) + mx;
// Change sign bit of output.
float sign = (*input1 > *input2) ? t1_sign : t2_sign;
unsigned int* inta = reinterpret_cast<unsigned int*>(output);
*inta = *inta ^ ((-((sign) == 1.0) ^ (*inta)) & (1 << 0));
}
};
struct getsign_functor
{
__device__ void operator()(float* output, float* input) const
{
*output = (((*reinterpret_cast<const unsigned int*>(input) >> 0) & 1)) ? 1.0 : -1.0;
// Reset the last bit
unsigned int* inta = reinterpret_cast<unsigned int*>(input);
*inta &= ~(1 << 0);
// This is the nan trick.
if (*output != *output) {
*output = -CUDART_INF_F;
}
}
};
TH_API void THNN_CudaModSign(
THCState *state, THCudaTensor *output, THCudaTensor *output_sign)
{
THC_pointwiseApply2(state, output, output_sign, pointwisemod_functor());
}
TH_API void THNN_CudaGetSign(
THCState *state, THCudaTensor *output, THCudaTensor *output_sign)
{
THC_pointwiseApply2(state, output_sign, output, getsign_functor());
}
TH_API void THNN_CudaSignedLogSpace_add(
THCState *state, THCudaTensor *output, THCudaTensor *output_sign,
THCudaTensor *input1, THCudaTensor *input2,
THCudaTensor *tensor1_sign, THCudaTensor *tensor2_sign) {
THC_pointwiseApply2(state, input1, tensor1_sign, pointwisemod_functor());
THC_pointwiseApply2(state, input2, tensor2_sign, pointwisemod_functor());
THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
THC_pointwiseApply2(state, output_sign, output, getsign_functor());
}
struct signedAdd_inplace_functor
{
__device__ void operator()(float* input1, const float* input2, const float* t1t2_sign_prod) const
{
float mx = max(*input1, *input2);
float mn = min(*input1, *input2);
float mn_mx = mn - mx;
*input1 = log1p(exp(mn_mx) * *t1t2_sign_prod) + mx;
if (*input1 != *input1) {
*input1 = -CUDART_INF_F;
}
}
};
struct prod_functor
{
__device__ void operator()(float* output, const float* input1, const float* input2) const
{
*output = *input1 * *input2;
}
};
struct ge_functor
{
__device__ void operator()(float* tensor1_sign, const float* ge, const float* tensor2_sign) const
{
if (*ge < 1) {
*tensor1_sign = *tensor2_sign;
}
}
};
struct fixnan_functor
{
__device__ void operator()(float* output) const
{
if (isnan(*output)) {
*output = -CUDART_INF_F;
}
}
};
TH_API void THNN_CudaSignedLogSpace_add_inplace(
THCState *state, THCudaTensor *input1, THCudaTensor *input2,
THCudaTensor *tensor1_sign, THCudaTensor *tensor2_sign,
THCudaTensor *t1t2_sign_prod, THCudaTensor *ge) {
THC_pointwiseApply3(state, t1t2_sign_prod, tensor1_sign, tensor2_sign, prod_functor());
THC_pointwiseApply3(state, tensor1_sign, ge, tensor2_sign, ge_functor());
THC_pointwiseApply3(state, input1, input2, t1t2_sign_prod, signedAdd_inplace_functor());
}
TH_API void THNN_CudaFixNaN(
THCState *state, THCudaTensor *input) {
THC_pointwiseApply1(state, input, fixnan_functor());
}
// void THNN_CudaLogSpace_abs(THCState *state, THCudaTensor *output,
// THCudaTensor *input)
// {
// THC_pointwiseApply2(state, output, input, abs_functor());
// }
// struct addexpOutput_functor
// {
// const float max_;
// addexpOutput_functor(float max)
// : max_(max)
// {}
// __device__ void operator()(float* output, const float* input) const
// {
// *output = exp(*input - max_);
// }
// };
// struct addexpOutputSign_functor
// {
// const float max_;
// addexpOutputSign_functor(float max)
// : max_(max)
// {}
// __device__ void operator()(float* output, const float* input, const float* input_sign) const
// {
// *output = exp(*input - max_) * (*input_sign);
// }
// };
// struct logaddOutput_functor
// {
// const float max_;
// logaddOutput_functor(float max)
// : max_(max)
// {}
// __device__ void operator()(float* output, const float* input) const
// {
// *output = log(fabs(*input)) + max_;
// if (*output != *output)
// *output = -CUDART_INF_F;
// }
// };
// void THNN_CudaLogSpace_bmm(THCState *state, THCudaTensor *output,
// THCudaTensor *input1, THCudaTensor *input2,
// THCudaTensor *tmp1, THCudaTensor *tmp2)
// {
// THCUNN_assertSameGPU(state, 2, input1, output);
// THCUNN_assertSameGPU(state, 2, input2, output);
// THCUNN_assertSameGPU(state, 2, tmp1, output);
// THCUNN_assertSameGPU(state, 2, tmp2, output);
// // THCudaTensor_resizeAs(state, output, input1);
// //find maxes
// float max1 = THCudaTensor_maxall(state, input1);
// float max2 = THCudaTensor_maxall(state, input2);
// THC_pointwiseApply2(state, tmp1, input1, addexpOutput_functor(max1));
// THC_pointwiseApply2(state, tmp2, input2, addexpOutput_functor(max2));
// // call bmm
// THCudaTensor_baddbmm(state, output, 0.0, output, 1.0, tmp1, tmp2);
// THC_pointwiseApply2(state, output, output, logaddOutput_functor(max1 + max2));
// }
// void THNN_CudaSignedLogSpace_bmm(THCState *state, THCudaTensor *output, THCudaTensor *output_sign,
// THCudaTensor *input1, THCudaTensor *input2,
// THCudaTensor *tensor1_sign, THCudaTensor *tensor2_sign,
// THCudaTensor *tmp1, THCudaTensor *tmp2)
// {
// float max1 = THCudaTensor_maxall(state, input1);
// float max2 = THCudaTensor_maxall(state, input2);
// THC_pointwiseApply3(state, tmp1, input1, tensor1_sign, addexpOutputSign_functor(max1));
// THC_pointwiseApply3(state, tmp2, input2, tensor2_sign, addexpOutputSign_functor(max2));
// // call bmm
// THCudaTensor_baddbmm(state, output, 0.0, output, 1.0, tmp1, tmp2);
// THCudaTensor_sign(state, output_sign, output);
// THC_pointwiseApply2(state, output, output, logaddOutput_functor(max1 + max2));
// }
// struct signedAdd_functor
// {
// __device__ void operator()(float* output, const float* input1, const float* input2) const
// {
// float mx = max(*input1, *input2);
// float mn = min(*input1, *input2);
// float t1_sign = (((*reinterpret_cast<const unsigned int*>(input1)) >> 0) & 1) ? 1.0 : -1.0;
// float t2_sign = (((*reinterpret_cast<const unsigned int*>(input2)) >> 0) & 1) ? 1.0 : -1.0;
// float mn_mx = mn - mx;
// *output = log1p(exp(mn_mx) * t1_sign * t2_sign) + mx;
// float sign = (*input1 > *input2) ? t1_sign : t2_sign;
// // Change sign bit of output.
// unsigned int* inta = reinterpret_cast<unsigned int*>(output);
// *inta = *inta ^ ((-((sign) == 1.0) ^ (*inta)) & (1 << 0));
// // *output = *(reinterpret_cast<float*>(inta));
// }
// };
// struct getsign_functor
// {
// __device__ void operator()(float* output, float* input) const
// {
// *output = (((*reinterpret_cast<const unsigned int*>(input) >> 0) & 1)) ? 1.0 : -1.0;
// // Reset the last bit
// unsigned int* inta = reinterpret_cast<unsigned int*>(input);
// *inta &= ~(1 << 0);
// if (*output != *output) {
// *output = -CUDART_INF_F;
// }
// }
// };
// TH_API void THNN_CudaSignedLogSpace_add(
// THCState *state, THCudaTensor *output, THCudaTensor *output_sign,
// THCudaTensor *input1, THCudaTensor *input2,
// THCudaTensor *tensor1_sign, THCudaTensor *tensor2_sign) {
// THC_pointwiseApply2(state, input1, tensor1_sign, pointwisemod_functor());
// THC_pointwiseApply2(state, input2, tensor2_sign, pointwisemod_functor());
// THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
// THC_pointwiseApply2(state, output_sign, output, getsign_functor());
// }
// TH_API void THNN_CudaSignedLogSpace_addSimple(
// THCState *state, THCudaTensor *output,
// THCudaTensor *input1, THCudaTensor *input2) {
// THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
// }
// TH_API void THNN_CudaSignedLogSpace_sum(
// THCState *state, THCudaTensor *input, int dim) {
// THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
// }
// struct sum_functor
// {
// const float max_;
// logaddOutput_functor(float max)
// : max_(max)
// {}
// __device__ void operator()(float* output, float* input) const
// {
// float sign = (((*reinterpret_cast<const unsigned int*>(input)) >> 0) & 1) ? 1.0 : -1.0;
// *output = exp(tensor - max_) * sign;
// }
// };
// TH_API void THNN_CudaSignedLogSpace_sumNumber(
// THCState *state, THCudaTensor *input) {
// float max1 = THCudaTensor_maxall(state, input1);
// THC_pointwiseApply3(state, output, input1, input2, signedAdd_functor());
// // float sum =
// }
|
9301f969da971eb56bb831382042f297e7a56078.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
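// Seeding note: the hash above mixes the iteration number, the thread's
// pixel index, and the bounce depth, so every (pixel, bounce, iteration)
// triple draws from an independent thrust random stream.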
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
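// Display note: dev_image holds a running sum of per-iteration radiance,
// so the division by iter above produces the progressive Monte Carlo
// average, which converges as iterations accumulate.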
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// TODO: clean up any extra device memory you created
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
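// A minimal sketch of the antialiasing jitter the TODO above asks for
// (illustrative only, reusing the seeded RNG helper from this file):
// offset the pixel coordinates by a random sub-pixel amount before
// building the direction.
//
//   thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth);
//   thrust::uniform_real_distribution<float> u01(0, 1);
//   float jx = (float)x + u01(rng) - 0.5f;
//   float jy = (float)y + u01(rng) - 0.5f;
//   segment.ray.direction = glm::normalize(cam.view
//       - cam.right * cam.pixelLength.x * (jx - (float)cam.resolution.x * 0.5f)
//       - cam.up * cam.pixelLength.y * (jy - (float)cam.resolution.y * 0.5f));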
__global__ void pathTraceOneBounce(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
else {
float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
pathSegments[idx].color *= u01(rng); // apply some noise because why not
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
}
}
}
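// A hedged sketch of the real BSDF shading step the LOOK comment above
// asks for, written as it might appear inside the `intersection.t > 0`
// branch (getPointOnRay and scatterRay are assumed helpers from
// intersections.h / interactions.h; treat the names as assumptions):
//
//   glm::vec3 hitPoint = getPointOnRay(pathSegments[idx].ray, intersection.t);
//   scatterRay(pathSegments[idx], hitPoint, intersection.surfaceNormal, material, rng);
//   pathSegments[idx].remainingBounces--;
//
// Paths whose remainingBounces reach zero can then be stream-compacted
// away in pathtrace below.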
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( pathTraceOneBounce) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
hipLaunchKernelGGL(( shadeFakeMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
iterationComplete = true; // TODO: should be based on stream compaction results.
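// A minimal sketch of the compaction this TODO refers to, using the
// thrust headers already included above (the isTerminated predicate is
// an illustrative assumption):
//
//   struct isTerminated {
//     __host__ __device__ bool operator()(const PathSegment &p) {
//       return p.remainingBounces <= 0;
//     }
//   };
//   dev_path_end = thrust::remove_if(thrust::device, dev_paths,
//                                    dev_path_end, isTerminated());
//   num_paths = dev_path_end - dev_paths;
//   iterationComplete = (num_paths == 0 || depth >= traceDepth);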
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| 9301f969da971eb56bb831382042f297e7a56078.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// TODO: clean up any extra device memory you created
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
__global__ void pathTraceOneBounce(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
else {
float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
pathSegments[idx].color *= u01(rng); // apply some noise because why not
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
pathTraceOneBounce <<<numblocksPathSegmentTracing, blockSize1d>>> (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shadeFakeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
iterationComplete = true; // TODO: should be based off stream compaction results.
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather<<<numBlocksPixels, blockSize1d>>>(num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
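// --- Editor's sketch (not part of the original file) ------------------------
// One way to implement the stream-compaction TODOs above with thrust,
// assuming PathSegment carries a bounce counter; the field name
// `remainingBounces` is an assumption, not confirmed by this file:
//
//   struct path_terminated {
//     __host__ __device__
//     bool operator()(const PathSegment& p) const {
//       return p.remainingBounces <= 0;
//     }
//   };
//
//   PathSegment* new_end = thrust::remove_if(thrust::device,
//       dev_paths, dev_paths + num_paths, path_terminated());
//   num_paths = new_end - dev_paths;
//   iterationComplete = (num_paths == 0) || (depth >= traceDepth);
// -----------------------------------------------------------------------------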
|
88d4c65fe45f5ebe73fe84fca3668fd77e20af7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
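// Editor's note (added): cuBLAS expects column-major storage while caffe is
// row-major, so the call above actually computes C^T = B^T * A^T in cuBLAS
// terms; swapping the operands and the M/N dimensions yields the row-major
// C = A * B without any explicit transpose.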
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
hipStream_t str) {
hipStream_t initial_stream;
CUBLAS_CHECK(hipblasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
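// Editor's note (added): CUDA_KERNEL_LOOP is caffe's grid-stride loop macro;
// it expands roughly to
//   for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n;
//        index += blockDim.x * gridDim.x)
// so a fixed launch configuration can cover any n.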
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
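// Editor's note (added): hiprandGenerateUniform{,Double} produce samples in
// (0, 1], so the two fixups above affinely map them onto (a, b]: first scale
// by the range b - a, then shift by a. Each step is skipped when it would be
// an identity operation.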
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 88d4c65fe45f5ebe73fe84fca3668fd77e20af7e.cu | /*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
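// Editor's note (added): the transpose flag is deliberately inverted here
// (CblasNoTrans maps to CUBLAS_OP_T): a row-major M x N matrix occupies the
// same bytes as a column-major N x M matrix, so asking cuBLAS for the
// transpose of that N x M view recovers the intended row-major gemv.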
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double* X,
cudaStream_t str) {
cudaStream_t initial_stream;
CUBLAS_CHECK(cublasGetStream(Caffe::cublas_handle(), &initial_stream));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), str));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), initial_stream));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
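// Editor's note (added): the alpha == 0 fast path above is valid because an
// all-zero byte pattern represents 0 for int and IEEE-754 float/double alike,
// so cudaMemset is equivalent to (and cheaper than) launching set_kernel.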
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
ef1dfddf94c4538f27274b90c5a854a16ac16ce6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ParseGraph.hxx"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define MAX 1024
using namespace std;
__global__
void isInfected(long long *person, long long *solution, bool *visited, bool *status, long long n) {
long long ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= n) return;
visited[person[ind] - 1] = true;
if (status[person[ind] - 1]) {
*solution = person[ind] - 1;
}
}
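// Editor's note (added): several threads may store to *solution in the same
// launch, but the race is benign for this search: every candidate written in
// one launch sits at the same BFS depth from the start person, so any of them
// is a valid "closest infected person".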
int main(void) {
readNames();
makeGraph();
bool *h_Status = new bool[Status.size()];
copy(Status.begin(), Status.end(), h_Status);
bool *d_Status;
hipMalloc((void**)&d_Status, sizeof(bool) * Status.size());
hipMemcpy(d_Status, h_Status, sizeof(bool) * Status.size(), hipMemcpyHostToDevice);
delete[] h_Status;
while (true) {
vector<long long> neighbors;
bool *visited;
long long *solution;
hipMallocManaged((void**)&solution, sizeof(long long));
hipMallocManaged((void**)&visited, sizeof(bool) * People.size());
hipMemset(visited, 0, sizeof(bool) * People.size()); // managed memory is not guaranteed to be zero-initialized
*solution = -1;
cout << "Enter ID of person: ";
long long id; cin >> id;
if (id < 1 || id >= People.size()) {
cerr << "\nID out of range\n";
return -1;
}
neighbors.push_back(id);
cout << "\nProcessing...";
Timer timer, op;
while (!neighbors.empty() && *solution == -1) {
if (op.GetTime() >= 0.1) {
cout << "\rEntries Processed: " << count_if(visited, visited + People.size(), [](bool status) {
return status;
});
cout.flush();
op.Reset();
}
long long *d_neighbors;
hipMalloc((void**)&d_neighbors, sizeof(long long) * neighbors.size());
hipMemcpy(d_neighbors, &neighbors[0], sizeof(long long) * neighbors.size(), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( isInfected), dim3(neighbors.size() / MAX + 1), dim3(MAX), 0, 0, d_neighbors, solution, visited, d_Status, neighbors.size());
hipDeviceSynchronize();
hipFree(d_neighbors);
if (*solution != -1) {
cout << "\rEntries Processed: " << count_if(visited, visited + People.size(), [](bool status) {
return status;
})
<< "... Done" << endl;
cout << "Closest Infected Person: \n" << People[*solution].info() << endl;
break;
}
vector<long long> newNeighbors;
for (auto neighbor: neighbors) {
copy_if(Graph[neighbor - 1].begin(), Graph[neighbor - 1].end(), back_inserter(newNeighbors), [visited](long long id) {
return !visited[id];
});
}
neighbors.clear();
neighbors = newNeighbors;
}
double time = timer.Stop();
if (*solution == -1) {
cout << "\rEntries Processed: " << count_if(visited, visited + People.size(), [](bool status) {
return status;
})
<< "... Done" << endl;
cout << "\nPerson has not come in contact with an infected person\n";
}
cout << "\nTime Taken: " << time << endl;
hipFree(solution);
hipFree(visited);
}
return 0;
} | ef1dfddf94c4538f27274b90c5a854a16ac16ce6.cu | #include "ParseGraph.hxx"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define MAX 1024
using namespace std;
__global__
void isInfected(long long *person, long long *solution, bool *visited, bool *status, long long n) {
long long ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= n) return;
visited[person[ind] - 1] = true;
if (status[person[ind] - 1]) {
*solution = person[ind] - 1;
}
}
int main(void) {
readNames();
makeGraph();
bool *h_Status = new bool[Status.size()];
copy(Status.begin(), Status.end(), h_Status);
bool *d_Status;
cudaMalloc((void**)&d_Status, sizeof(bool) * Status.size());
cudaMemcpy(d_Status, h_Status, sizeof(bool) * Status.size(), cudaMemcpyHostToDevice);
delete h_Status;
while (true) {
vector<long long> neighbors;
bool *visited;
long long *solution;
cudaMallocManaged((void**)&solution, sizeof(long long));
cudaMallocManaged((void**)&visited, sizeof(bool) * People.size());
cudaMemset(visited, 0, sizeof(bool) * People.size()); // managed memory is not guaranteed to be zero-initialized
*solution = -1;
cout << "Enter ID of person: ";
long long id; cin >> id;
if (id < 1 || id >= People.size()) {
cerr << "\nID out of range\n";
return -1;
}
neighbors.push_back(id);
cout << "\nProcessing...";
Timer timer, op;
while (!neighbors.empty() && *solution == -1) {
if (op.GetTime() >= 0.1) {
cout << "\rEntries Processed: " << count_if(visited, visited + People.size(), [](bool status) {
return status;
});
cout.flush();
op.Reset();
}
long long *d_neighbors;
cudaMalloc((void**)&d_neighbors, sizeof(long long) * neighbors.size());
cudaMemcpy(d_neighbors, &neighbors[0], sizeof(long long) * neighbors.size(), cudaMemcpyHostToDevice);
isInfected<<<neighbors.size() / MAX + 1, MAX>>>(d_neighbors, solution, visited, d_Status, neighbors.size());
cudaDeviceSynchronize();
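// Editor's note (added): this synchronize is required before the host touches
// the managed allocations (*solution, visited) that the kernel may still be
// writing.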
cudaFree(d_neighbors);
if (*solution != -1) {
cout << "\rEntries Processed: " << count_if(visited, visited + People.size(), [](bool status) {
return status;
})
<< "... Done" << endl;
cout << "Closest Infected Person: \n" << People[*solution].info() << endl;
break;
}
vector<long long> newNeighbors;
for (auto neighbor: neighbors) {
copy_if(Graph[neighbor - 1].begin(), Graph[neighbor - 1].end(), back_inserter(newNeighbors), [visited](long long id) {
return !visited[id];
});
}
neighbors.clear();
neighbors = newNeighbors;
}
double time = timer.Stop();
if (*solution == -1) {
cout << "\rEntries Processed: " << count_if(visited, visited + People.size(), [](bool status) {
return status;
})
<< "... Done" << endl;
cout << "\nPerson has not come in contact with an infected person\n";
}
cout << "\nTime Taken: " << time << endl;
cudaFree(solution);
cudaFree(visited);
}
return 0;
} |
fcaf0cd5579507609fa7a30535a2859ccac289d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
const int kMaxWordSize = 512;
const int kNumThread = 1024;
static_assert(kMaxWordSize <= kNumThread, "kMaxWordSize must not exceed kNumThread.");
__device__ __host__ int CeilDiv( int a, int b ) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign( int a, int b ) { return CeilDiv(a, b) * b; }
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Count position, step 1, device
///
__global__
void CountPositionDevice1( const char *text, int *pos, int text_size ) {
auto idx = blockIdx.x * blockDim.x + threadIdx.x;
auto idx_end = (blockIdx.x + 1) * blockDim.x;
if ( idx_end > text_size ) {
idx_end = text_size;
}
// Initialize position
if ( idx < idx_end ) {
if ( text[idx] == '\n' ) {
pos[idx] = 0;
} else if ( idx == 0 ) {
pos[idx] = 1;
} else {
pos[idx] = -1;
}
}
__syncthreads();
// Binary search
for ( auto k = 1; k <= kMaxWordSize; k *= 2 ) {
auto idxk = idx + k;
if ( idxk < idx_end && pos[idx] >= 0 && pos[idxk] < 0 ) {
pos[idxk] = pos[idx] + k;
}
__syncthreads();
}
}
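// Editor's note (added): the k-doubling pass above is pointer jumping. For a
// word starting at text index i, initialization leaves pos = {1, -1, -1, ...};
// the k = 1 round fills pos[i+1] = pos[i] + 1 = 2, the k = 2 round fills
// pos[i+2] = pos[i] + 2 and pos[i+3] = pos[i+1] + 2, and so on, resolving any
// in-block prefix of up to kMaxWordSize characters in O(log kMaxWordSize)
// rounds. Positions that span a block boundary are patched afterwards by
// CountPositionDevice2 using the last entry of the preceding block.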
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Count position, step 2, device
///
__global__
void CountPositionDevice2( int *pos, int text_size ) {
auto idx = threadIdx.x;
pos += blockIdx.x * blockDim.x;
if ( pos[idx] < 0 ) {
pos[idx] = pos[-1] + idx + 1;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Count position, host
///
void CountPosition( const char *text, int *pos, int text_size ) {
hipLaunchKernelGGL(( CountPositionDevice1), dim3(text_size / kNumThread + 1), dim3(kNumThread), 0, 0, text, pos, text_size);
hipDeviceSynchronize();
hipLaunchKernelGGL(( CountPositionDevice2), dim3(text_size / kNumThread + 1), dim3(kNumThread), 0, 0, pos, text_size);
}
struct pred {
__host__ __device__
bool operator()( const int x ) {
return (x == 1);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Extract head
///
int ExtractHead( const int *pos, int *head, int text_size) {
int nhead;
thrust::device_ptr<const int> pos_d(pos);
thrust::device_ptr<int> head_d(head);
// Extract head
thrust::counting_iterator<int> idx_first(0), idx_last = idx_first + text_size;
nhead = thrust::copy_if(idx_first, idx_last, pos_d, head_d, pred()) - head_d;
return nhead;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Part 3, device, reverse words
///
__global__
void Part3Device( char *text, int *pos, int *head, int text_size, int n_head ) {
auto idx_start = head[blockIdx.x];
auto idx_end = ((blockIdx.x == n_head-1) ? text_size : head[blockIdx.x+1]);
while ( pos[--idx_end] == 0 );
char temp;
for ( auto i = threadIdx.x; i < (idx_end-idx_start) / 2; i += blockDim.x ) {
temp = text[idx_start+i];
text[idx_start+i] = text[idx_end-i];
text[idx_end-i] = temp;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Part 3
///
void Part3( char *text, int *pos, int *head, int text_size, int n_head ) {
hipLaunchKernelGGL(( Part3Device), dim3(n_head), dim3(kNumThread), 0, 0, text, pos, head, text_size, n_head);
}
| fcaf0cd5579507609fa7a30535a2859ccac289d8.cu | #include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
const int kMaxWordSize = 512;
const int kNumThread = 1024;
static_assert(kMaxWordSize <= kNumThread, "kMaxWordSize must not exceed kNumThread.");
__device__ __host__ int CeilDiv( int a, int b ) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign( int a, int b ) { return CeilDiv(a, b) * b; }
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Count position, step 1, device
///
__global__
void CountPositionDevice1( const char *text, int *pos, int text_size ) {
auto idx = blockIdx.x * blockDim.x + threadIdx.x;
auto idx_end = (blockIdx.x + 1) * blockDim.x;
if ( idx_end > text_size ) {
idx_end = text_size;
}
// Initialize position
if ( idx < idx_end ) {
if ( text[idx] == '\n' ) {
pos[idx] = 0;
} else if ( idx == 0 ) {
pos[idx] = 1;
} else {
pos[idx] = -1;
}
}
__syncthreads();
// Binary search
for ( auto k = 1; k <= kMaxWordSize; k *= 2 ) {
auto idxk = idx + k;
if ( idxk < idx_end && pos[idx] >= 0 && pos[idxk] < 0 ) {
pos[idxk] = pos[idx] + k;
}
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Count position, step 2, device
///
__global__
void CountPositionDevice2( int *pos, int text_size ) {
auto idx = threadIdx.x;
pos += blockIdx.x * blockDim.x;
if ( pos[idx] < 0 ) {
pos[idx] = pos[-1] + idx + 1;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Count position, host
///
void CountPosition( const char *text, int *pos, int text_size ) {
CountPositionDevice1<<<text_size / kNumThread + 1, kNumThread>>>(text, pos, text_size);
cudaDeviceSynchronize();
CountPositionDevice2<<<text_size / kNumThread + 1, kNumThread>>>(pos, text_size);
}
struct pred {
__host__ __device__
bool operator()( const int x ) {
return (x == 1);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Extract head
///
int ExtractHead( const int *pos, int *head, int text_size) {
int nhead;
thrust::device_ptr<const int> pos_d(pos);
thrust::device_ptr<int> head_d(head);
// Extract head
thrust::counting_iterator<int> idx_first(0), idx_last = idx_first + text_size;
nhead = thrust::copy_if(idx_first, idx_last, pos_d, head_d, pred()) - head_d;
return nhead;
}
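// Editor's note (added): thrust::copy_if with a stencil is used here: the
// counting iterator supplies the indices 0..text_size-1, pos_d is the stencil,
// and pred keeps exactly the indices whose position value is 1 -- i.e. the
// first character of every word. The return value is an iterator one past the
// last head written, so subtracting head_d yields the head count.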
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Part 3, device, reverse words
///
__global__
void Part3Device( char *text, int *pos, int *head, int text_size, int n_head ) {
auto idx_start = head[blockIdx.x];
auto idx_end = ((blockIdx.x == n_head-1) ? text_size : head[blockIdx.x+1]);
while ( pos[--idx_end] == 0 );
char temp;
for ( auto i = threadIdx.x; i < (idx_end-idx_start) / 2; i += blockDim.x ) {
temp = text[idx_start+i];
text[idx_start+i] = text[idx_end-i];
text[idx_end-i] = temp;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Part 3
///
void Part3( char *text, int *pos, int *head, int text_size, int n_head ) {
Part3Device<<<n_head, kNumThread>>>(text, pos, head, text_size, n_head);
}
|
5ea7bcc66c0f1e9f16c7db0ece8db64f682e55c1.hip | // !!! This is a file automatically generated by hipify!!!
/*
Fractal code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <cmath>
#include <hip/hip_runtime.h>
static const int ThreadsPerBlock = 512;
static __global__ void fractal(const int width, const int start_frame, const int gpu_frames, unsigned char* const pic)
{
// todo: use the GPU to compute the requested frames (base the code on the previous project)
const int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < gpu_frames * width * width)
{
const double Delta = 0.002;
const double xMid = 0.2315059;
const double yMid = 0.5214880;
// compute pixels of each frame
const int frame = i / (width * width);
// the zoom factor must use the global frame number, so offset the chunk-local
// frame index by start_frame (otherwise that kernel parameter goes unused)
const double delta = Delta * pow(0.98, start_frame + frame);
const double xMin = xMid - delta;
const double yMin = yMid - delta;
const double dw = 2.0 * delta / width;
const int row = (i / width) % width;
const double cy = yMin + row * dw;
const int col = i % width;
const double cx = xMin + col * dw;
double x = cx;
double y = cy;
double x2, y2;
int depth = 256;
do {
x2 = x * x;
y2 = y * y;
y = 2.0 * x * y + cy;
x = x2 - y2 + cx;
depth--;
} while ((depth > 0) && ((x2 + y2) < 5.0));
pic[frame * width * width + row * width + col] = (unsigned char)depth;
}
}
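// Editor's note (added): this is a standard escape-time fractal. With
// z = x + iy and c = cx + icy, the loop iterates z <- z^2 + c until
// |z|^2 >= 5.0 or the 256-iteration budget runs out, and the leftover budget
// becomes the pixel's gray level. Each successive frame multiplies the window
// half-width by 0.98, producing the zoom animation.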
unsigned char* GPU_Init(const int gpu_frames, const int width)
{
unsigned char* d_pic;
if (hipSuccess != hipMalloc((void **)&d_pic, gpu_frames * width * width * sizeof(unsigned char))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}
return d_pic;
}
void GPU_Exec(const int start_frame, const int gpu_frames, const int width, unsigned char* d_pic)
{
// todo: launch the kernel with ThreadsPerBlock and the appropriate number of blocks (do not wait for the kernel to finish)
hipLaunchKernelGGL(( fractal), dim3(((gpu_frames * width * width) + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, width, start_frame, gpu_frames, d_pic);
}
void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* d_pic)
{
// todo: copy the result from the device to the host and free the device memory
if (hipSuccess != hipMemcpy(pic, d_pic, sizeof(unsigned char) * gpu_frames * width * width, hipMemcpyDeviceToHost))
{fprintf(stderr, "ERROR: copying from device failed\n");
exit(-1);}
hipFree(d_pic);
}
| 5ea7bcc66c0f1e9f16c7db0ece8db64f682e55c1.cu | /*
Fractal code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <cmath>
#include <cuda.h>
static const int ThreadsPerBlock = 512;
static __global__ void fractal(const int width, const int start_frame, const int gpu_frames, unsigned char* const pic)
{
// todo: use the GPU to compute the requested frames (base the code on the previous project)
const int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i < gpu_frames * width * width)
{
const double Delta = 0.002;
const double xMid = 0.2315059;
const double yMid = 0.5214880;
// compute pixels of each frame
const int frame = i / (width * width);
// the zoom factor must use the global frame number, so offset the chunk-local
// frame index by start_frame (otherwise that kernel parameter goes unused)
const double delta = Delta * pow(0.98, start_frame + frame);
const double xMin = xMid - delta;
const double yMin = yMid - delta;
const double dw = 2.0 * delta / width;
const int row = (i / width) % width;
const double cy = yMin + row * dw;
const int col = i % width;
const double cx = xMin + col * dw;
double x = cx;
double y = cy;
double x2, y2;
int depth = 256;
do {
x2 = x * x;
y2 = y * y;
y = 2.0 * x * y + cy;
x = x2 - y2 + cx;
depth--;
} while ((depth > 0) && ((x2 + y2) < 5.0));
pic[frame * width * width + row * width + col] = (unsigned char)depth;
}
}
unsigned char* GPU_Init(const int gpu_frames, const int width)
{
unsigned char* d_pic;
if (cudaSuccess != cudaMalloc((void **)&d_pic, gpu_frames * width * width * sizeof(unsigned char))) {fprintf(stderr, "ERROR: could not allocate memory\n"); exit(-1);}
return d_pic;
}
void GPU_Exec(const int start_frame, const int gpu_frames, const int width, unsigned char* d_pic)
{
// todo: launch the kernel with ThreadsPerBlock and the appropriate number of blocks (do not wait for the kernel to finish)
fractal<<<((gpu_frames * width * width) + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(width, start_frame, gpu_frames, d_pic);
}
void GPU_Fini(const int gpu_frames, const int width, unsigned char* pic, unsigned char* d_pic)
{
// todo: copy the result from the device to the host and free the device memory
if (cudaSuccess != cudaMemcpy(pic, d_pic, sizeof(unsigned char) * gpu_frames * width * width, cudaMemcpyDeviceToHost))
{fprintf(stderr, "ERROR: copying from device failed\n");
exit(-1);}
cudaFree(d_pic);
}
|
b57790553b60ced398e5048ed1d07b45a74250f6.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <random>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// #include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
using namespace std;
typedef double ld;
typedef long long LL;
using namespace std;
// ============= config =============
const int N = 5200 * 5200;
// ==================================
ld *genrandomarray(int n, ld l = 0, ld r = 100000) {
static default_random_engine dre(time(0));
uniform_int_distribution<int> u((int)round(l * 100), (int)round(r * 100));
ld *res = (ld*)malloc(n * sizeof(ld));
for (int i=0; i<n; ++i)
res[i] = 1.0 * u(dre) / 100;
return res;
}
inline void handleCudaError(hipError_t err, string name = "cuda call") {
if (err != hipSuccess) {
cerr << name << endl;
cerr << hipGetErrorString(err) << endl;
exit(0);
}
}
ld *h_a, *h_b, *h_c, *d_a, *d_b, *d_c;
__global__ void vectorAdd(ld *d_a, ld *d_b, ld *d_c, int N) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < N) d_c[index] = d_a[index] + d_b[index];
}
int main() {
h_a = genrandomarray(N);
h_b = genrandomarray(N);
// h_c = genrandomarray(N);
int size = sizeof(ld) * N;
h_c = (ld*)malloc(size);
for (int i=0; i<N; ++i) {
h_c[i] = h_a[i] + h_b[i];
}
handleCudaError(hipMalloc(&d_a, size));
handleCudaError(hipMalloc(&d_b, size));
handleCudaError(hipMalloc(&d_c, size));
handleCudaError(hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice));
handleCudaError(hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice));
int block_size = 1024; // was 2048, but a block may hold at most 1024 threads
int grids = (N + block_size - 1) / block_size;
handleCudaError(hipGetLastError(), "check before kernel");
hipLaunchKernelGGL(( vectorAdd), dim3(grids), dim3(block_size), 0, 0, d_a, d_b, d_c, N);
handleCudaError(hipGetLastError(), "after kernel");
// d_c is device memory and must not be dereferenced on the host:
// copy the result back before verifying
ld *h_res = (ld*)malloc(size);
handleCudaError(hipMemcpy(h_res, d_c, size, hipMemcpyDeviceToHost), "copy result back");
for (int i=0; i<N; ++i) {
ld relative_error = fabs(1 - h_res[i] / h_c[i]);
if (relative_error > 1e-6) {
puts("WA");
return 0;
}
}
puts("AC");
return 0;
} | b57790553b60ced398e5048ed1d07b45a74250f6.cu | #include <cstdio>
#include <cstring>
#include <cstdlib>
#include <random>
#include <algorithm>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// #include <device_functions.h>
#include <cuda_runtime_api.h>
using namespace std;
typedef double ld;
typedef long long LL;
using namespace std;
// ============= config =============
const int N = 5200 * 5200;
// ==================================
ld *genrandomarray(int n, ld l = 0, ld r = 100000) {
static default_random_engine dre(time(0));
uniform_int_distribution<int> u((int)round(l * 100), (int)round(r * 100));
ld *res = (ld*)malloc(n * sizeof(ld));
for (int i=0; i<n; ++i)
res[i] = 1.0 * u(dre) / 100;
return res;
}
inline void handleCudaError(cudaError_t err, string name = "cuda call") {
if (err != cudaSuccess) {
cerr << name << endl;
cerr << cudaGetErrorString(err) << endl;
exit(0);
}
}
ld *h_a, *h_b, *h_c, *d_a, *d_b, *d_c;
__global__ void vectorAdd(ld *d_a, ld *d_b, ld *d_c, int N) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < N) d_c[index] = d_a[index] + d_b[index];
}
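// Editor's note (added): the one-element-per-thread mapping above is fine for
// this N; for arrays larger than the maximum grid size, the usual
// generalization is a grid-stride loop (index += blockDim.x * gridDim.x).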
int main() {
h_a = genrandomarray(N);
h_b = genrandomarray(N);
// h_c = genrandomarray(N);
int size = sizeof(ld) * N;
h_c = (ld*)malloc(size);
for (int i=0; i<N; ++i) {
h_c[i] = h_a[i] + h_b[i];
}
handleCudaError(cudaMalloc(&d_a, size));
handleCudaError(cudaMalloc(&d_b, size));
handleCudaError(cudaMalloc(&d_c, size));
handleCudaError(cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice));
handleCudaError(cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice));
int block_size = 1024; // was 2048, but a block may hold at most 1024 threads
int grids = (N + block_size - 1) / block_size;
handleCudaError(cudaGetLastError(), "check before kernel");
vectorAdd<<<grids, block_size>>>(d_a, d_b, d_c, N);
handleCudaError(cudaGetLastError(), "after kernel");
// d_c is device memory and must not be dereferenced on the host:
// copy the result back before verifying
ld *h_res = (ld*)malloc(size);
handleCudaError(cudaMemcpy(h_res, d_c, size, cudaMemcpyDeviceToHost), "copy result back");
for (int i=0; i<N; ++i) {
ld relative_error = fabs(1 - h_res[i] / h_c[i]);
if (relative_error > 1e-6) {
puts("WA");
return 0;
}
}
puts("AC");
return 0;
} |
a673c061fd5200fc559406df567913505ebb02dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* globalRead.cu
*
* Microbenchmark for read bandwidth from global memory.
*
* Build with: nvcc -I ../chLib <options> globalRead.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
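/*
 * Hypothetical invocation (flag names taken from the chCommandLineGet/
 * chCommandLineGetBool calls in main(); exact option syntax depends on
 * chCommandLine.h):
 *     ./globalRead --device 0 --size 16 --uncoalesced
 */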
#include <stdio.h>
#include <chError.h>
#include <chCommandLine.h>
template<class T>
__device__ __host__ T
plus( const T& a, const T& b )
{
T ret = a;
ret += b;
return ret;
}
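// The built-in int2/int4 vector types define no arithmetic operators, so the
// wrappers below pair them with plus<> specializations for the 8- and 16-byte
// operand sizes used in the shmoo.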
struct myInt2 {
int2 i2;
__host__ __device__ myInt2() { }
__host__ __device__ myInt2( int i ) { i2.x = i2.y = i; }
};
template<>
__device__ __host__ myInt2
plus( const myInt2& a, const myInt2& b )
{
myInt2 ret;
ret.i2.x = a.i2.x + b.i2.x;
ret.i2.y = a.i2.y + b.i2.y;
return ret;
}
struct myInt4 {
int4 i4;
__host__ __device__ myInt4() { }
__host__ __device__ myInt4( int i ) { i4.x = i4.y = i4.z = i4.w = i; }
};
template<>
__device__ __host__ myInt4
plus( const myInt4& a, const myInt4& b )
{
myInt4 ret;
ret.i4.x = a.i4.x + b.i4.x;
ret.i4.y = a.i4.y + b.i4.y;
ret.i4.z = a.i4.z + b.i4.z;
ret.i4.w = a.i4.w + b.i4.w;
return ret;
}
template<class T, const int n>
__global__ void
GlobalReads( T *out, const T *in, size_t N, bool bWriteResults )
{
T sums[n];
size_t i;
for ( int j = 0; j < n; j++ ) {
sums[j] = T(0);
}
for ( i = n*blockIdx.x*blockDim.x+threadIdx.x;
i < N-n*blockDim.x*gridDim.x;
i += n*blockDim.x*gridDim.x ) {
for ( int j = 0; j < n; j++ ) {
size_t index = i+j*blockDim.x;
sums[j] = plus( sums[j], in[index] );
}
}
// to avoid the (index<N) conditional in the inner loop,
// we left off some work at the end
for ( int j = 0; j < n; j++ ) {
size_t index = i+j*blockDim.x;
if ( index<N ) sums[j] = plus( sums[j], in[index] );
}
if ( bWriteResults ) {
T sum = T(0);
for ( int j = 0; j < n; j++ ) {
sum = plus( sum, sums[j] );
}
out[blockIdx.x*blockDim.x+threadIdx.x] = sum;
}
}
template<class T, const int n, bool bOffset>
double
BandwidthReads( size_t N, int cBlocks, int cThreads )
{
T *in = 0;
T *out = 0;
T *hostIn = 0;
T *hostOut = 0;
double ret = 0.0;
double elapsedTime;
float ms;
int cIterations;
hipError_t status;
T sumCPU;
hipEvent_t evStart = 0;
hipEvent_t evStop = 0;
cuda(Malloc( &in, N*sizeof(T) ) );
cuda(Malloc( &out, cBlocks*cThreads*sizeof(T) ) );
hostIn = new T[N];
if ( ! hostIn )
goto Error;
hostOut = new T[cBlocks*cThreads];
if ( ! hostOut )
goto Error;
sumCPU = T(0);
// populate input array with random numbers
for ( size_t i = bOffset; i < N; i++ ) {
T nextrand = T(rand());
sumCPU = plus( sumCPU, nextrand );
hostIn[i] = nextrand;
}
cuda(Memcpy( in, hostIn, N*sizeof(T), hipMemcpyHostToDevice ) );
cuda(EventCreate( &evStart ) );
cuda(EventCreate( &evStop ) );
{
// confirm that kernel launch with this configuration writes correct result
hipLaunchKernelGGL(( GlobalReads<T,n>), dim3(cBlocks),dim3(cThreads), 0, 0, out, in+bOffset, N-bOffset, true );
cuda(Memcpy( hostOut, out, cBlocks*cThreads*sizeof(T), hipMemcpyDeviceToHost ) );
cuda(GetLastError() );
T sumGPU = T(0);
for ( size_t i = 0; i < cBlocks*cThreads; i++ ) {
sumGPU = plus( sumGPU, hostOut[i] );
}
if ( memcmp( &sumCPU, &sumGPU, sizeof(T) ) ) {
printf( "Incorrect sum computed!\n" );
goto Error;
}
}
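    // Timed loop: several back-to-back launches bracketed by timing events.
    // bWriteResults=false keeps the kernel read-only so result stores do not
    // pollute the read-bandwidth measurement.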
cIterations = 10;
hipEventRecord( evStart );
for ( int i = 0; i < cIterations; i++ ) {
hipLaunchKernelGGL(( GlobalReads<T,n>), dim3(cBlocks),dim3(cThreads), 0, 0, out, in+bOffset, N-bOffset, false );
}
hipEventRecord( evStop );
cuda(DeviceSynchronize() );
// make configurations that cannot launch error-out with 0 bandwidth
cuda(GetLastError() );
cuda(EventElapsedTime( &ms, evStart, evStop ) );
elapsedTime = ms/1000.0f;
// bytes per second
ret = ((double)N*cIterations*sizeof(T)) / elapsedTime;
// gigabytes per second
ret /= 1024.0*1048576.0;
Error:
if ( hostIn ) delete[] hostIn;
if ( hostOut ) delete[] hostOut;
hipEventDestroy( evStart );
hipEventDestroy( evStop );
hipFree( in );
hipFree( out );
return ret;
}
template<class T, const int n, bool bOffset>
double
ReportRow( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks )
{
int maxThreads = 0;
double maxBW = 0.0;
printf( "%d\t", n );
for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) {
double bw = BandwidthReads<T,n,bOffset>( N, cBlocks, cThreads );
if ( bw > maxBW ) {
maxBW = bw;
maxThreads = cThreads;
}
printf( "%.2f\t", bw );
}
printf( "%.2f\t%d\n", maxBW, maxThreads );
return maxBW;
}
template<class T, bool bCoalesced>
void
Shmoo( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks )
{
printf( "Operand size: %d byte%c\n", (int) sizeof(T), sizeof(T)==1 ? '\0' : 's' );
printf( "Input size: %dM operands\n", (int) (N>>20) );
printf( "Unroll\t" );
for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) {
printf( "%d\t", cThreads );
}
printf( "maxBW\tmaxThreads\n" );
ReportRow<T, 1, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 2, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 3, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 4, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 5, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 6, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 7, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 8, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 9, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,10, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,11, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,12, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,13, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,14, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,15, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,16, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
}
int
main( int argc, char *argv[] )
{
hipError_t status;
int device = 0;
int size = 16;
hipDeviceProp_t prop;
if ( chCommandLineGet( &device, "device", argc, argv ) ) {
printf( "Using device %d...\n", device );
}
cuda(SetDevice(device) );
cuda(GetDeviceProperties( &prop, device ) );
printf( "Running globalRead.cu microbenchmark on %s\n", prop.name );
if ( chCommandLineGet( &size, "size", argc, argv ) ) {
printf( "Using %dM operands ...\n", size );
}
if ( chCommandLineGetBool( "uncoalesced", argc, argv ) ) {
printf( "Using uncoalesced memory transactions\n" );
Shmoo<char,false>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<short,false>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<int,false>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<myInt2,false>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<myInt4,false>( (size_t) size*1048576, 32, 512, 1500 );
} else {
printf( "Using coalesced memory transactions\n" );
Shmoo<char,true>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<short,true>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<int,true>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<myInt2,true>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<myInt4,true>( (size_t) size*1048576, 32, 512, 1500 );
}
return 0;
Error:
return 1;
}
| a673c061fd5200fc559406df567913505ebb02dd.cu | /*
*
* globalRead.cu
*
* Microbenchmark for read bandwidth from global memory.
*
* Build with: nvcc -I ../chLib <options> globalRead.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
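/*
 * Hypothetical invocation (flag names taken from the chCommandLineGet/
 * chCommandLineGetBool calls in main(); exact option syntax depends on
 * chCommandLine.h):
 *     ./globalRead --device 0 --size 16 --uncoalesced
 */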
#include <stdio.h>
#include <chError.h>
#include <chCommandLine.h>
template<class T>
__device__ __host__ T
plus( const T& a, const T& b )
{
T ret = a;
ret += b;
return ret;
}
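// The built-in int2/int4 vector types define no arithmetic operators, so the
// wrappers below pair them with plus<> specializations for the 8- and 16-byte
// operand sizes used in the shmoo.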
struct myInt2 {
int2 i2;
__host__ __device__ myInt2() { }
__host__ __device__ myInt2( int i ) { i2.x = i2.y = i; }
};
template<>
__device__ __host__ myInt2
plus( const myInt2& a, const myInt2& b )
{
myInt2 ret;
ret.i2.x = a.i2.x + b.i2.x;
ret.i2.y = a.i2.y + b.i2.y;
return ret;
}
struct myInt4 {
int4 i4;
__host__ __device__ myInt4() { }
__host__ __device__ myInt4( int i ) { i4.x = i4.y = i4.z = i4.w = i; }
};
template<>
__device__ __host__ myInt4
plus( const myInt4& a, const myInt4& b )
{
myInt4 ret;
ret.i4.x = a.i4.x + b.i4.x;
ret.i4.y = a.i4.y + b.i4.y;
ret.i4.z = a.i4.z + b.i4.z;
ret.i4.w = a.i4.w + b.i4.w;
return ret;
}
template<class T, const int n>
__global__ void
GlobalReads( T *out, const T *in, size_t N, bool bWriteResults )
{
T sums[n];
size_t i;
for ( int j = 0; j < n; j++ ) {
sums[j] = T(0);
}
for ( i = n*blockIdx.x*blockDim.x+threadIdx.x;
i < N-n*blockDim.x*gridDim.x;
i += n*blockDim.x*gridDim.x ) {
for ( int j = 0; j < n; j++ ) {
size_t index = i+j*blockDim.x;
sums[j] = plus( sums[j], in[index] );
}
}
// to avoid the (index<N) conditional in the inner loop,
// we left off some work at the end
for ( int j = 0; j < n; j++ ) {
size_t index = i+j*blockDim.x;
if ( index<N ) sums[j] = plus( sums[j], in[index] );
}
if ( bWriteResults ) {
T sum = T(0);
for ( int j = 0; j < n; j++ ) {
sum = plus( sum, sums[j] );
}
out[blockIdx.x*blockDim.x+threadIdx.x] = sum;
}
}
template<class T, const int n, bool bOffset>
double
BandwidthReads( size_t N, int cBlocks, int cThreads )
{
T *in = 0;
T *out = 0;
T *hostIn = 0;
T *hostOut = 0;
double ret = 0.0;
double elapsedTime;
float ms;
int cIterations;
cudaError_t status;
T sumCPU;
cudaEvent_t evStart = 0;
cudaEvent_t evStop = 0;
cuda(Malloc( &in, N*sizeof(T) ) );
cuda(Malloc( &out, cBlocks*cThreads*sizeof(T) ) );
hostIn = new T[N];
if ( ! hostIn )
goto Error;
hostOut = new T[cBlocks*cThreads];
if ( ! hostOut )
goto Error;
sumCPU = T(0);
// populate input array with random numbers
for ( size_t i = bOffset; i < N; i++ ) {
T nextrand = T(rand());
sumCPU = plus( sumCPU, nextrand );
hostIn[i] = nextrand;
}
cuda(Memcpy( in, hostIn, N*sizeof(T), cudaMemcpyHostToDevice ) );
cuda(EventCreate( &evStart ) );
cuda(EventCreate( &evStop ) );
{
// confirm that kernel launch with this configuration writes correct result
GlobalReads<T,n><<<cBlocks,cThreads>>>( out, in+bOffset, N-bOffset, true );
cuda(Memcpy( hostOut, out, cBlocks*cThreads*sizeof(T), cudaMemcpyDeviceToHost ) );
cuda(GetLastError() );
T sumGPU = T(0);
for ( size_t i = 0; i < cBlocks*cThreads; i++ ) {
sumGPU = plus( sumGPU, hostOut[i] );
}
if ( memcmp( &sumCPU, &sumGPU, sizeof(T) ) ) {
printf( "Incorrect sum computed!\n" );
goto Error;
}
}
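    // Timed loop: several back-to-back launches bracketed by CUDA events.
    // bWriteResults=false keeps the kernel read-only so result stores do not
    // pollute the read-bandwidth measurement.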
cIterations = 10;
cudaEventRecord( evStart );
for ( int i = 0; i < cIterations; i++ ) {
GlobalReads<T,n><<<cBlocks,cThreads>>>( out, in+bOffset, N-bOffset, false );
}
cudaEventRecord( evStop );
cuda(DeviceSynchronize() );
// make configurations that cannot launch error-out with 0 bandwidth
cuda(GetLastError() );
cuda(EventElapsedTime( &ms, evStart, evStop ) );
elapsedTime = ms/1000.0f;
// bytes per second
ret = ((double)N*cIterations*sizeof(T)) / elapsedTime;
// gigabytes per second
ret /= 1024.0*1048576.0;
Error:
if ( hostIn ) delete[] hostIn;
if ( hostOut ) delete[] hostOut;
cudaEventDestroy( evStart );
cudaEventDestroy( evStop );
cudaFree( in );
cudaFree( out );
return ret;
}
template<class T, const int n, bool bOffset>
double
ReportRow( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks )
{
int maxThreads = 0;
double maxBW = 0.0;
printf( "%d\t", n );
for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) {
double bw = BandwidthReads<T,n,bOffset>( N, cBlocks, cThreads );
if ( bw > maxBW ) {
maxBW = bw;
maxThreads = cThreads;
}
printf( "%.2f\t", bw );
}
printf( "%.2f\t%d\n", maxBW, maxThreads );
return maxBW;
}
template<class T, bool bCoalesced>
void
Shmoo( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks )
{
printf( "Operand size: %d byte%c\n", (int) sizeof(T), sizeof(T)==1 ? '\0' : 's' );
printf( "Input size: %dM operands\n", (int) (N>>20) );
printf( "Unroll\t" );
for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) {
printf( "%d\t", cThreads );
}
printf( "maxBW\tmaxThreads\n" );
ReportRow<T, 1, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 2, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 3, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 4, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 5, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 6, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 7, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 8, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T, 9, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,10, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,11, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,12, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,13, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,14, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,15, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
ReportRow<T,16, ! bCoalesced>( N, threadStart, threadStop, cBlocks );
}
int
main( int argc, char *argv[] )
{
cudaError_t status;
int device = 0;
int size = 16;
cudaDeviceProp prop;
if ( chCommandLineGet( &device, "device", argc, argv ) ) {
printf( "Using device %d...\n", device );
}
cuda(SetDevice(device) );
cuda(GetDeviceProperties( &prop, device ) );
printf( "Running globalRead.cu microbenchmark on %s\n", prop.name );
if ( chCommandLineGet( &size, "size", argc, argv ) ) {
printf( "Using %dM operands ...\n", size );
}
if ( chCommandLineGetBool( "uncoalesced", argc, argv ) ) {
printf( "Using uncoalesced memory transactions\n" );
Shmoo<char,false>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<short,false>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<int,false>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<myInt2,false>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<myInt4,false>( (size_t) size*1048576, 32, 512, 1500 );
} else {
printf( "Using coalesced memory transactions\n" );
Shmoo<char,true>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<short,true>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<int,true>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<myInt2,true>( (size_t) size*1048576, 32, 512, 1500 );
Shmoo<myInt4,true>( (size_t) size*1048576, 32, 512, 1500 );
}
return 0;
Error:
return 1;
}
|
24d85142008e2359705ab31be6bb7df6b5124bc7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
*/
#include "reader_impl.hpp"
#include "timezone.cuh"
#include <io/comp/gpuinflate.h>
#include <io/orc/orc.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <algorithm>
#include <array>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
// Import functionality that's independent of legacy code
using namespace cudf::io::orc;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates ORC data kind to cuDF type enum
*/
constexpr type_id to_type_id(const orc::SchemaType &schema,
bool use_np_dtypes,
type_id timestamp_type_id,
bool decimals_as_float64)
{
switch (schema.kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL:
// There isn't an arbitrary-precision type in cuDF, so map as float or int
return (decimals_as_float64) ? type_id::FLOAT64 : type_id::INT64;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to ORC clock frequency
*/
constexpr int32_t to_clockrate(type_id timestamp_type_id)
{
switch (timestamp_type_id) {
case type_id::TIMESTAMP_SECONDS: return 1;
case type_id::TIMESTAMP_MILLISECONDS: return 1000;
case type_id::TIMESTAMP_MICROSECONDS: return 1000000;
case type_id::TIMESTAMP_NANOSECONDS: return 1000000000;
default: return 0;
}
}
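// Note on the helper below: the low byte of skip_count carries a running count
// of streams to skip; judging by the shifts, a snapshot of that count appears
// to be parked in bits 8-15 for DATA streams and bits 16-23 for
// LENGTH/SECONDARY streams so each stream type remembers its ordering.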
constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child)
{
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(gpu::CI_NUM_STREAMS, 0);
}
}
} // namespace
namespace {
/**
* @brief Struct that maps ORC streams to columns
*/
struct orc_stream_info {
orc_stream_info() = default;
explicit orc_stream_info(
uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to start of compressed stripe data
size_t length; // length in file
uint32_t gdf_idx; // column index
uint32_t stripe_idx; // stripe index
};
/**
* @brief Function that populates column descriptors stream/chunk
*/
size_t gather_stream_info(const size_t stripe_index,
const orc::StripeInformation *stripeinfo,
const orc::StripeFooter *stripefooter,
const std::vector<int> &orc2gdf,
const std::vector<int> &gdf2orc,
                          const std::vector<orc::SchemaType> &types,
bool use_index,
size_t *num_dictionary_entries,
hostdevice_vector<gpu::ColumnDesc> &chunks,
std::vector<orc_stream_info> &stream_info)
{
const auto num_columns = gdf2orc.size();
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
for (const auto &stream : stripefooter->streams) {
if (stream.column >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto col = orc2gdf[stream.column];
if (col == -1) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[stream.column];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto &idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto &chunk = chunks[stripe_index * num_columns + col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto &chunk = chunks[stripe_index * num_columns + col];
const auto idx =
get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[stream.column]);
if (idx.first < gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[stream.column].dictionarySize;
*num_dictionary_entries += stripefooter->columns[stream.column].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
} // namespace
rmm::device_buffer reader::impl::decompress_stripe_data(
hostdevice_vector<gpu::ColumnDesc> &chunks,
const std::vector<rmm::device_buffer> &stripe_data,
const OrcDecompressor *decompressor,
std::vector<orc_stream_info> &stream_info,
size_t num_stripes,
device_span<gpu::RowGroup> row_groups,
size_t row_index_stride,
rmm::cuda_stream_view stream)
{
// Parse the columns' compressed info
hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream);
for (const auto &info : stream_info) {
compinfo.insert(gpu::CompressedStreamInfo(
static_cast<const uint8_t *>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
compinfo.host_to_device(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
compinfo.device_to_host(stream, true);
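  // Two-pass pattern: this first parse only sizes the work; the totals below
  // drive one bulk allocation, after which a second ParseCompressedStripeData
  // call fills in the per-block input/output descriptors.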
// Count the exact number of compressed blocks
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decomp_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found");
rmm::device_buffer decomp_data(total_decomp_size, stream);
rmm::device_uvector<gpu_inflate_input_s> inflate_in(
num_compressed_blocks + num_uncompressed_blocks, stream);
rmm::device_uvector<gpu_inflate_status_s> inflate_out(num_compressed_blocks, stream);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t *>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data() + start_pos;
compinfo[i].decstatus = inflate_out.data() + start_pos;
compinfo[i].copyctl = inflate_in.data() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
}
compinfo.host_to_device(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(
gpuinflate(inflate_in.data(), inflate_out.data(), num_compressed_blocks, 0, stream));
break;
case orc::SNAPPY:
CUDA_TRY(gpu_unsnap(inflate_in.data(), inflate_out.data(), num_compressed_blocks, stream));
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(
inflate_in.data() + num_compressed_blocks, num_uncompressed_blocks, stream));
}
gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream);
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
compinfo.device_to_host(stream, true);
const size_t num_columns = chunks.size() / num_stripes;
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (not row_groups.empty()) {
chunks.host_to_device(stream);
gpu::ParseRowGroupIndex(row_groups.data(),
compinfo.device_ptr(),
chunks.device_ptr(),
num_columns,
num_stripes,
row_groups.size() / num_columns,
row_index_stride,
stream);
}
return decomp_data;
}
void reader::impl::decode_stream_data(hostdevice_vector<gpu::ColumnDesc> &chunks,
size_t num_dicts,
size_t skip_rows,
size_t num_rows,
timezone_table_view tz_table,
device_span<gpu::RowGroup const> row_groups,
size_t row_index_stride,
std::vector<column_buffer> &out_buffers,
rmm::cuda_stream_view stream)
{
const auto num_columns = out_buffers.size();
const auto num_stripes = chunks.size() / out_buffers.size();
// Update chunks with pointers to column data
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
chunk.column_data_base = out_buffers[j].data();
chunk.valid_map_base = out_buffers[j].null_mask();
}
}
// Allocate global dictionary for deserializing
rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream);
chunks.host_to_device(stream);
gpu::DecodeNullsAndStringDictionaries(
chunks.device_ptr(), global_dict.data(), num_columns, num_stripes, num_rows, skip_rows, stream);
gpu::DecodeOrcColumnData(chunks.device_ptr(),
global_dict.data(),
num_columns,
num_stripes,
num_rows,
skip_rows,
tz_table,
row_groups.data(),
row_groups.size() / num_columns,
row_index_stride,
stream);
chunks.device_to_host(stream, true);
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
out_buffers[j].null_count() += chunks[i * num_columns + j].null_count;
}
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _mr(mr), _source(std::move(source))
{
// Open and parse the source dataset metadata
_metadata = std::make_unique<cudf::io::orc::metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(options.get_columns(), _has_timestamp_column);
// Override output timestamp resolution if requested
if (options.get_timestamp_type().id() != type_id::EMPTY) {
_timestamp_type = options.get_timestamp_type();
}
// Enable or disable attempt to use row index for parsing
_use_index = options.is_enabled_use_index();
// Enable or disable the conversion to numpy-compatible dtypes
_use_np_dtypes = options.is_enabled_use_np_dtypes();
// Control decimals conversion (float64 or int64 with optional scale)
_decimals_as_float64 = options.is_enabled_decimals_as_float64();
_decimals_as_int_scale = options.get_forced_decimals_scale();
}
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
const std::vector<size_type> &stripes,
rmm::cuda_stream_view stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// There are no columns in table
if (_selected_columns.size() == 0) return {std::make_unique<table>(), std::move(out_metadata)};
// Select only stripes required (aka row groups)
const auto selected_stripes = _metadata->select_stripes(stripes, skip_rows, num_rows);
// Association between each ORC column and its cudf::column
std::vector<int32_t> orc_col_map(_metadata->get_num_columns(), -1);
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : _selected_columns) {
auto col_type = to_type_id(
_metadata->ff.types[col], _use_np_dtypes, _timestamp_type.id(), _decimals_as_float64);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
// Map each ORC column to its column
orc_col_map[col] = column_types.size() - 1;
}
// If no rows or stripes to read, return empty columns
if (num_rows <= 0 || selected_stripes.empty()) {
std::transform(column_types.cbegin(),
column_types.cend(),
std::back_inserter(out_columns),
[](auto const &dtype) { return make_empty_column(dtype); });
} else {
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_stripes.size() * num_columns;
hostdevice_vector<gpu::ColumnDesc> chunks(num_chunks, stream);
memset(chunks.host_ptr(), 0, chunks.memory_size());
const bool use_index =
(_use_index == true) &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > _metadata->get_row_index_stride() && !(_metadata->get_row_index_stride() & 7) &&
_metadata->get_row_index_stride() > 0 && num_columns * selected_stripes.size() < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> stripe_data;
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
for (size_t i = 0; i < selected_stripes.size(); ++i) {
const auto stripe_info = selected_stripes[i].first;
const auto stripe_footer = selected_stripes[i].second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(i,
stripe_info,
stripe_footer,
orc_col_map,
_selected_columns,
_metadata->ff.types,
use_index,
&num_dict_entries,
chunks,
stream_info);
CUDF_EXPECTS(total_data_size > 0, "Expected streams data within stripe");
stripe_data.emplace_back(total_data_size, stream);
auto dst_base = static_cast<uint8_t *>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
const auto buffer = _source->host_read(offset, len);
CUDA_TRY(
hipMemcpyAsync(d_dst, buffer->data(), len, hipMemcpyHostToDevice, stream.value()));
stream.synchronize();
}
// Update chunks to reference streams pointers
for (size_t j = 0; j < num_columns; j++) {
auto &chunk = chunks[i * num_columns + j];
chunk.start_row = stripe_start_row;
chunk.num_rows = stripe_info->numberOfRows;
chunk.encoding_kind = stripe_footer->columns[_selected_columns[j]].kind;
chunk.type_kind = _metadata->ff.types[_selected_columns[j]].kind;
if (_decimals_as_float64) {
chunk.decimal_scale =
_metadata->ff.types[_selected_columns[j]].scale | orc::gpu::orc_decimal2float64_scale;
} else if (_decimals_as_int_scale < 0) {
chunk.decimal_scale = _metadata->ff.types[_selected_columns[j]].scale;
} else {
chunk.decimal_scale = _decimals_as_int_scale;
}
chunk.rowgroup_id = num_rowgroups;
chunk.dtype_len = (column_types[j].id() == type_id::STRING)
? sizeof(std::pair<const char *, size_t>)
: cudf::size_of(column_types[j]);
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.ts_clock_rate = to_clockrate(_timestamp_type.id());
}
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
stripe_start_row += stripe_info->numberOfRows;
if (use_index) {
num_rowgroups += (stripe_info->numberOfRows + _metadata->get_row_index_stride() - 1) /
_metadata->get_row_index_stride();
}
}
// Process dataset chunk pages into output columns
if (stripe_data.size() != 0) {
// Setup row group descriptors if using indexes
rmm::device_uvector<gpu::RowGroup> row_groups(num_rowgroups * num_columns, stream);
if (_metadata->ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(chunks,
stripe_data,
_metadata->decompressor.get(),
stream_info,
selected_stripes.size(),
row_groups,
_metadata->get_row_index_stride(),
stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (not row_groups.is_empty()) {
chunks.host_to_device(stream);
gpu::ParseRowGroupIndex(row_groups.data(),
nullptr,
chunks.device_ptr(),
num_columns,
selected_stripes.size(),
num_rowgroups,
_metadata->get_row_index_stride(),
stream);
}
}
// Setup table for converting timestamp columns from local to UTC time
auto const tz_table =
_has_timestamp_column
? build_timezone_transition_table(selected_stripes[0].second->writerTimezone, stream)
: timezone_table{};
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (size_t j = 0; j < selected_stripes.size(); ++j) {
if (chunks[j * num_columns + i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_stream_data(chunks,
num_dict_entries,
skip_rows,
num_rows,
tz_table.view(),
row_groups,
_metadata->get_row_index_stride(),
out_buffers,
stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _metadata->get_column_name(_selected_columns[i]);
}
// Return user metadata
for (const auto &kv : _metadata->ff.metadata) {
out_metadata.user_data.insert({kv.name, kv.value});
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(datasource::create(filepaths[0]), options, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(orc_reader_options const &options, rmm::cuda_stream_view stream)
{
return _impl->read(
options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream);
}
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
| 24d85142008e2359705ab31be6bb7df6b5124bc7.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
*/
#include "reader_impl.hpp"
#include "timezone.cuh"
#include <io/comp/gpuinflate.h>
#include <io/orc/orc.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <algorithm>
#include <array>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
// Import functionality that's independent of legacy code
using namespace cudf::io::orc;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates ORC data kind to cuDF type enum
*/
constexpr type_id to_type_id(const orc::SchemaType &schema,
bool use_np_dtypes,
type_id timestamp_type_id,
bool decimals_as_float64)
{
switch (schema.kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL:
// There isn't an arbitrary-precision type in cuDF, so map as float or int
return (decimals_as_float64) ? type_id::FLOAT64 : type_id::INT64;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to ORC clock frequency
*/
constexpr int32_t to_clockrate(type_id timestamp_type_id)
{
switch (timestamp_type_id) {
case type_id::TIMESTAMP_SECONDS: return 1;
case type_id::TIMESTAMP_MILLISECONDS: return 1000;
case type_id::TIMESTAMP_MICROSECONDS: return 1000000;
case type_id::TIMESTAMP_NANOSECONDS: return 1000000000;
default: return 0;
}
}
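// Note on the helper below: the low byte of skip_count carries a running count
// of streams to skip; judging by the shifts, a snapshot of that count appears
// to be parked in bits 8-15 for DATA streams and bits 16-23 for
// LENGTH/SECONDARY streams so each stream type remembers its ordering.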
constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child)
{
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(gpu::CI_NUM_STREAMS, 0);
}
}
} // namespace
namespace {
/**
* @brief Struct that maps ORC streams to columns
*/
struct orc_stream_info {
orc_stream_info() = default;
explicit orc_stream_info(
uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to start of compressed stripe data
size_t length; // length in file
uint32_t gdf_idx; // column index
uint32_t stripe_idx; // stripe index
};
/**
* @brief Function that populates column descriptors stream/chunk
*/
size_t gather_stream_info(const size_t stripe_index,
const orc::StripeInformation *stripeinfo,
const orc::StripeFooter *stripefooter,
const std::vector<int> &orc2gdf,
const std::vector<int> &gdf2orc,
                          const std::vector<orc::SchemaType> &types,
bool use_index,
size_t *num_dictionary_entries,
hostdevice_vector<gpu::ColumnDesc> &chunks,
std::vector<orc_stream_info> &stream_info)
{
const auto num_columns = gdf2orc.size();
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
for (const auto &stream : stripefooter->streams) {
if (stream.column >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto col = orc2gdf[stream.column];
if (col == -1) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[stream.column];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto &idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto &chunk = chunks[stripe_index * num_columns + col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto &chunk = chunks[stripe_index * num_columns + col];
const auto idx =
get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[stream.column]);
if (idx.first < gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[stream.column].dictionarySize;
*num_dictionary_entries += stripefooter->columns[stream.column].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
} // namespace
rmm::device_buffer reader::impl::decompress_stripe_data(
hostdevice_vector<gpu::ColumnDesc> &chunks,
const std::vector<rmm::device_buffer> &stripe_data,
const OrcDecompressor *decompressor,
std::vector<orc_stream_info> &stream_info,
size_t num_stripes,
device_span<gpu::RowGroup> row_groups,
size_t row_index_stride,
rmm::cuda_stream_view stream)
{
// Parse the columns' compressed info
hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream);
for (const auto &info : stream_info) {
compinfo.insert(gpu::CompressedStreamInfo(
static_cast<const uint8_t *>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
compinfo.host_to_device(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
compinfo.device_to_host(stream, true);
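  // Two-pass pattern: this first parse only sizes the work; the totals below
  // drive one bulk allocation, after which a second ParseCompressedStripeData
  // call fills in the per-block input/output descriptors.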
// Count the exact number of compressed blocks
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decomp_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found");
rmm::device_buffer decomp_data(total_decomp_size, stream);
rmm::device_uvector<gpu_inflate_input_s> inflate_in(
num_compressed_blocks + num_uncompressed_blocks, stream);
rmm::device_uvector<gpu_inflate_status_s> inflate_out(num_compressed_blocks, stream);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t *>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data() + start_pos;
compinfo[i].decstatus = inflate_out.data() + start_pos;
compinfo[i].copyctl = inflate_in.data() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
}
compinfo.host_to_device(stream);
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(
gpuinflate(inflate_in.data(), inflate_out.data(), num_compressed_blocks, 0, stream));
break;
case orc::SNAPPY:
CUDA_TRY(gpu_unsnap(inflate_in.data(), inflate_out.data(), num_compressed_blocks, stream));
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(
inflate_in.data() + num_compressed_blocks, num_uncompressed_blocks, stream));
}
gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream);
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
compinfo.device_to_host(stream, true);
const size_t num_columns = chunks.size() / num_stripes;
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (not row_groups.empty()) {
chunks.host_to_device(stream);
gpu::ParseRowGroupIndex(row_groups.data(),
compinfo.device_ptr(),
chunks.device_ptr(),
num_columns,
num_stripes,
row_groups.size() / num_columns,
row_index_stride,
stream);
}
return decomp_data;
}
void reader::impl::decode_stream_data(hostdevice_vector<gpu::ColumnDesc> &chunks,
size_t num_dicts,
size_t skip_rows,
size_t num_rows,
timezone_table_view tz_table,
device_span<gpu::RowGroup const> row_groups,
size_t row_index_stride,
std::vector<column_buffer> &out_buffers,
rmm::cuda_stream_view stream)
{
const auto num_columns = out_buffers.size();
const auto num_stripes = chunks.size() / out_buffers.size();
// Update chunks with pointers to column data
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
chunk.column_data_base = out_buffers[j].data();
chunk.valid_map_base = out_buffers[j].null_mask();
}
}
// Allocate global dictionary for deserializing
rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream);
chunks.host_to_device(stream);
gpu::DecodeNullsAndStringDictionaries(
chunks.device_ptr(), global_dict.data(), num_columns, num_stripes, num_rows, skip_rows, stream);
gpu::DecodeOrcColumnData(chunks.device_ptr(),
global_dict.data(),
num_columns,
num_stripes,
num_rows,
skip_rows,
tz_table,
row_groups.data(),
row_groups.size() / num_columns,
row_index_stride,
stream);
chunks.device_to_host(stream, true);
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
out_buffers[j].null_count() += chunks[i * num_columns + j].null_count;
}
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _mr(mr), _source(std::move(source))
{
// Open and parse the source dataset metadata
_metadata = std::make_unique<cudf::io::orc::metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(options.get_columns(), _has_timestamp_column);
// Override output timestamp resolution if requested
if (options.get_timestamp_type().id() != type_id::EMPTY) {
_timestamp_type = options.get_timestamp_type();
}
// Enable or disable attempt to use row index for parsing
_use_index = options.is_enabled_use_index();
// Enable or disable the conversion to numpy-compatible dtypes
_use_np_dtypes = options.is_enabled_use_np_dtypes();
// Control decimals conversion (float64 or int64 with optional scale)
_decimals_as_float64 = options.is_enabled_decimals_as_float64();
_decimals_as_int_scale = options.get_forced_decimals_scale();
}
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
const std::vector<size_type> &stripes,
rmm::cuda_stream_view stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// There are no columns in table
if (_selected_columns.size() == 0) return {std::make_unique<table>(), std::move(out_metadata)};
// Select only stripes required (aka row groups)
const auto selected_stripes = _metadata->select_stripes(stripes, skip_rows, num_rows);
// Association between each ORC column and its cudf::column
std::vector<int32_t> orc_col_map(_metadata->get_num_columns(), -1);
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : _selected_columns) {
auto col_type = to_type_id(
_metadata->ff.types[col], _use_np_dtypes, _timestamp_type.id(), _decimals_as_float64);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
// Map each ORC column to its column
orc_col_map[col] = column_types.size() - 1;
}
// If no rows or stripes to read, return empty columns
if (num_rows <= 0 || selected_stripes.empty()) {
std::transform(column_types.cbegin(),
column_types.cend(),
std::back_inserter(out_columns),
[](auto const &dtype) { return make_empty_column(dtype); });
} else {
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_stripes.size() * num_columns;
hostdevice_vector<gpu::ColumnDesc> chunks(num_chunks, stream);
memset(chunks.host_ptr(), 0, chunks.memory_size());
const bool use_index =
(_use_index == true) &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > _metadata->get_row_index_stride() && !(_metadata->get_row_index_stride() & 7) &&
_metadata->get_row_index_stride() > 0 && num_columns * selected_stripes.size() < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> stripe_data;
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
for (size_t i = 0; i < selected_stripes.size(); ++i) {
const auto stripe_info = selected_stripes[i].first;
const auto stripe_footer = selected_stripes[i].second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(i,
stripe_info,
stripe_footer,
orc_col_map,
_selected_columns,
_metadata->ff.types,
use_index,
&num_dict_entries,
chunks,
stream_info);
CUDF_EXPECTS(total_data_size > 0, "Expected streams data within stripe");
stripe_data.emplace_back(total_data_size, stream);
auto dst_base = static_cast<uint8_t *>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
const auto buffer = _source->host_read(offset, len);
CUDA_TRY(
cudaMemcpyAsync(d_dst, buffer->data(), len, cudaMemcpyHostToDevice, stream.value()));
stream.synchronize();
}
// Update chunks to reference streams pointers
for (size_t j = 0; j < num_columns; j++) {
auto &chunk = chunks[i * num_columns + j];
chunk.start_row = stripe_start_row;
chunk.num_rows = stripe_info->numberOfRows;
chunk.encoding_kind = stripe_footer->columns[_selected_columns[j]].kind;
chunk.type_kind = _metadata->ff.types[_selected_columns[j]].kind;
if (_decimals_as_float64) {
chunk.decimal_scale =
_metadata->ff.types[_selected_columns[j]].scale | orc::gpu::orc_decimal2float64_scale;
} else if (_decimals_as_int_scale < 0) {
chunk.decimal_scale = _metadata->ff.types[_selected_columns[j]].scale;
} else {
chunk.decimal_scale = _decimals_as_int_scale;
}
chunk.rowgroup_id = num_rowgroups;
chunk.dtype_len = (column_types[j].id() == type_id::STRING)
? sizeof(std::pair<const char *, size_t>)
: cudf::size_of(column_types[j]);
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.ts_clock_rate = to_clockrate(_timestamp_type.id());
}
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
stripe_start_row += stripe_info->numberOfRows;
if (use_index) {
num_rowgroups += (stripe_info->numberOfRows + _metadata->get_row_index_stride() - 1) /
_metadata->get_row_index_stride();
}
}
// Process dataset chunk pages into output columns
if (stripe_data.size() != 0) {
// Setup row group descriptors if using indexes
rmm::device_uvector<gpu::RowGroup> row_groups(num_rowgroups * num_columns, stream);
if (_metadata->ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(chunks,
stripe_data,
_metadata->decompressor.get(),
stream_info,
selected_stripes.size(),
row_groups,
_metadata->get_row_index_stride(),
stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (not row_groups.is_empty()) {
chunks.host_to_device(stream);
gpu::ParseRowGroupIndex(row_groups.data(),
nullptr,
chunks.device_ptr(),
num_columns,
selected_stripes.size(),
num_rowgroups,
_metadata->get_row_index_stride(),
stream);
}
}
// Setup table for converting timestamp columns from local to UTC time
auto const tz_table =
_has_timestamp_column
? build_timezone_transition_table(selected_stripes[0].second->writerTimezone, stream)
: timezone_table{};
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (size_t j = 0; j < selected_stripes.size(); ++j) {
if (chunks[j * num_columns + i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_stream_data(chunks,
num_dict_entries,
skip_rows,
num_rows,
tz_table.view(),
row_groups,
_metadata->get_row_index_stride(),
out_buffers,
stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _metadata->get_column_name(_selected_columns[i]);
}
// Return user metadata
for (const auto &kv : _metadata->ff.metadata) {
out_metadata.user_data.insert({kv.name, kv.value});
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(datasource::create(filepaths[0]), options, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(orc_reader_options const &options, rmm::cuda_stream_view stream)
{
return _impl->read(
options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream);
}
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
|
d0179163c1a72ef6f9325908b916710fd8218efb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@author Azzam Haidar
@author Ahmad Ahmad
@generated from zpotf2_kernels.cu normal z -> s, Fri Sep 11 18:29:22 2015
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
#define PRECISION_s
#if defined(VERSION31)
#define ENABLE_COND1
#define ENABLE_COND2
#endif
#define MAX_NTCOL 8
#ifdef PRECISION_s
#define NTCOL2 (4)
#define NTCOL1 (8)
#elif defined(PRECISION_d)
#define NTCOL2 (2)
#define NTCOL1 (4)
#else
#define NTCOL2 (1)
#define NTCOL1 (1)
#endif
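// NTCOL1/NTCOL2 control how many matrices of the batch are packed into one
// thread block along blockDim.y; the smaller precisions use wider blocks.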
#include "spotf2_devicesfunc.cuh"
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void spotf2_smlpout_fixwidth_kernel_batched(int m,
float **dA_array, int lda,
int localstep, int gbstep, magma_int_t *info_array, const int batchCount)
{
const int batchid = blockIdx.z * blockDim.y + threadIdx.y;
if (batchid >= batchCount) return;
spotf2_smlpout_fixwidth_device(m, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid]));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void spotf2_smlpout_anywidth_kernel_batched(int m, int n,
float **dA_array, int lda,
int localstep, int gbstep, magma_int_t *info_array, const int batchCount)
{
const int batchid = blockIdx.z * blockDim.y + threadIdx.y;
if (batchid >= batchCount) return;
spotf2_smlpout_anywidth_device(m, n, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid]));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
\n
This is an internal routine.
********************************************************************/
extern "C" magma_int_t
magma_spotrf_lpout_batched(
magma_uplo_t uplo, magma_int_t n,
float **dA_array, magma_int_t lda, magma_int_t gbstep,
magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t m = n;
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 ) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
printf("Upper side is unavailable \n");
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magma_int_t roundup_m = m;
// rounding up needs more investigation since it could modify the matrix out of its bounds
//magma_int_t m8 = magma_roundup( m, 8 );
//magma_int_t roundup_m = m8 > lda ? m : m8;
//magma_int_t m32 = magma_roundup( m, 32 );
//magma_int_t roundup_m = m32 > lda ? m : m32;
magma_int_t ib, rows;
for (magma_int_t j = 0; j < n; j += POTF2_NB) {
ib = min(POTF2_NB, n-j);
rows = roundup_m-j;
// tuning ntcol
magma_int_t ntcol; // for z precision, the best tuning is at NTCOL = 1 for all sizes
if (rows > 64) ntcol = 1;
else if (rows > 32) ntcol = NTCOL2;
else ntcol = NTCOL1;
// end of tuning ntcol
const magma_int_t nTB = magma_ceildiv( batchCount, ntcol );
dim3 dimGrid(1, 1, nTB);
magma_int_t nbth = rows;
magma_int_t shared_mem_size = ntcol * (sizeof(float)*(nbth+POTF2_NB)*POTF2_NB);
dim3 threads(nbth, ntcol);
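// Dynamic shared memory holds ntcol working panels of
// (nbth + POTF2_NB) x POTF2_NB floats; abort if this exceeds the
// ~48KB per-block shared memory limit.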
if (shared_mem_size > 47000)
{
arginfo = -33;
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if (ib == POTF2_NB)
{
hipLaunchKernelGGL(( spotf2_smlpout_fixwidth_kernel_batched), dim3(dimGrid), dim3(threads), shared_mem_size, queue , rows, dA_array, lda, j, gbstep, info_array, batchCount);
} else {
hipLaunchKernelGGL(( spotf2_smlpout_anywidth_kernel_batched), dim3(dimGrid), dim3(threads), shared_mem_size, queue , rows, ib, dA_array, lda, j, gbstep, info_array, batchCount);
}
}
return arginfo;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| d0179163c1a72ef6f9325908b916710fd8218efb.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@author Azzam Haidar
@author Ahmad Ahmad
@generated from zpotf2_kernels.cu normal z -> s, Fri Sep 11 18:29:22 2015
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
#define PRECISION_s
#if defined(VERSION31)
#define ENABLE_COND1
#define ENABLE_COND2
#endif
#define MAX_NTCOL 8
#ifdef PRECISION_s
#define NTCOL2 (4)
#define NTCOL1 (8)
#elif defined(PRECISION_d)
#define NTCOL2 (2)
#define NTCOL1 (4)
#else
#define NTCOL2 (1)
#define NTCOL1 (1)
#endif
#include "spotf2_devicesfunc.cuh"
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void spotf2_smlpout_fixwidth_kernel_batched(int m,
float **dA_array, int lda,
int localstep, int gbstep, magma_int_t *info_array, const int batchCount)
{
const int batchid = blockIdx.z * blockDim.y + threadIdx.y;
if (batchid >= batchCount) return;
spotf2_smlpout_fixwidth_device(m, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid]));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void spotf2_smlpout_anywidth_kernel_batched(int m, int n,
float **dA_array, int lda,
int localstep, int gbstep, magma_int_t *info_array, const int batchCount)
{
const int batchid = blockIdx.z * blockDim.y + threadIdx.y;
if (batchid >= batchCount) return;
spotf2_smlpout_anywidth_device(m, n, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*lda, lda, localstep, gbstep, &(info_array[batchid]));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
\n
This is an internal routine.
********************************************************************/
extern "C" magma_int_t
magma_spotrf_lpout_batched(
magma_uplo_t uplo, magma_int_t n,
float **dA_array, magma_int_t lda, magma_int_t gbstep,
magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t m = n;
magma_int_t arginfo = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower) {
arginfo = -1;
} else if (m < 0 || n < 0 ) {
arginfo = -2;
} else if (lda < max(1,m)) {
arginfo = -4;
} else if (m < n) {
arginfo = -10;
}
if (uplo == MagmaUpper) {
printf("Upper side is unavailable \n");
arginfo = -1;
}
if (arginfo != 0) {
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
// Quick return if possible
if (m == 0 || n == 0) {
return arginfo;
}
magma_int_t roundup_m = m;
// rounding up needs more investigation since it could modify the matrix out of its bounds
//magma_int_t m8 = magma_roundup( m, 8 );
//magma_int_t roundup_m = m8 > lda ? m : m8;
//magma_int_t m32 = magma_roundup( m, 32 );
//magma_int_t roundup_m = m32 > lda ? m : m32;
magma_int_t ib, rows;
for (magma_int_t j = 0; j < n; j += POTF2_NB) {
ib = min(POTF2_NB, n-j);
rows = roundup_m-j;
// tuning ntcol
magma_int_t ntcol; // for z precision, the best tuning is at NTCOL = 1 for all sizes
if (rows > 64) ntcol = 1;
else if (rows > 32) ntcol = NTCOL2;
else ntcol = NTCOL1;
// end of tuning ntcol
const magma_int_t nTB = magma_ceildiv( batchCount, ntcol );
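// Each thread block processes ntcol problems of the batch along
// threadIdx.y; the remaining batches are spread across gridDim.z
// (see the batchid computation in the kernels above).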
dim3 dimGrid(1, 1, nTB);
magma_int_t nbth = rows;
magma_int_t shared_mem_size = ntcol * (sizeof(float)*(nbth+POTF2_NB)*POTF2_NB);
dim3 threads(nbth, ntcol);
if (shared_mem_size > 47000)
{
arginfo = -33;
magma_xerbla( __func__, -(arginfo) );
return arginfo;
}
if (ib == POTF2_NB)
{
spotf2_smlpout_fixwidth_kernel_batched<<<dimGrid, threads, shared_mem_size, queue >>>(rows, dA_array, lda, j, gbstep, info_array, batchCount);
} else {
spotf2_smlpout_anywidth_kernel_batched<<<dimGrid, threads, shared_mem_size, queue >>>(rows, ib, dA_array, lda, j, gbstep, info_array, batchCount);
}
}
return arginfo;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
3ebf037c534c2223d193add18506d349ff5b000e.hip | // !!! This is a file automatically generated by hipify!!!
// Mean filtering is also known as boxfilter
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include "dehaze_kernel.h"
#define TILE_DIM 16
#define BLOCKSIZE 128
__global__ void d_boxfilter_x_global(float *src, float *dst, int width, int height, int r)
{
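// Computes a sliding-window (box) mean along one row per thread block.
// The padded row is processed in segments of BLOCKSIZE*2 elements, each
// scanned with a work-efficient Blelloch exclusive scan (up-sweep, then
// down-sweep); sum[] carries the running totals of previous segments so
// the windowed sum becomes a difference of two prefix sums.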
int tid = threadIdx.x;
int bid = blockIdx.x;
int offset = 1;
int num = (width + 2 * r + 2 * BLOCKSIZE - 1) / (2 * BLOCKSIZE); // the padded row is split into num segments of BLOCKSIZE*2 elements each
int len = num * 2 * BLOCKSIZE;
int extra = len - r - width;
float scale = 1.0f / (float)((r << 1) + 1);
__shared__ float sum[35]; sum[0] = 0;
extern __shared__ float temp[];
if (bid < height)
{
// boundary padding
for (int i = tid; i < r; i += BLOCKSIZE)
{
temp[i] = src[bid*width + 0]; // the first r entries are padded with the first element of this row
}
//__syncthreads();
for (int i = tid; i < width; i += BLOCKSIZE)
{
temp[r + i] = src[bid * width + i];
}
//__syncthreads();
for (int i = tid; i < extra; i += BLOCKSIZE)
{
temp[r + width + i] = src[(bid + 1) * width - 1]; // the last extra entries are padded with the last element of this row
}
__syncthreads();
for (int cnt = 0; cnt < num; ++cnt) // num is the number of iterations
{
int bias = cnt * BLOCKSIZE * 2;
//__syncthreads();
//up-sweep phase
for (int j = BLOCKSIZE; j > 0; j >>= 1)
{
if (tid < j)
{
int ai = bias + offset * (2 * tid + 1) - 1;
int bi = bias + offset * (2 * tid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
__syncthreads();
}
//down-sweep phase
if (tid == 0)
{
sum[cnt + 1] = temp[(cnt + 1) * BLOCKSIZE * 2 - 1] + sum[cnt]; // each segment[i] later adds sum[i]; sum[i] is the total of all elements in the first i data blocks, each of size BLOCKSIZE * 2
temp[(cnt + 1) * BLOCKSIZE * 2 - 1] = 0;
}
__syncthreads();
for (int j = 1; j < (BLOCKSIZE * 2); j *= 2)
{
offset >>= 1;
if (tid < j)
{
int ai = bias + offset * (2 * tid + 1) - 1;
int bi = bias + offset * (2 * tid + 2) - 1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
__syncthreads();
}
}
for (int i = tid; i < width; i += BLOCKSIZE)
{
float sum_box = temp[i + 2 * r + 1] + sum[(i + 2 * r + 1) / (BLOCKSIZE * 2)] - temp[i] - sum[i / (BLOCKSIZE * 2)]; // the prefix sum covers all elements before index i + 2*r + 1, not including element i + 2*r + 1 itself
dst[bid * width + i] = sum_box * scale;
//dst[bid * width + i] = temp[i];
//dst[bid * width + i] = src[bid * width + i];
}
}
}
// 2018.12.18: switched to a HILLIS-style integral image (prefix sum)
//sipenghui
__global__ void d_boxfilter_x_hillis(float *src, float *dst, int width, int height, int r)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int offset = 1;
int num = (width + 2 * r + 2 * BLOCKSIZE - 1) / (2 * BLOCKSIZE); // the padded row is split into num segments of BLOCKSIZE*2 elements each
int len = num * 2 * BLOCKSIZE;
int extra = len - r - width;
float scale = 1.0f / (float)((r << 1) + 1);
__shared__ float sum[35]; sum[0] = 0;
extern __shared__ float temp[];
if (bid < height)
{
// boundary padding
for (int i = tid; i < r; i += 2*BLOCKSIZE) // 2019.2.28: changed stride to BLOCKSIZE * 2
{
temp[i] = src[bid*width + 0]; // the first r entries are padded with the first element of this row
temp[len*2 + i] = src[bid*width + 0];
}
for (int i = tid; i < width; i += 2*BLOCKSIZE)
{
temp[r + i] = src[bid * width + i];
temp[len * 2 + r + i] = src[bid * width + i];
}
for (int i = tid; i < extra; i += 2*BLOCKSIZE)
{
temp[r + width + i] = src[(bid + 1) * width - 1]; // the last extra entries are padded with the last element of this row
temp[len * 2 + r + width + i] = src[(bid + 1) * width - 1];
}
__syncthreads();
int pout = 0;
int pin = 1;
for (int cnt = 0; cnt < num; ++cnt) // num is the number of iterations
{
int bias = cnt * BLOCKSIZE * 2;
pout = 0;
pin = 1;
for (offset = 1; offset < BLOCKSIZE * 2; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pin;
//if (tid<2*BLOCKSIZE)
// temp[len*2+bias + tid] = temp[bias + tid];
//__syncthreads();
//temp[pout*len + bias + tid] = temp[pin*len + bias + tid]; // version 1
int ai = pout*len + bias + tid;
int bi = pin*len + bias + tid;
if (tid >= offset && tid < 2 * BLOCKSIZE) // version 2
//temp[bias + tid] += temp[bias + tid - offset]; // 2019.2.28: a conflict exists here
temp[ai] = temp[bi] + temp[bi - offset];
else
temp[ai] = temp[bi];
__syncthreads();
}
if (tid == 2 * BLOCKSIZE-1)
{
sum[cnt + 1] = temp[pout*len + bias + tid] + sum[cnt];
}
if (tid < 2 * BLOCKSIZE)
temp[pout*len + bias + tid] = temp[pout*len + bias + tid] - temp[len * 2 + bias + tid];
__syncthreads();
}
for (int i = tid; i < width; i += BLOCKSIZE)
{
float sum_box = temp[pout*len + i + 2 * r + 1] + sum[(i + 2 * r + 1) / (BLOCKSIZE * 2)] - temp[pout*len + i] - sum[i / (BLOCKSIZE * 2)]; // the prefix sum covers all elements before index i + 2*r + 1, not including element i + 2*r + 1 itself
dst[bid * width + i] = sum_box * scale;
}
}
}
extern "C"
void boxfilter(float *id, float *od, float *d_temp, float *d_temp1, int width, int height, int r)
{
int num_shared1 = ((width + 2 * r + BLOCKSIZE * 2 - 1) / (BLOCKSIZE * 2)) * 2 * BLOCKSIZE;
int num_shared2 = ((height + 2 * r + BLOCKSIZE * 2 - 1) / (BLOCKSIZE * 2)) * 2 * BLOCKSIZE;
dim3 grid1(width / TILE_DIM + 1, height / TILE_DIM + 1);
dim3 grid2(height / TILE_DIM + 1, width / TILE_DIM + 1);
dim3 block(TILE_DIM, TILE_DIM);
//d_boxfilter_x_global << < height, BLOCKSIZE, num_shared1 * sizeof(float) >> > (id, d_temp, width, height, r);
//transpose << <grid1, block >> >(d_temp1, d_temp, width, height);
//d_boxfilter_x_global << < width, BLOCKSIZE, num_shared2 * sizeof(float) >> > (d_temp1, od, height, width, r);
d_boxfilter_x_hillis << < height, BLOCKSIZE * 2, num_shared1 * sizeof(float)*3 >> > (id, d_temp, width, height, r);
transpose << <grid1, block >> >(d_temp1, d_temp, width, height);
d_boxfilter_x_hillis << < width, BLOCKSIZE * 2, num_shared2 * sizeof(float)*3 >> > (d_temp1, od, height, width, r);
}
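// Minimal host-side usage sketch (illustrative only, not part of the
// original source; h_in, h_out and the radius r are assumptions):
//
// size_t bytes = (size_t)width * height * sizeof(float);
// float *d_in, *d_out, *d_tmp, *d_tmp1;
// hipMalloc(&d_in, bytes); hipMalloc(&d_out, bytes);
// hipMalloc(&d_tmp, bytes); hipMalloc(&d_tmp1, bytes);
// hipMemcpy(d_in, h_in, bytes, hipMemcpyHostToDevice);
// boxfilter(d_in, d_out, d_tmp, d_tmp1, width, height, r);
// hipMemcpy(h_out, d_out, bytes, hipMemcpyDeviceToHost);
// // note: the second pass filters the transposed image, so depending on
// // the transpose helper, d_out holds the result in (height x width) layout.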
 | 3ebf037c534c2223d193add18506d349ff5b000e.cu | // Mean filtering is also known as boxfilter
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "dehaze_kernel.h"
#define TILE_DIM 16
#define BLOCKSIZE 128
__global__ void d_boxfilter_x_global(float *src, float *dst, int width, int height, int r)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int offset = 1;
int num = (width + 2 * r + 2 * BLOCKSIZE - 1) / (2 * BLOCKSIZE); // the padded row is split into num segments of BLOCKSIZE*2 elements each
int len = num * 2 * BLOCKSIZE;
int extra = len - r - width;
float scale = 1.0f / (float)((r << 1) + 1);
__shared__ float sum[35]; sum[0] = 0;
extern __shared__ float temp[];
if (bid < height)
{
// boundary padding
for (int i = tid; i < r; i += BLOCKSIZE)
{
temp[i] = src[bid*width + 0]; // the first r entries are padded with the first element of this row
}
//__syncthreads();
for (int i = tid; i < width; i += BLOCKSIZE)
{
temp[r + i] = src[bid * width + i];
}
//__syncthreads();
for (int i = tid; i < extra; i += BLOCKSIZE)
{
temp[r + width + i] = src[(bid + 1) * width - 1]; // the last extra entries are padded with the last element of this row
}
__syncthreads();
for (int cnt = 0; cnt < num; ++cnt) // num is the number of iterations
{
int bias = cnt * BLOCKSIZE * 2;
//__syncthreads();
//up-sweep phase
for (int j = BLOCKSIZE; j > 0; j >>= 1)
{
if (tid < j)
{
int ai = bias + offset * (2 * tid + 1) - 1;
int bi = bias + offset * (2 * tid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
__syncthreads();
}
//down-sweep phase
if (tid == 0)
{
sum[cnt + 1] = temp[(cnt + 1) * BLOCKSIZE * 2 - 1] + sum[cnt]; // each segment[i] later adds sum[i]; sum[i] is the total of all elements in the first i data blocks, each of size BLOCKSIZE * 2
temp[(cnt + 1) * BLOCKSIZE * 2 - 1] = 0;
}
__syncthreads();
for (int j = 1; j < (BLOCKSIZE * 2); j *= 2)
{
offset >>= 1;
if (tid < j)
{
int ai = bias + offset * (2 * tid + 1) - 1;
int bi = bias + offset * (2 * tid + 2) - 1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
__syncthreads();
}
}
for (int i = tid; i < width; i += BLOCKSIZE)
{
float sum_box = temp[i + 2 * r + 1] + sum[(i + 2 * r + 1) / (BLOCKSIZE * 2)] - temp[i] - sum[i / (BLOCKSIZE * 2)]; // the prefix sum covers all elements before index i + 2*r + 1, not including element i + 2*r + 1 itself
dst[bid * width + i] = sum_box * scale;
//dst[bid * width + i] = temp[i];
//dst[bid * width + i] = src[bid * width + i];
}
}
}
// 2018.12.18: switched to a HILLIS-style integral image (prefix sum)
//sipenghui
__global__ void d_boxfilter_x_hillis(float *src, float *dst, int width, int height, int r)
{
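// Same row-wise box mean, but via a double-buffered Hillis-Steele
// inclusive scan: pout/pin ping-pong between two halves of shared memory
// at each step. A third copy of the raw input is kept in shared memory so
// the inclusive scan can be shifted to an exclusive one before the
// windowed difference is taken.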
int tid = threadIdx.x;
int bid = blockIdx.x;
int offset = 1;
int num = (width + 2 * r + 2 * BLOCKSIZE - 1) / (2 * BLOCKSIZE); // the padded row is split into num segments of BLOCKSIZE*2 elements each
int len = num * 2 * BLOCKSIZE;
int extra = len - r - width;
float scale = 1.0f / (float)((r << 1) + 1);
__shared__ float sum[35]; sum[0] = 0;
extern __shared__ float temp[];
if (bid < height)
{
// boundary padding
for (int i = tid; i < r; i += 2*BLOCKSIZE) // 2019.2.28: changed stride to BLOCKSIZE * 2
{
temp[i] = src[bid*width + 0]; // the first r entries are padded with the first element of this row
temp[len*2 + i] = src[bid*width + 0];
}
for (int i = tid; i < width; i += 2*BLOCKSIZE)
{
temp[r + i] = src[bid * width + i];
temp[len * 2 + r + i] = src[bid * width + i];
}
for (int i = tid; i < extra; i += 2*BLOCKSIZE)
{
temp[r + width + i] = src[(bid + 1) * width - 1]; // the last extra entries are padded with the last element of this row
temp[len * 2 + r + width + i] = src[(bid + 1) * width - 1];
}
__syncthreads();
int pout = 0;
int pin = 1;
for (int cnt = 0; cnt < num; ++cnt) // num is the number of iterations
{
int bias = cnt * BLOCKSIZE * 2;
pout = 0;
pin = 1;
for (offset = 1; offset < BLOCKSIZE * 2; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pin;
//if (tid<2*BLOCKSIZE)
// temp[len*2+bias + tid] = temp[bias + tid];
//__syncthreads();
//temp[pout*len + bias + tid] = temp[pin*len + bias + tid]; // version 1
int ai = pout*len + bias + tid;
int bi = pin*len + bias + tid;
if (tid >= offset && tid < 2 * BLOCKSIZE) // version 2
//temp[bias + tid] += temp[bias + tid - offset]; // 2019.2.28: a conflict exists here
temp[ai] = temp[bi] + temp[bi - offset];
else
temp[ai] = temp[bi];
__syncthreads();
}
if (tid == 2 * BLOCKSIZE-1)
{
sum[cnt + 1] = temp[pout*len + bias + tid] + sum[cnt];
}
if (tid < 2 * BLOCKSIZE)
temp[pout*len + bias + tid] = temp[pout*len + bias + tid] - temp[len * 2 + bias + tid];
__syncthreads();
}
for (int i = tid; i < width; i += BLOCKSIZE)
{
float sum_box = temp[pout*len + i + 2 * r + 1] + sum[(i + 2 * r + 1) / (BLOCKSIZE * 2)] - temp[pout*len + i] - sum[i / (BLOCKSIZE * 2)]; // the prefix sum covers all elements before index i + 2*r + 1, not including element i + 2*r + 1 itself
dst[bid * width + i] = sum_box * scale;
}
}
}
extern "C"
void boxfilter(float *id, float *od, float *d_temp, float *d_temp1, int width, int height, int r)
{
int num_shared1 = ((width + 2 * r + BLOCKSIZE * 2 - 1) / (BLOCKSIZE * 2)) * 2 * BLOCKSIZE;
int num_shared2 = ((height + 2 * r + BLOCKSIZE * 2 - 1) / (BLOCKSIZE * 2)) * 2 * BLOCKSIZE;
dim3 grid1(width / TILE_DIM + 1, height / TILE_DIM + 1);
dim3 grid2(height / TILE_DIM + 1, width / TILE_DIM + 1);
dim3 block(TILE_DIM, TILE_DIM);
//d_boxfilter_x_global << < height, BLOCKSIZE, num_shared1 * sizeof(float) >> > (id, d_temp, width, height, r);
//transpose << <grid1, block >> >(d_temp1, d_temp, width, height);
//d_boxfilter_x_global << < width, BLOCKSIZE, num_shared2 * sizeof(float) >> > (d_temp1, od, height, width, r);
d_boxfilter_x_hillis << < height, BLOCKSIZE * 2, num_shared1 * sizeof(float)*3 >> > (id, d_temp, width, height, r);
transpose << <grid1, block >> >(d_temp1, d_temp, width, height);
d_boxfilter_x_hillis << < width, BLOCKSIZE * 2, num_shared2 * sizeof(float)*3 >> > (d_temp1, od, height, width, r);
}
|
2e8ba71c216b7466f8ebfcb703f551749fdf4f32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <rocblas.h>
#include "cml/cml_blas.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "util.h"
namespace pogs {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
template<typename T>
struct GpuData {
const T *orig_data;
hipblasHandle_t handle;
GpuData(const T *orig_data) : orig_data(orig_data) {
hipblasCreate(&handle);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
hipblasDestroy(handle);
DEBUG_CUDA_CHECK_ERR();
}
};
hipblasOperation_t OpToCublasOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
return trans == 'n' || trans == 'N' ? HIPBLAS_OP_N : HIPBLAS_OP_T;
}
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A);
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixDense Implementation /////////////////////////////
////////////////////////////////////////////////////////////////////////////////
template <typename T>
MatrixDense<T>::MatrixDense(char ord, size_t m, size_t n, const T *data)
: Matrix<T>(m, n), _data(0) {
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
// Set GPU specific _info.
GpuData<T> *info = new GpuData<T>(data);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixDense<T>::MatrixDense(const MatrixDense<T>& A)
: Matrix<T>(A._m, A._n), _data(0), _ord(A._ord) {
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info);
GpuData<T> *info = new GpuData<T>(info_A->orig_data);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixDense<T>::~MatrixDense() {
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
delete info;
this->_info = 0;
if (this->_done_init && _data) {
hipFree(_data);
this->_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
}
template <typename T>
int MatrixDense<T>::Init() {
DEBUG_EXPECT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
// Copy Matrix to GPU.
hipMalloc(&_data, this->_m * this->_n * sizeof(T));
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),
hipMemcpyHostToDevice);
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_m);
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::Equil(T *d, T *e) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
// Number of elements in matrix.
size_t num_el = this->_m * this->_n;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign;
size_t num_sign_bytes = (num_el + 7) / 8;
hipMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
// Fill sign bits, assigning each thread a multiple of 8 elements.
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
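// Each byte of `sign` stores the sign bits of 8 consecutive entries while
// A itself is overwritten with |A| or A.^2, so Sinkhorn-Knopp operates on
// non-negative data; __UnSetSign restores the signs afterwards.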
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
AbsF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Perform Sinkhorn-Knopp equilibration.
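// (Alternately rescales rows and columns so that D * A * E ends up with
// roughly uniform row and column norms.)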
SinkhornKnopp(this, d, e);
hipDeviceSynchronize();
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
IdentityF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
hipDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
hipDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _ord, _data);
hipDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
hipDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
hipDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
cml::vector_scale(&d_vec, 1 / sqrt(normA));
cml::vector_scale(&e_vec, 1 / sqrt(normA));
hipDeviceSynchronize();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA,
cml::blas_nrm2(hdl, &d_vec), cml::blas_nrm2(hdl, &e_vec));
hipFree(sign);
CUDA_CHECK_ERR();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A) {
switch (norm_type) {
case kNorm2: {
return Norm2Est(hdl, &A);
}
case kNormFro: {
const cml::vector<T> a = cml::vector_view_array(A.Data(),
A.Rows() * A.Cols());
return cml::blas_nrm2(hdl, &a) / std::sqrt(std::min(A.Rows(), A.Cols()));
}
case kNorm1:
// 1-norm normalization doesn't make sense since it treats rows and
// columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs A := D * A * E for A in row major
template <typename T>
void __global__ __MultRow(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t / n] * e[t % n];
}
// Performs A := D * A * E for A in col major
template <typename T>
void __global__ __MultCol(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t % m] * e[t / m];
}
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data) {
if (ord == MatrixDense<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultRow), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, m, n, d, e, data);
} else {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultCol), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, m, n, d, e, data);
}
}
} // namespace
// Explicit template instantiation.
template class MatrixDense<double>;
template class MatrixDense<float>;
} // namespace pogs
| 2e8ba71c216b7466f8ebfcb703f551749fdf4f32.cu | #include <cublas_v2.h>
#include "cml/cml_blas.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "util.h"
namespace pogs {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
template<typename T>
struct GpuData {
const T *orig_data;
cublasHandle_t handle;
GpuData(const T *orig_data) : orig_data(orig_data) {
cublasCreate(&handle);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
cublasDestroy(handle);
DEBUG_CUDA_CHECK_ERR();
}
};
cublasOperation_t OpToCublasOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
return trans == 'n' || trans == 'N' ? CUBLAS_OP_N : CUBLAS_OP_T;
}
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A);
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixDense Implementation /////////////////////////////
////////////////////////////////////////////////////////////////////////////////
template <typename T>
MatrixDense<T>::MatrixDense(char ord, size_t m, size_t n, const T *data)
: Matrix<T>(m, n), _data(0) {
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
// Set GPU specific _info.
GpuData<T> *info = new GpuData<T>(data);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixDense<T>::MatrixDense(const MatrixDense<T>& A)
: Matrix<T>(A._m, A._n), _data(0), _ord(A._ord) {
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info);
GpuData<T> *info = new GpuData<T>(info_A->orig_data);
this->_info = reinterpret_cast<void*>(info);
}
template <typename T>
MatrixDense<T>::~MatrixDense() {
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
delete info;
this->_info = 0;
if (this->_done_init && _data) {
cudaFree(_data);
this->_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
}
template <typename T>
int MatrixDense<T>::Init() {
DEBUG_EXPECT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
// Copy Matrix to GPU.
cudaMalloc(&_data, this->_m * this->_n * sizeof(T));
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),
cudaMemcpyHostToDevice);
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_m);
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::Equil(T *d, T *e) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
// Number of elements in matrix.
size_t num_el = this->_m * this->_n;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign;
size_t num_sign_bytes = (num_el + 7) / 8;
cudaMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
// Fill sign bits, assigning each thread a multiple of 8 elements.
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SquareF<T>());
} else {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
AbsF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Perform Sinkhorn-Knopp equilibration.
SinkhornKnopp(this, d, e);
cudaDeviceSynchronize();
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SqrtF<T>());
} else {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
IdentityF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _ord, _data);
cudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
cudaDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
cudaDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
cml::vector_scale(&d_vec, 1 / sqrt(normA));
cml::vector_scale(&e_vec, 1 / sqrt(normA));
cudaDeviceSynchronize();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA,
cml::blas_nrm2(hdl, &d_vec), cml::blas_nrm2(hdl, &e_vec));
cudaFree(sign);
CUDA_CHECK_ERR();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A) {
switch (norm_type) {
case kNorm2: {
return Norm2Est(hdl, &A);
}
case kNormFro: {
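// Frobenius norm divided by sqrt(min(m, n)) is a cheap 2-norm estimate,
// since ||A||_2 <= ||A||_F <= sqrt(min(m, n)) * ||A||_2.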
const cml::vector<T> a = cml::vector_view_array(A.Data(),
A.Rows() * A.Cols());
return cml::blas_nrm2(hdl, &a) / std::sqrt(std::min(A.Rows(), A.Cols()));
}
case kNorm1:
// 1-norm normalization doesn't make sense since it treats rows and
// columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs A := D * A * E for A in row major
template <typename T>
void __global__ __MultRow(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t / n] * e[t % n];
}
// Performs A := D * A * E for A in col major
template <typename T>
void __global__ __MultCol(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t % m] * e[t / m];
}
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data) {
if (ord == MatrixDense<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
__MultRow<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data);
} else {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
__MultCol<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data);
}
}
} // namespace
// Explicit template instantiation.
template class MatrixDense<double>;
template class MatrixDense<float>;
} // namespace pogs
|
9383ea6dee57f9c1d4f5176acdcfb9fdfbeb427e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s
*/
/*
This version provides general blocking
blk_M=80 blk_N=80 blk_K=16 nthd_x=64 nthd_y=4
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define blks 5
#define bx 5
#define by 5
#define blk_K 16
#define blk_M (16*bx)
#define blk_N (16*by)
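// Tiling scheme: each 64x4 = 256-thread block computes an 80x80 tile of C.
// Threads are re-indexed into a 16x16 grid (tx2, ty2), and every thread
// accumulates a bx x by = 5x5 register tile whose elements are spaced 16
// apart in each dimension.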
texture<float,1> tex_x_float_A;
texture<float,1> tex_x_float_B;
static __inline__ __device__ float fetch_x_A(const int& i, const float * x)
{
return tex1Dfetch(tex_x_float_A, i);
}
static __inline__ __device__ float fetch_x_B(const int& i, const float * x)
{
return tex1Dfetch(tex_x_float_B, i);
}
extern "C" __global__ void
fermiSgemm_v2_kernel_NN_80(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb, int ldc,
float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * blk_N;
const int ibx = blockIdx.x * blk_M;
const int idt = ty * 64 + tx;
const int tx2= idt%16; // idt % 16 (remainder)
const int ty2= idt/16; // idt / 16 (quotient)
__shared__ float Bb[blk_K][blk_N+1];
__shared__ float Abs[blk_M][blk_K+1];
float xxA[bx];
float xxB[by];
int trackA = offsetA + ibx + tx2 + __mul24(ty2, lda);
int trackB = offsetB + tx2 + __mul24(iby+ty2*by, ldb);
int tll = ty2;
A += trackA;
B += trackB;
// read a block of 80x16 to A and 16x80 to B
// each thread reads 5 data points, 1 point in each 16x16 subblock
#pragma unroll
for(int y=0; y<bx; y++)
Abs[tx2+y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
#pragma unroll
for(int y=0; y<by; y++)
Bb[tx2][ty2*by+y] = fetch_x_B( trackB + y*ldb, B) ;
__syncthreads();
const float *Bend = B + k-16;
float Axs[bx];
float Bxp[by];
float Cb[25] = {0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0,
0,0,0,0,0};
do
{
tll += 16;
A += lda*16;
B += 16;
trackA+=16*lda ;
trackB+=16;
// calculate part of C using the first 80x8 of A and 8x80 of B
#pragma unroll
for( int j1=0;j1<8;j1++)
{
#pragma unroll
for( int y=0; y<bx; y++)
Axs[y] = Abs[tx2+y*16][j1];
#pragma unroll
for( int y=0; y<by; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0; x<bx; x++)
{
#pragma unroll
for( int y=0; y<by; y++)
{
Cb[x*by + y] += Axs[x]*Bxp[y];
}
}
}
// pre-read the next A and B
#pragma unroll
for( int y=0; y<bx; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
// without going through texture and tll protection,
// nonzeros are fetched
// texture boundary control seems to be providing
// safety here but officially tex1Dfetch is not supported
// by clamping or filtering (programming guide B.8.1)
#pragma unroll
for( int y=0; y<by; y++)
xxB[y] = fetch_x_B( trackB + y*ldb, B);
// calculate another part of C using the 2nd 80x8 of A and 8x80 of B
#pragma unroll
for( int j1=8;j1<16;j1++)
{
#pragma unroll
for( int y=0;y<bx;y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int y=0;y<by;y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0;x<bx;x++)
{
#pragma unroll
for( int y=0; y<by; y++)
{
Cb[x*by+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
// put the next A and B into position
#pragma unroll
for(int y=0;y<bx;y++)
Abs[tx2+y*16][ty2] =xxA[y];
#pragma unroll
for(int y=0; y<by; y++)
Bb[tx2][ty2*by + y] =xxB[y];
__syncthreads();
}
while (B < Bend);
//C += ty2 + ibx + __mul24 (tx2 + iby ,ldc);
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
// tail case
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0;y<by;y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int y=0;y<bx;y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int x=0;x<bx;x++)
#pragma unroll
for( int y=0; y<by; y++)
Cb[x*by+y] += Axs[x]*Bxp[y];
}
// __syncthreads();
// C += ty2 + ibx + __mul24 (tx2 + iby ,ldc);
int gy = iby + ty2;
// writing C
#pragma unroll
for( int y=0; y<by; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<bx; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*by] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_TN_80(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * blk_N;
const int ibx = blockIdx.x * blk_M;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ float Bb[blk_K][blk_N+1];
__shared__ float Abs[blk_M][blk_K+1];
float xxA[blks];
float xxB[blks];
int trackA = offsetA + tx2+ __mul24(ibx + ty2*blks, lda );
int trackB = offsetB + tx2+ __mul24(iby + ty2*blks, ldb );
A+= trackA;
B+= trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<blks; y++)
Abs[ty2*blks+y][tx2] = (tll<k)*fetch_x_A(trackA + y*lda, A);
#pragma unroll
for(int y=0; y<blks; y++)
Bb[tx2][ty2*blks+y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb, B ) ;
__syncthreads();
const float *Bend = B + k-16;
float Axs[blks];
float Bxp[blks];
float Cb[25] = {0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0,
0,0,0,0,0};
do
{
tll += 16;
A += 16;
B += 16;
trackA += 16;
trackB += 16;
// pre-read the next strip of A and B
#pragma unroll
for( int y=0; y<blks; y++)
xxA[y] = (tll<k)*fetch_x_A(trackA + y*lda, A);
#pragma unroll
for( int y=0; y<blks; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb, B);
// computing
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0;x<blks; x++)
#pragma unroll
for(int y=0; y<blks; y++)
Cb[x*blks + y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for(int y=0; y<blks; y++)
Abs[ty2*blks + y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<blks; y++)
Bb[tx2][ty2*blks + y] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24 (ty2 + iby, ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0; y<blks; y++)
Cb[x*blks + y] += Axs[x]*Bxp[y];
}
int gy = iby+ty2;
#pragma unroll
for(int y=0; y<blks; y++, gy+=16)
{
int gx = ibx+tx2;
#pragma unroll
for(int x=0; x<blks; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*blks] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_TT_80(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * blk_N;
const int ibx = blockIdx.x * blk_M;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ float Bb[blk_K][blk_N+1];
__shared__ float Abs[blk_M][blk_K+1];
float xxA[blks];
float xxB[blks];
int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2;
int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb);
A += trackA;
B += trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<blks; y++)
Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_x_A(trackA + lda*16*y, A);
#pragma unroll
for(int y=0; y<blks; y++)
Bb[ty2][tx2+16*y] = fetch_x_B(trackB+16*y, B);
__syncthreads();
const float *Bend = B + k*ldb - 16*ldb;
float Axs[blks];
float Bxp[blks];
float Cb[25] = {0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0,
0,0,0,0,0};
do
{
tll+=16;
A += 16;
B += 16*ldb;
trackA+=16;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<blks; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16, A);
#pragma unroll
for( int y=0; y<blks; y++)
xxB[y] = fetch_x_B(trackB + 16*y, B);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0;y<blks;y++)
Cb[x*blks+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<blks; y++)
Abs[ty2 + 16*y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<blks; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0; y<blks; y++)
Cb[x*blks+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<blks; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<blks; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*blks] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_NT_80(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * blk_N;
const int ibx = blockIdx.x * blk_M;
const int idt = ty * 64 + tx;
const int tx2= idt%16;
const int ty2= idt/16;
__shared__ float Bb[blk_K][blk_N+1];
__shared__ float Abs[blk_M][blk_K+1];
float xxA[blks];
float xxB[blks];
int trackA = offsetA + ibx +__mul24(ty2, lda) + tx2 ;
int trackB = offsetB + iby + tx2 + __mul24(ty2, ldb);
A+= trackA;
B += trackB;
int tll = ty2;
#pragma unroll
for(int y=0; y<blks; y++)
Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
#pragma unroll
for(int y=0; y<blks; y++)
Bb[ty2][tx2+16*y] = /* (tll<k)* */ fetch_x_B(trackB+16*y, B);
__syncthreads();
const float *Bend = B + k*ldb - 16*ldb;
float Axs[blks];
float Bxp[blks];
float Cb[25] = {0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0,
0,0,0,0,0};
do
{
tll += 16;
A += lda *16 ;
B += 16*ldb;
trackA+=16*lda ;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<blks; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
// tll same in the NN case
#pragma unroll
for( int y=0; y<blks; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B( trackB + 16*y, B);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0; y<blks; y++)
Cb[x*blks+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<blks; y++)
Abs[tx2 + y*16][ty2] = xxA[y];
#pragma unroll
for( int y=0; y<blks; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24(ty2 + iby ,ldc);
#pragma unroll
for(int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0;y<blks;y++)
Cb[x*blks+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<blks; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<blks; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y + x*blks] + beta * C[x*16];
}
C+=ldc*16;
}
}
//=================================================================================
extern "C" void
magmablas_sgemm_fermi80(char TRANSA, char TRANSB, magma_int_t m , magma_int_t n , magma_int_t k ,
float alpha, const float *A, magma_int_t lda,
const float *B, magma_int_t ldb,
float beta, float *C, magma_int_t ldc )
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
SGEMM performs one of the matrix-matrix operations
C := alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X',
alpha and beta are scalars, and A, B and C are matrices, with op( A )
an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
==========
TRANSA - CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
TRANSA = 'N' or 'n', op( A ) = A.
TRANSA = 'T' or 't', op( A ) = A'.
TRANSA = 'C' or 'c', op( A ) = A'.
Unchanged on exit.
TRANSB - CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
TRANSB = 'N' or 'n', op( B ) = B.
TRANSB = 'T' or 't', op( B ) = B'.
TRANSB = 'C' or 'c', op( B ) = B'.
Unchanged on exit.
M - INTEGER.
On entry, M specifies the number of rows of the matrix
op( A ) and of the matrix C. M must be at least zero.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the number of columns of the matrix
op( B ) and the number of columns of the matrix C. N must be
at least zero.
Unchanged on exit.
K - INTEGER.
On entry, K specifies the number of columns of the matrix
op( A ) and the number of rows of the matrix op( B ). K must
be at least zero.
Unchanged on exit.
ALPHA - SINGLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - SINGLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = 'N' or 'n', and is m otherwise.
Before entry with TRANSA = 'N' or 'n', the leading m by k
part of the array A must contain the matrix A, otherwise
the leading k by m part of the array A must contain the
matrix A.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = 'N' or 'n' then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
Unchanged on exit.
B - SINGLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = 'N' or 'n', and is k otherwise.
Before entry with TRANSB = 'N' or 'n', the leading k by n
part of the array B must contain the matrix B, otherwise
the leading n by k part of the array B must contain the
matrix B.
Unchanged on exit.
LDB - INTEGER.
On entry, LDB specifies the first dimension of B as declared
in the calling (sub) program. When TRANSB = 'N' or 'n' then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
Unchanged on exit.
BETA - SINGLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then C need not be set on input.
Unchanged on exit.
C - SINGLE PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array C must
contain the matrix C, except when beta is zero, in which
case C need not be set on entry.
On exit, the array C is overwritten by the m by n matrix
( alpha*op( A )*op( B ) + beta*C ).
LDC - INTEGER.
On entry, LDC specifies the first dimension of C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
Unchanged on exit.
===================================================================== */
if (m<=0 || n<=0 || k<=0)
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 1, TransB = 1;
if (TRANSA == 'N' || TRANSA == 'n')
TransA = 0;
if (TRANSB == 'N' || TRANSB == 'n')
TransB = 0;
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
size_t CUBLAS_MAX_1DBUF_SIZE = (1 << 27) - 512;
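// A 1D texture bound to linear memory can only address ~2^27 elements,
// so larger operands fall back to the plain BLAS path below.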
if (sizeA>=CUBLAS_MAX_1DBUF_SIZE ||
sizeB>=CUBLAS_MAX_1DBUF_SIZE )
{
hipblasSgemm(TRANSA, TRANSB, m, n, k, alpha,
A, lda, B, ldb,
beta, C, ldc);
return;
}
hipError_t errt;
errt = hipBindTexture(&offsetA, tex_x_float_A, (int2 *)A,
sizeA * sizeof(A[0]));
if( errt != hipSuccess) printf("can not bind to texture \n");
errt = hipBindTexture(&offsetB, tex_x_float_B, (int2 *)B,
sizeB * sizeof(B[0]));
if( errt != hipSuccess) printf("can not bind to texture \n");
dim3 threads( 64, 4 );
dim3 grid(m/(16*bx)+(m%(16*bx)!=0),n/(16*by)+(n%(16*by)!=0));
offsetA = offsetA/sizeof(A[0]);
offsetB = offsetB/sizeof(B[0]);
if ( TransB )
if( !TransA )
hipLaunchKernelGGL(( fermiSgemm_v2_kernel_NT_80), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
hipLaunchKernelGGL(( fermiSgemm_v2_kernel_TT_80), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
if( !TransA )
hipLaunchKernelGGL(( fermiSgemm_v2_kernel_NN_80), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
hipLaunchKernelGGL(( fermiSgemm_v2_kernel_TN_80), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
hipUnbindTexture ( tex_x_float_A ) ;
hipUnbindTexture ( tex_x_float_B ) ;
}
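// Minimal call sketch (illustrative only, not from the original source;
// assumes device buffers dA, dB, dC are already allocated and filled):
//
// magmablas_sgemm_fermi80('N', 'N', m, n, k, alpha, dA, lda, dB, ldb,
//                         beta, dC, ldc);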
| 9383ea6dee57f9c1d4f5176acdcfb9fdfbeb427e.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s
*/
/*
This version provides general blocking
blk_M=80 blk_N=80 blk_K=16 nthd_x=64 nthd_y=4
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define blks 5
#define bx 5
#define by 5
#define blk_K 16
#define blk_M (16*bx)
#define blk_N (16*by)
texture<float,1> tex_x_float_A;
texture<float,1> tex_x_float_B;
static __inline__ __device__ float fetch_x_A(const int& i, const float * x)
{
return tex1Dfetch(tex_x_float_A, i);
}
static __inline__ __device__ float fetch_x_B(const int& i, const float * x)
{
return tex1Dfetch(tex_x_float_B, i);
}
extern "C" __global__ void
fermiSgemm_v2_kernel_NN_80(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb, int ldc,
float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * blk_N;
const int ibx = blockIdx.x * blk_M;
const int idt = ty * 64 + tx;
const int tx2= idt%16; // idx2 / res
const int ty2= idt/16; // idy2 / qot
__shared__ float Bb[blk_K][blk_N+1];
__shared__ float Abs[blk_M][blk_K+1];
float xxA[bx];
float xxB[by];
int trackA = offsetA + ibx + tx2 + __mul24(ty2, lda);
int trackB = offsetB + tx2 + __mul24(iby+ty2*by, ldb);
int tll = ty2;
A += trackA;
B += trackB;
// read a block of 96x16 to A and 16x96 to B
// each thread reads 6 data point, 1 point in each 16x16 subblock
#pragma unroll
for(int y=0; y<bx; y++)
Abs[tx2+y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
#pragma unroll
for(int y=0; y<by; y++)
Bb[tx2][ty2*by+y] = fetch_x_B( trackB + y*ldb, B) ;
__syncthreads();
const float *Bend = B + k-16;
float Axs[bx];
float Bxp[by];
float Cb[25] = {0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0,
0,0,0,0,0};
do
{
tll += 16;
A += lda*16;
B += 16;
trackA+=16*lda ;
trackB+=16;
// calculate part of C using the first 96x8 of A and 8x96 of B
#pragma unroll
for( int j1=0;j1<8;j1++)
{
#pragma unroll
for( int y=0; y<bx; y++)
Axs[y] = Abs[tx2+y*16][j1];
#pragma unroll
for( int y=0; y<by; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0; x<bx; x++)
{
#pragma unroll
for( int y=0; y<by; y++)
{
Cb[x*by + y] += Axs[x]*Bxp[y];
}
}
}
// pre-read the next A and B
#pragma unroll
for( int y=0; y<bx; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
// without going through texture and tll protection,
// nonzeros are fetched
// texture boundary control seems to be providing
        // safety here but officially tex1Dfetch is not supported
// by clamping or filtering (programming guide B.8.1)
#pragma unroll
for( int y=0; y<by; y++)
xxB[y] = fetch_x_B( trackB + y*ldb, B);
// calculate another part of C using the 2nd 96x8 of A and 8x96 of B
#pragma unroll
for( int j1=8;j1<16;j1++)
{
#pragma unroll
for( int y=0;y<bx;y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int y=0;y<by;y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0;x<bx;x++)
{
#pragma unroll
for( int y=0; y<by; y++)
{
Cb[x*by+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
// put the next A and B into position
#pragma unroll
for(int y=0;y<bx;y++)
Abs[tx2+y*16][ty2] =xxA[y];
#pragma unroll
for(int y=0; y<by; y++)
Bb[tx2][ty2*by + y] =xxB[y];
__syncthreads();
}
while (B < Bend);
//C += ty2 + ibx + __mul24 (tx2 + iby ,ldc);
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
// tail case
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0;y<by;y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int y=0;y<bx;y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int x=0;x<bx;x++)
#pragma unroll
for( int y=0; y<by; y++)
Cb[x*by+y] += Axs[x]*Bxp[y];
}
// __syncthreads();
// C += ty2 + ibx + __mul24 (tx2 + iby ,ldc);
int gy = iby + ty2;
// writing C
#pragma unroll
for( int y=0; y<by; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<bx; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*by] + beta * C[x*16];
}
C+=ldc*16;
}
}
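/* Editor note on the kernel above (the TT/TN/NT variants below share the same
   skeleton): each trip through the main loop is software-pipelined. The first
   eight K-steps are computed from the shared tiles Abs/Bb, the next 16-deep
   slice of A and B is prefetched into the registers xxA/xxB, the remaining
   eight K-steps are computed, and only then are the shared tiles overwritten.
   The two __syncthreads() calls fence "read shared" against "write shared". */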
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_TN_80(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * blk_N;
const int ibx = blockIdx.x * blk_M;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ float Bb[blk_K][blk_N+1];
__shared__ float Abs[blk_M][blk_K+1];
float xxA[blks];
float xxB[blks];
int trackA = offsetA + tx2+ __mul24(ibx + ty2*blks, lda );
int trackB = offsetB + tx2+ __mul24(iby + ty2*blks, ldb );
A+= trackA;
B+= trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<blks; y++)
Abs[ty2*blks+y][tx2] = (tll<k)*fetch_x_A(trackA + y*lda, A);
#pragma unroll
for(int y=0; y<blks; y++)
Bb[tx2][ty2*blks+y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb, B ) ;
__syncthreads();
const float *Bend = B + k-16;
float Axs[blks];
float Bxp[blks];
float Cb[25] = {0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0,
0,0,0,0,0};
do
{
tll += 16;
A += 16;
B += 16;
trackA += 16;
trackB += 16;
// pre-read the next strip of A and B
#pragma unroll
for( int y=0; y<blks; y++)
xxA[y] = (tll<k)*fetch_x_A(trackA + y*lda, A);
#pragma unroll
for( int y=0; y<blks; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb, B);
// computing
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0;x<blks; x++)
#pragma unroll
for(int y=0; y<blks; y++)
Cb[x*blks + y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for(int y=0; y<blks; y++)
Abs[ty2*blks + y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<blks; y++)
Bb[tx2][ty2*blks + y] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24 (ty2 + iby, ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0; y<blks; y++)
Cb[x*blks + y] += Axs[x]*Bxp[y];
}
int gy = iby+ty2;
#pragma unroll
for(int y=0; y<blks; y++, gy+=16)
{
int gx = ibx+tx2;
#pragma unroll
for(int x=0; x<blks; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*blks] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_TT_80(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * blk_N;
const int ibx = blockIdx.x * blk_M;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ float Bb[blk_K][blk_N+1];
__shared__ float Abs[blk_M][blk_K+1];
float xxA[blks];
float xxB[blks];
int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2;
int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb);
A += trackA;
B += trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<blks; y++)
Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_x_A(trackA + lda*16*y, A);
#pragma unroll
for(int y=0; y<blks; y++)
Bb[ty2][tx2+16*y] = fetch_x_B(trackB+16*y, B);
__syncthreads();
const float *Bend = B + k*ldb - 16*ldb;
float Axs[blks];
float Bxp[blks];
float Cb[25] = {0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0,
0,0,0,0,0};
do
{
tll+=16;
A += 16;
B += 16*ldb;
trackA+=16;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<blks; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16, A);
#pragma unroll
for( int y=0; y<blks; y++)
xxB[y] = fetch_x_B(trackB + 16*y, B);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0;y<blks;y++)
Cb[x*blks+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<blks; y++)
Abs[ty2 + 16*y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<blks; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0; y<blks; y++)
Cb[x*blks+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<blks; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<blks; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*blks] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_NT_80(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * blk_N;
const int ibx = blockIdx.x * blk_M;
const int idt = ty * 64 + tx;
const int tx2= idt%16;
const int ty2= idt/16;
__shared__ float Bb[blk_K][blk_N+1];
__shared__ float Abs[blk_M][blk_K+1];
float xxA[blks];
float xxB[blks];
int trackA = offsetA + ibx +__mul24(ty2, lda) + tx2 ;
int trackB = offsetB + iby + tx2 + __mul24(ty2, ldb);
A+= trackA;
B += trackB;
int tll = ty2;
#pragma unroll
for(int y=0; y<blks; y++)
Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
#pragma unroll
for(int y=0; y<blks; y++)
Bb[ty2][tx2+16*y] = /* (tll<k)* */ fetch_x_B(trackB+16*y, B);
__syncthreads();
const float *Bend = B + k*ldb - 16*ldb;
float Axs[blks];
float Bxp[blks];
float Cb[25] = {0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0,
0,0,0,0,0};
do
{
tll += 16;
A += lda *16 ;
B += 16*ldb;
trackA+=16*lda ;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<blks; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
// tll same in the NN case
#pragma unroll
for( int y=0; y<blks; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B( trackB + 16*y, B);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0; y<blks; y++)
Cb[x*blks+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<blks; y++)
Abs[tx2 + y*16][ty2] = xxA[y];
#pragma unroll
for( int y=0; y<blks; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24(ty2 + iby ,ldc);
#pragma unroll
for(int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<blks; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<blks; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<blks; x++)
#pragma unroll
for( int y=0;y<blks;y++)
Cb[x*blks+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<blks; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<blks; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y + x*blks] + beta * C[x*16];
}
C+=ldc*16;
}
}
//=================================================================================
extern "C" void
magmablas_sgemm_fermi80(char TRANSA, char TRANSB, magma_int_t m , magma_int_t n , magma_int_t k ,
float alpha, const float *A, magma_int_t lda,
const float *B, magma_int_t ldb,
float beta, float *C, magma_int_t ldc )
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
SGEMM performs one of the matrix-matrix operations
C := alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X',
alpha and beta are scalars, and A, B and C are matrices, with op( A )
an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
==========
TRANSA - CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
TRANSA = 'N' or 'n', op( A ) = A.
TRANSA = 'T' or 't', op( A ) = A'.
TRANSA = 'C' or 'c', op( A ) = A'.
Unchanged on exit.
TRANSB - CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
TRANSB = 'N' or 'n', op( B ) = B.
TRANSB = 'T' or 't', op( B ) = B'.
TRANSB = 'C' or 'c', op( B ) = B'.
Unchanged on exit.
M - INTEGER.
On entry, M specifies the number of rows of the matrix
op( A ) and of the matrix C. M must be at least zero.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the number of columns of the matrix
op( B ) and the number of columns of the matrix C. N must be
at least zero.
Unchanged on exit.
K - INTEGER.
On entry, K specifies the number of columns of the matrix
op( A ) and the number of rows of the matrix op( B ). K must
be at least zero.
Unchanged on exit.
ALPHA - SINGLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - SINGLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = 'N' or 'n', and is m otherwise.
Before entry with TRANSA = 'N' or 'n', the leading m by k
part of the array A must contain the matrix A, otherwise
the leading k by m part of the array A must contain the
matrix A.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = 'N' or 'n' then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
Unchanged on exit.
B - SINGLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = 'N' or 'n', and is k otherwise.
Before entry with TRANSB = 'N' or 'n', the leading k by n
part of the array B must contain the matrix B, otherwise
the leading n by k part of the array B must contain the
matrix B.
Unchanged on exit.
LDB - INTEGER.
On entry, LDB specifies the first dimension of B as declared
in the calling (sub) program. When TRANSB = 'N' or 'n' then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
Unchanged on exit.
BETA - SINGLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then C need not be set on input.
Unchanged on exit.
C - SINGLE PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array C must
contain the matrix C, except when beta is zero, in which
case C need not be set on entry.
On exit, the array C is overwritten by the m by n matrix
( alpha*op( A )*op( B ) + beta*C ).
LDC - INTEGER.
On entry, LDC specifies the first dimension of C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
Unchanged on exit.
===================================================================== */
if (m<=0 || n<=0 || k<=0)
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 1, TransB = 1;
if (TRANSA == 'N' || TRANSA == 'n')
TransA = 0;
if (TRANSB == 'N' || TRANSB == 'n')
TransB = 0;
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
size_t CUBLAS_MAX_1DBUF_SIZE = (1 << 27) - 512;
if (sizeA>=CUBLAS_MAX_1DBUF_SIZE ||
sizeB>=CUBLAS_MAX_1DBUF_SIZE )
{
cublasSgemm(TRANSA, TRANSB, m, n, k, alpha,
A, lda, B, ldb,
beta, C, ldc);
return;
}
cudaError_t errt;
errt = cudaBindTexture(&offsetA, tex_x_float_A, (int2 *)A,
sizeA * sizeof(A[0]));
    if( errt != cudaSuccess) printf("cannot bind to texture\n");
errt = cudaBindTexture(&offsetB, tex_x_float_B, (int2 *)B,
sizeB * sizeof(B[0]));
    if( errt != cudaSuccess) printf("cannot bind to texture\n");
dim3 threads( 64, 4 );
dim3 grid(m/(16*bx)+(m%(16*bx)!=0),n/(16*by)+(n%(16*by)!=0));
offsetA = offsetA/sizeof(A[0]);
offsetB = offsetB/sizeof(B[0]);
if ( TransB )
if( !TransA )
fermiSgemm_v2_kernel_NT_80<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
fermiSgemm_v2_kernel_TT_80<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
if( !TransA )
fermiSgemm_v2_kernel_NN_80<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
fermiSgemm_v2_kernel_TN_80<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
cudaUnbindTexture ( tex_x_float_A ) ;
cudaUnbindTexture ( tex_x_float_B ) ;
}
|
7d6ecda21408d45302ff35eaf44428ee08e48689.hip | // !!! This is a file automatically generated by hipify!!!
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <hip/hip_runtime.h>
#include <cstdio>
#include <time.h>
#include "support.h"
/* Some reference:
http://luniak.io/cuda-neural-network-implementation-part-1/
*/
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
// Define layers of CNN
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);
static Layer l_s1 = Layer(4*4, 1, 6*6*6);
static Layer l_f = Layer(6*6*6, 10, 10);
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
srand(time(NULL));
hipError_t err = hipInit(0);
if (err != hipSuccess) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
    // Run training and testing ------------------------------------------------
    printf("Launching kernels...\n"); fflush(stdout);
loaddata();
learn();
test();
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
return 0;
}
// Forward propagation of a single row in dataset
static double forward_pass(double data[28][28])
{
float input[28][28];
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
l_input.clear();
l_c1.clear();
l_s1.clear();
l_f.clear();
clock_t start, end;
start = clock();
l_input.setOutput((float *)input);
hipLaunchKernelGGL(( fp_preact_c1), dim3(64), dim3(64), 0, 0, (float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight);
hipLaunchKernelGGL(( fp_bias_c1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.preact, l_c1.bias);
hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_c1.preact, l_c1.output, l_c1.O);
hipLaunchKernelGGL(( fp_preact_s1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.output, (float (*)[6][6])l_s1.preact, (float (*)[4][4])l_s1.weight);
hipLaunchKernelGGL(( fp_bias_s1), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.preact, l_s1.bias);
hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_s1.preact, l_s1.output, l_s1.O);
hipLaunchKernelGGL(( fp_preact_f), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.output, l_f.preact, (float (*)[6][6][6])l_f.weight);
hipLaunchKernelGGL(( fp_bias_f), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.bias);
hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.output, l_f.O);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
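// Editor note: shapes implied by the layer definitions at the top of the file:
// input 28x28 -> c1 (six 5x5 filters) -> 24x24x6 -> s1 (4x4 subsampling) ->
// 6x6x6 -> f (fully connected) -> 10 class scores, with apply_step_function
// supplying the elementwise nonlinearity after every stage.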
// Back propagation to update weights
static double back_pass()
{
clock_t start, end;
start = clock();
hipLaunchKernelGGL(( bp_weight_f), dim3(64), dim3(64), 0, 0, (float (*)[6][6][6])l_f.d_weight, l_f.d_preact, (float (*)[6][6])l_s1.output);
hipLaunchKernelGGL(( bp_bias_f), dim3(64), dim3(64), 0, 0, l_f.bias, l_f.d_preact);
hipLaunchKernelGGL(( bp_output_s1), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.d_output, (float (*)[6][6][6])l_f.weight, l_f.d_preact);
hipLaunchKernelGGL(( bp_preact_s1), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.d_preact, (float (*)[6][6])l_s1.d_output, (float (*)[6][6])l_s1.preact);
hipLaunchKernelGGL(( bp_weight_s1), dim3(64), dim3(64), 0, 0, (float (*)[4][4])l_s1.d_weight, (float (*)[6][6])l_s1.d_preact, (float (*)[24][24])l_c1.output);
hipLaunchKernelGGL(( bp_bias_s1), dim3(64), dim3(64), 0, 0, l_s1.bias, (float (*)[6][6])l_s1.d_preact);
hipLaunchKernelGGL(( bp_output_c1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.d_output, (float (*)[4][4])l_s1.weight, (float (*)[6][6])l_s1.d_preact);
hipLaunchKernelGGL(( bp_preact_c1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.d_preact, (float (*)[24][24])l_c1.d_output, (float (*)[24][24])l_c1.preact);
hipLaunchKernelGGL(( bp_weight_c1), dim3(64), dim3(64), 0, 0, (float (*)[5][5])l_c1.d_weight, (float (*)[24][24])l_c1.d_preact, (float (*)[28])l_input.output);
hipLaunchKernelGGL(( bp_bias_c1), dim3(64), dim3(64), 0, 0, l_c1.bias, (float (*)[24][24])l_c1.d_preact);
hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_f.weight, l_f.d_weight, l_f.M * l_f.N);
hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer (unused helper: the 2x2 loop bounds below do not match the declared 24*24 x 5*5 unfolding)
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j) {
int b = 0;
for (int x = i; x < i + 2; ++x)
for (int y = j; y < j+2; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
static hipblasHandle_t blas;
hipblasCreate(&blas);
float err;
int iter = 50;
double time_taken = 0.0;
    fprintf(stdout, "Learning\n");
while (iter < 0 || iter-- > 0) {
err = 0.0f;
for (int i = 0; i < train_cnt; ++i) {
float tmp_err;
time_taken += forward_pass(train_set[i].data);
l_f.bp_clear();
l_s1.bp_clear();
l_c1.bp_clear();
            // Euclidean distance error for train_set[i]
hipLaunchKernelGGL(( makeError), dim3(10), dim3(1), 0, 0, l_f.d_preact, l_f.output, train_set[i].label, 10);
hipblasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err);
err += tmp_err;
time_taken += back_pass();
}
err /= train_cnt;
fprintf(stdout, "error: %e, GPU Time: %lf\n", err, time_taken);
if (err < threshold) {
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
fprintf(stdout, "\n GPU Time : %lf\n", time_taken);
}
// Returns label of given data (0-9)
static unsigned int classify(double data[28][28])
{
float res[10];
forward_pass(data);
unsigned int max = 0;
hipMemcpy(res, l_f.output, sizeof(float) * 10, hipMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
| 7d6ecda21408d45302ff35eaf44428ee08e48689.cu | #define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <cuda.h>
#include <cstdio>
#include <time.h>
#include "support.h"
/* Some reference:
http://luniak.io/cuda-neural-network-implementation-part-1/
*/
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
// Define layers of CNN
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);
static Layer l_s1 = Layer(4*4, 1, 6*6*6);
static Layer l_f = Layer(6*6*6, 10, 10);
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
srand(time(NULL));
CUresult err = cuInit(0);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel...s\n"); fflush(stdout);
loaddata();
learn();
test();
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
return 0;
}
// Forward propagation of a single row in dataset
static double forward_pass(double data[28][28])
{
float input[28][28];
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
l_input.clear();
l_c1.clear();
l_s1.clear();
l_f.clear();
clock_t start, end;
start = clock();
l_input.setOutput((float *)input);
fp_preact_c1<<<64, 64>>>((float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight);
fp_bias_c1<<<64, 64>>>((float (*)[24][24])l_c1.preact, l_c1.bias);
apply_step_function<<<64, 64>>>(l_c1.preact, l_c1.output, l_c1.O);
fp_preact_s1<<<64, 64>>>((float (*)[24][24])l_c1.output, (float (*)[6][6])l_s1.preact, (float (*)[4][4])l_s1.weight);
fp_bias_s1<<<64, 64>>>((float (*)[6][6])l_s1.preact, l_s1.bias);
apply_step_function<<<64, 64>>>(l_s1.preact, l_s1.output, l_s1.O);
fp_preact_f<<<64, 64>>>((float (*)[6][6])l_s1.output, l_f.preact, (float (*)[6][6][6])l_f.weight);
fp_bias_f<<<64, 64>>>(l_f.preact, l_f.bias);
apply_step_function<<<64, 64>>>(l_f.preact, l_f.output, l_f.O);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Back propagation to update weights
static double back_pass()
{
clock_t start, end;
start = clock();
bp_weight_f<<<64, 64>>>((float (*)[6][6][6])l_f.d_weight, l_f.d_preact, (float (*)[6][6])l_s1.output);
bp_bias_f<<<64, 64>>>(l_f.bias, l_f.d_preact);
bp_output_s1<<<64, 64>>>((float (*)[6][6])l_s1.d_output, (float (*)[6][6][6])l_f.weight, l_f.d_preact);
bp_preact_s1<<<64, 64>>>((float (*)[6][6])l_s1.d_preact, (float (*)[6][6])l_s1.d_output, (float (*)[6][6])l_s1.preact);
bp_weight_s1<<<64, 64>>>((float (*)[4][4])l_s1.d_weight, (float (*)[6][6])l_s1.d_preact, (float (*)[24][24])l_c1.output);
bp_bias_s1<<<64, 64>>>(l_s1.bias, (float (*)[6][6])l_s1.d_preact);
bp_output_c1<<<64, 64>>>((float (*)[24][24])l_c1.d_output, (float (*)[4][4])l_s1.weight, (float (*)[6][6])l_s1.d_preact);
bp_preact_c1<<<64, 64>>>((float (*)[24][24])l_c1.d_preact, (float (*)[24][24])l_c1.d_output, (float (*)[24][24])l_c1.preact);
bp_weight_c1<<<64, 64>>>((float (*)[5][5])l_c1.d_weight, (float (*)[24][24])l_c1.d_preact, (float (*)[28])l_input.output);
bp_bias_c1<<<64, 64>>>(l_c1.bias, (float (*)[24][24])l_c1.d_preact);
apply_grad<<<64, 64>>>(l_f.weight, l_f.d_weight, l_f.M * l_f.N);
apply_grad<<<64, 64>>>(l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
apply_grad<<<64, 64>>>(l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer (unused helper: the 2x2 loop bounds below do not match the declared 24*24 x 5*5 unfolding)
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j) {
int b = 0;
for (int x = i; x < i + 2; ++x)
for (int y = j; y < j+2; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
static cublasHandle_t blas;
cublasCreate(&blas);
float err;
int iter = 50;
double time_taken = 0.0;
    fprintf(stdout, "Learning\n");
while (iter < 0 || iter-- > 0) {
err = 0.0f;
for (int i = 0; i < train_cnt; ++i) {
float tmp_err;
time_taken += forward_pass(train_set[i].data);
l_f.bp_clear();
l_s1.bp_clear();
l_c1.bp_clear();
            // Euclidean distance error for train_set[i]
makeError<<<10, 1>>>(l_f.d_preact, l_f.output, train_set[i].label, 10);
cublasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err);
err += tmp_err;
time_taken += back_pass();
}
err /= train_cnt;
fprintf(stdout, "error: %e, GPU Time: %lf\n", err, time_taken);
if (err < threshold) {
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
fprintf(stdout, "\n GPU Time : %lf\n", time_taken);
}
// Returns label of given data (0-9)
static unsigned int classify(double data[28][28])
{
float res[10];
forward_pass(data);
unsigned int max = 0;
cudaMemcpy(res, l_f.output, sizeof(float) * 10, cudaMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
|
2da8a565b0c95ac98e7781b3527b7794a63b4b21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rempel_heat_conduction.h"
#include "constants.h"
// define variables for measuring performance
//float cpu_time = 0.0;
//float gpu_time = 0.0;
int main(void)
{
// allocate cpu memory
float * T_cpu = new float[L];
float * T_gpu = new float[L];
float * q_cpu = new float[L];
float * q_gpu = new float[L];
float * x = new float[Lx];
// initialize the grid
initial_grid(x);
// apply initial conditions
initial_conditions(T_cpu, q_cpu, x);
initial_conditions(T_gpu, q_gpu, x);
// run CPU test
float cpu_time = 0;
cpu_time = heat_1d_cpu_solve(T_cpu, q_cpu, x, false, "cpu/");
printf("cpu: %f ms\n", cpu_time);
    // run second test (labelled "gpu"): currently this also runs the CPU solver,
    // with the Fickian (parabolic) flux; the real GPU path is the commented call
    float gpu_time = 0;
    gpu_time = heat_1d_cpu_solve(T_gpu, q_gpu, x, true, "gpu/");
    // gpu_time = heat_1d_gpu_solve(T_gpu, q_gpu, x, false);
printf("gpu t = %f ms, R = %f\n", gpu_time, cpu_time / gpu_time);
// calculate rms error
float rms = 0.0;
for(uint l = 0; l < L; l++) {
rms += (T_cpu[l] - T_gpu[l]) * (T_cpu[l] - T_gpu[l]);
}
rms = std::sqrt(rms / (float) L);
printf("CPU-GPU RMS error = %e \n", rms);
// print something so we know it didn't crash somewhere
printf("All tests completed!\n");
return 0;
}
float heat_1d_gpu_solve(float * T, float * q, float * x, bool fickian, std::string path){
// Set up device
int dev = 0;
CHECK(hipSetDevice(dev));
// Print device and precision
// hipDeviceProp_t prop;
// CHECK(hipGetDeviceProperties(&prop, 0));
// print_device_properties(prop);
// configure the device to have the largest possible L1 cache
CHECK(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
// allocate pinned host memory
float *T_h; // dependent variables
float *q_h;
float *x_h; // independent variables
CHECK(hipHostMalloc((float **) &T_h, L * sizeof(float)));
CHECK(hipHostMalloc((float **) &q_h, L * sizeof(float)));
CHECK(hipHostMalloc((float **) &x_h, Lx * sizeof(float)));
// allocate device memory
float *T_d, *q_d; // dependent variables
float *x_d; // independent variables
CHECK(hipMalloc((float **) &T_d, wt * Lx * sizeof(float)));
CHECK(hipMalloc((float **) &q_d, wt * Lx * sizeof(float)));
CHECK(hipMalloc((float **) &x_d, Lx * sizeof(float)));
// transfer initial condition from argument to pinned host memory
CHECK(hipMemcpy(T_h, T, Lx * sizeof(float), hipMemcpyHostToHost));
CHECK(hipMemcpy(q_h, q, Lx * sizeof(float), hipMemcpyHostToHost));
CHECK(hipMemcpy(x_h, x, Lx * sizeof(float), hipMemcpyHostToHost));
// transfer data from the host to the device
CHECK(hipMemcpy(T_d, T_h, Lx * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(q_d, q_h, Lx * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(x_d, x_h, Lx * sizeof(float), hipMemcpyHostToDevice));
// set the number of threads and blocks
const uint threads = lx;
const uint blocks = Nx;
// set the amount of shared memory
const uint shared = 0;
// initialize streams
hipStream_t k_stream, m_stream;
hipStreamCreate(&k_stream); // initialize computation stream
hipStreamCreate(&m_stream); // initialize memory stream
// initialize timing events
float gpu_time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// main time-marching loop
for(uint ti = 0; ti < Wt; ti++){ // downsampled resolution
for(uint tj = 0; tj < wt; tj++){ // original resolution
// start memory transfer
if(tj == 0 and ti > 0){
hipStreamSynchronize(m_stream); // check if memory transfer is completed
hipMemcpyAsync(T_h + (ti * Lx), T_d, Lx * sizeof(float), hipMemcpyDeviceToHost, m_stream);
}
// perform timestep
hipLaunchKernelGGL(( heat_1d_gpu_parabolic_step), dim3(blocks), dim3(threads), shared, k_stream, T_d, q_d, x_d, tj);
hipStreamSynchronize(k_stream);
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_time, start, stop);
// copy to original argument pointers
CHECK(hipMemcpy(T, T_h, L * sizeof(float), hipMemcpyHostToHost));
save_results(path, T, q, x);
return gpu_time;
}
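/* Editor note on the two streams above: k_stream advances the solution while
   m_stream copies the previously finished frame from T_d back to pinned host
   memory, so device-to-host traffic for step ti-1 overlaps the compute of
   step ti; the hipStreamSynchronize(m_stream) guard keeps a new async copy
   from starting before the previous one has drained. */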
__global__ void heat_1d_gpu_parabolic_step(float * T_d, float * q, float * x, uint n){
// Find index from threadId
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < Lx - 1){
// Load stencil
float T0 = T_d[n * Lx + (i + 0)];
float T1 = T_d[n * Lx + (i + 1)];
float q0 = q[n * Lx + (i + 0)];
// Load position grid
float x0 = x[i + 0];
float x1 = x[i + 1];
float kappa = (T0 * T0 * sqrt(T0) + T1 * T1 * sqrt(T1)) / 2;
float dt = dt_p;
float qa = -kappa * (T1 - T0) / (x1 - x0);
q[((n + 1) % wt) * Lx + i] = qa;
if(i > 0) {
float q9 = q[n * Lx + (i - 1)];
float x9 = x[i - 1];
float Ta = T0 - ((q0 - q9) * dt / (x0 - x9));
T_d[((n + 1) % wt) * Lx + i] = Ta;
} else {
T_d[((n + 1) % wt) * Lx + i] = T_left;
}
} else {
T_d[((n + 1) % wt) * Lx + i] = T_right;
}
return;
}
float heat_1d_cpu_solve(float * T, float * q, float * x, bool fickian, std::string path){
printf(" dx = %e\n dt_p = %e\n dt_h = %e\n c_h = %e\n tau = %e\n", dx, dt_p, dt_h, v_h, kappa_max / (v_h * v_h));
float cpu_time;
struct timeval t1, t2;
gettimeofday(&t1, 0);
float * T_d = new float[bt * Lx];
float * q_d = new float[bt * Lx];
memcpy(T_d, T, Lx * sizeof(float));
    memcpy(q_d, q, Lx * sizeof(float));
// main time-marching loop
uint ti = 0;
for(uint n = 0; n < Lt; n++){
if(fickian){
heat_1d_cpu_parabolic_step(T, T_d, q_d, x, n);
} else {
heat_1d_cpu_hyperbolic_step(T, T_d, q_d, x, n);
}
if((n % wt) == 0){
// memcpy(T + (ti * Lx), T_d, Lx * sizeof(float));
// memcpy(q + (ti * Lx), q_d, Lx * sizeof(float));
for(int i = 0; i < Lx; i++){
T[ti * Lx + i] = T_d[(n % bt) * Lx + i];
q[ti * Lx + i] = q_d[(n % bt) * Lx + i];
}
ti++;
}
}
// // main time-marching loop
// for(uint ti = 0; ti < Wt; ti++){ // downsampled resolution
//
// for(uint tj = 0; tj < wt; tj++){ // original resolution
//
//
//
// if(fickian){
// heat_1d_cpu_parabolic_step(T, T_d, q_d, x, tj);
// } else {
// heat_1d_cpu_hyperbolic_step(T, T_d, q_d, x, tj);
// }
//
// if(tj == 0 and ti > 0) {
// // memcpy(T + (ti * Lx), T_d, Lx * sizeof(float));
// // memcpy(q + (ti * Lx), q_d, Lx * sizeof(float));
// for(int i = 0; i < Lx; i++){
// T[ti * Lx + i] = T_d[i];
// q[ti * Lx + i] = q_d[i];
// }
// }
//
// //
//
// }
//
// }
gettimeofday(&t2, 0);
cpu_time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
save_results(path, T, q, x);
return cpu_time;
}
void heat_1d_cpu_hyperbolic_step(float * T, float * T_d, float * q, float * x, uint n){
// printf("________________\n");
// perform timestep
uint i;
for(i = 0; i < (Lx - 1); i++){
// Load stencil
float T0 = T_d[(n % bt) * Lx + (i + 0)];
float T1 = T_d[(n % bt) * Lx + (i + 1)];
float q0 = q[(n % bt) * Lx + (i + 0)];
float qz = q[((n - 1) % bt) * Lx + (i + 0)];
// Load position grid
float x0 = x[i + 0];
float x1 = x[i + 1];
float v2 = v_h * v_h;
float kappa_0 = T0 * T0 * sqrt(T0);
float kappa_1 = T1 * T1 * sqrt(T1);
float kappa = (kappa_0 + kappa_1) / 2.0;
// float kappa = 1.0;
// compute hyperbolic timescale
// float tau = g*kappa;
float tau = kappa / v_h;
// float tau = dt_p;
float c2 = kappa / tau;
float dt = dt_h;
tau = max(tau, 4 * dt);
// if(i == Lx - 2){
// printf("%e\n", q0);
// }
//
// printf("n = %d\n",n);
// printf("tau = %e\n", tau);
// printf("q0 - ((c2 * (T1 - T0) * dt) / (x1 - x0)) - (q0 * dt / tau)\n");
// printf("%e - ((%e * (%e - %e) * %e) / (%e - %e)) - (%e * %e / %e)\n", q0, c2, T1,T0, dt, x1, x0, q0, dt, tau);
// printf("%e - %e - %e\n", q0, ((c2 * (T1 - T0) * dt) / (x1 - x0)), (q0 * dt / tau));
// }
float R = (tau - dt) / tau;
float S = (kappa * dt) / (tau * dx);
// float qa = - tau * (q0 - qz) - kappa * (T1 - T0) / (x1 - x0);
float qa = R * q0 - S * (T1 - T0);
// float qa = q0 - dt * (q0 + kappa * (T1 - T0) / dx) / tau;
// float qa = dt * (q0 * tau / dt - kappa * (T1 - T0) / dx) / (tau + dt);
// float qa = ((2 * tau - dt) * q0 - dt * kappa * (T1 - T0) / dx) / (2 * tau + dt);
q[((n + 1) % bt) * Lx + i] = qa;
if(i > 0) {
float q9 = q[(n % bt) * Lx + (i - 1)];
float x9 = x[i - 1];
float Ta = T0 - ((q0 - q9) * dt / (x0 - x9));
T_d[((n + 1) % bt) * Lx + i] = Ta;
// T_d[((n + 1) % bt) * Lx + i] = T0;
if(i > Lx - 5){
// printf("S = (%f * %f) / (%f * %f)\n", kappa, dt, tau, dx);
// printf(" = %f / %f\n", kappa * dt, tau * dx);
// printf(" = %f\n", S);
// printf("------------------\n");
// printf("qa_%d = %f * %f - %f * (%f - %f)\n",i, R, q0, S, T1, T0);
// printf(" = %f * %f - %f * %f\n", R, q0, S, T1 - T0);
// printf(" = %f - %f\n", R * q0, S * (T1 - T0));
// printf(" = %f\n", qa);
// printf("------------------\n");
// printf("Ta_%d = %f - %f * (%f - %f)\n",i, T0, dt / dx, q0, q9);
// printf(" = %f - %f * %f\n", T0, dt / dx, q0 - q9);
// printf(" = %f - %f\n", T0, dt / dx * (q0 - q9));
// printf(" = %f\n", Ta);
}
}
}
// printf("_______________________________________________________\n");
// apply left boundary conditions
i = 0;
T_d[((n + 1) % bt) * Lx + i] = T_left;
// apply right boundary conditions
i = Lx - 1;
T_d[((n + 1) % bt) * Lx + i] = T_right;
// i = Lx - 2;
// q[((n + 1) % bt) * Lx + i] = 0;
}
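/* Editor note: the hyperbolic update implemented above, written out. With
   R = (tau - dt)/tau and S = kappa*dt/(tau*dx),
       q_i^{n+1} = R * q_i^n - S * (T_{i+1}^n - T_i^n)
       T_i^{n+1} = T_i^n - (dt/dx) * (q_i^n - q_{i-1}^n)
   which is an explicit discretization of the telegraph form
       tau * dq/dt + q = -kappa * dT/dx,   dT/dt = -dq/dx,
   with tau = kappa / v_h clamped from below by 4*dt. */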
void heat_1d_cpu_parabolic_step(float * T, float * T_d, float * q, float * x, uint n){
// perform timestep
uint i;
for(i = 0; i < Lx - 1; i++){
// Load stencil
float T0 = T_d[(n % bt) * Lx + (i + 0)];
float T1 = T_d[(n % bt) * Lx + (i + 1)];
float q0 = q[(n % bt) * Lx + (i + 0)];
// Load position grid
float x0 = x[i + 0];
float x1 = x[i + 1];
float kappa = (T0 * T0 * sqrt(T0) + T1 * T1 * sqrt(T1)) / 2.0;
// float kappa = 1.0;
float dt = dt_p;
float qa = -kappa * (T1 - T0) / (x1 - x0);
q[((n + 1) % bt) * Lx + i] = qa;
if(i > 0) {
float q9 = q[(n % bt) * Lx + (i - 1)];
float x9 = x[i - 1];
float Ta = T0 - ((q0 - q9) * dt / (x0 - x9));
T_d[((n + 1) % bt) * Lx + i] = Ta;
}
}
// apply left boundary conditions
i = 0;
T_d[((n + 1) % bt) * Lx + i] = T_left;
// apply right boundary conditions
i = Lx - 1;
T_d[((n + 1) % bt) * Lx + i] = T_right;
}
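/* Editor note (assumption): constants.h is not shown, but for this explicit
   parabolic update to be stable dt_p must satisfy the usual diffusion bound
   dt_p <= dx*dx / (2 * kappa_max); the hyperbolic scheme above trades that for
   a CFL-type limit dt_h <= dx / v_h plus the relaxation time tau. */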
void initial_conditions(float * T, float * q, float * x){
int n = 0;
// initialize host memory
    for(int i = 0; i < Lx; i++){ // Initial condition for dependent variable
        float x0 = x[i];
        float T0 = 0.1 + 0.9 * pow(x0,5);
        T[(n % wt) * Lx + i] = T0;
        if(i < (Lx - 1)){
            float x1 = x[i + 1]; // read x[i + 1] only while it stays in bounds
            float T1 = 0.1 + 0.9 * pow(x1,5);
            float kappa = (T0 * T0 * sqrt(T0) + T1 * T1 * sqrt(T1)) / 2.0;
            (void)kappa; // kept for the commented flux initialisation below
            // q[n * Lx + i] = -kappa * (T1 - T0) / (x1 - x0);
            q[n * Lx + i] = 0;
        }
    }
    printf("x[Lx-1] = %e\n", x[Lx-1]);
}
void initial_grid(float * x){
for(int i = 0; i < Lx; i++) x[i] = i * dx; // initialize rectangular grid in x
}
void save_results(std::string path, float * T, float * q, float * x){
// open files
FILE * meta_f = fopen(("output/" + path + "meta.dat").c_str(), "wb");
FILE * T_f = fopen(("output/" + path + "T.dat").c_str(), "wb");
FILE * q_f = fopen(("output/" + path + "q.dat").c_str(), "wb");
FILE * x_f = fopen(("output/" + path + "x.dat").c_str(), "wb");
// save state variables
fwrite(&Wt, sizeof(uint), 1, meta_f);
fwrite(&wt, sizeof(uint), 1, meta_f);
fwrite(&Lx, sizeof(uint), 1, meta_f);
fwrite(&dx, sizeof(float), 1, meta_f);
fwrite(&dt_p, sizeof(float), 1, meta_f);
// write data
fwrite(T, sizeof(float), L, T_f);
fwrite(q, sizeof(float), L, q_f);
fwrite(x, sizeof(float), Lx, x_f);
// close files
fclose(meta_f);
fclose(T_f);
fclose(q_f);
fclose(x_f);
}
| 2da8a565b0c95ac98e7781b3527b7794a63b4b21.cu | #include "rempel_heat_conduction.h"
#include "constants.h"
// define variables for measuring performance
//float cpu_time = 0.0;
//float gpu_time = 0.0;
int main(void)
{
// allocate cpu memory
float * T_cpu = new float[L];
float * T_gpu = new float[L];
float * q_cpu = new float[L];
float * q_gpu = new float[L];
float * x = new float[Lx];
// initialize the grid
initial_grid(x);
// apply initial conditions
initial_conditions(T_cpu, q_cpu, x);
initial_conditions(T_gpu, q_gpu, x);
// run CPU test
float cpu_time = 0;
cpu_time = heat_1d_cpu_solve(T_cpu, q_cpu, x, false, "cpu/");
printf("cpu: %f ms\n", cpu_time);
    // run second test (labelled "gpu"): currently this also runs the CPU solver,
    // with the Fickian (parabolic) flux; the real GPU path is the commented call
    float gpu_time = 0;
    gpu_time = heat_1d_cpu_solve(T_gpu, q_gpu, x, true, "gpu/");
    // gpu_time = heat_1d_gpu_solve(T_gpu, q_gpu, x, false);
printf("gpu t = %f ms, R = %f\n", gpu_time, cpu_time / gpu_time);
// calculate rms error
float rms = 0.0;
for(uint l = 0; l < L; l++) {
rms += (T_cpu[l] - T_gpu[l]) * (T_cpu[l] - T_gpu[l]);
}
rms = std::sqrt(rms / (float) L);
printf("CPU-GPU RMS error = %e \n", rms);
// print something so we know it didn't crash somewhere
printf("All tests completed!\n");
return 0;
}
float heat_1d_gpu_solve(float * T, float * q, float * x, bool fickian, std::string path){
// Set up device
int dev = 0;
CHECK(cudaSetDevice(dev));
// Print device and precision
// cudaDeviceProp prop;
// CHECK(cudaGetDeviceProperties(&prop, 0));
// print_device_properties(prop);
// configure the device to have the largest possible L1 cache
CHECK(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
// allocate pinned host memory
float *T_h; // dependent variables
float *q_h;
float *x_h; // independent variables
CHECK(cudaMallocHost((float **) &T_h, L * sizeof(float)));
CHECK(cudaMallocHost((float **) &q_h, L * sizeof(float)));
CHECK(cudaMallocHost((float **) &x_h, Lx * sizeof(float)));
// allocate device memory
float *T_d, *q_d; // dependent variables
float *x_d; // independent variables
CHECK(cudaMalloc((float **) &T_d, wt * Lx * sizeof(float)));
CHECK(cudaMalloc((float **) &q_d, wt * Lx * sizeof(float)));
CHECK(cudaMalloc((float **) &x_d, Lx * sizeof(float)));
// transfer initial condition from argument to pinned host memory
CHECK(cudaMemcpy(T_h, T, Lx * sizeof(float), cudaMemcpyHostToHost));
CHECK(cudaMemcpy(q_h, q, Lx * sizeof(float), cudaMemcpyHostToHost));
CHECK(cudaMemcpy(x_h, x, Lx * sizeof(float), cudaMemcpyHostToHost));
// transfer data from the host to the device
CHECK(cudaMemcpy(T_d, T_h, Lx * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(q_d, q_h, Lx * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(x_d, x_h, Lx * sizeof(float), cudaMemcpyHostToDevice));
// set the number of threads and blocks
const uint threads = lx;
const uint blocks = Nx;
// set the amount of shared memory
const uint shared = 0;
// initialize streams
cudaStream_t k_stream, m_stream;
cudaStreamCreate(&k_stream); // initialize computation stream
cudaStreamCreate(&m_stream); // initialize memory stream
// initialize timing events
float gpu_time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// main time-marching loop
for(uint ti = 0; ti < Wt; ti++){ // downsampled resolution
for(uint tj = 0; tj < wt; tj++){ // original resolution
// start memory transfer
if(tj == 0 and ti > 0){
cudaStreamSynchronize(m_stream); // check if memory transfer is completed
cudaMemcpyAsync(T_h + (ti * Lx), T_d, Lx * sizeof(float), cudaMemcpyDeviceToHost, m_stream);
}
// perform timestep
heat_1d_gpu_parabolic_step<<<blocks, threads, shared, k_stream>>>(T_d, q_d, x_d, tj);
cudaStreamSynchronize(k_stream);
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_time, start, stop);
// copy to original argument pointers
CHECK(cudaMemcpy(T, T_h, L * sizeof(float), cudaMemcpyHostToHost));
save_results(path, T, q, x);
return gpu_time;
}
__global__ void heat_1d_gpu_parabolic_step(float * T_d, float * q, float * x, uint n){
// Find index from threadId
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < Lx - 1){
// Load stencil
float T0 = T_d[n * Lx + (i + 0)];
float T1 = T_d[n * Lx + (i + 1)];
float q0 = q[n * Lx + (i + 0)];
// Load position grid
float x0 = x[i + 0];
float x1 = x[i + 1];
float kappa = (T0 * T0 * sqrt(T0) + T1 * T1 * sqrt(T1)) / 2;
float dt = dt_p;
float qa = -kappa * (T1 - T0) / (x1 - x0);
q[((n + 1) % wt) * Lx + i] = qa;
if(i > 0) {
float q9 = q[n * Lx + (i - 1)];
float x9 = x[i - 1];
float Ta = T0 - ((q0 - q9) * dt / (x0 - x9));
T_d[((n + 1) % wt) * Lx + i] = Ta;
} else {
T_d[((n + 1) % wt) * Lx + i] = T_left;
}
} else {
T_d[((n + 1) % wt) * Lx + i] = T_right;
}
return;
}
float heat_1d_cpu_solve(float * T, float * q, float * x, bool fickian, std::string path){
printf(" dx = %e\n dt_p = %e\n dt_h = %e\n c_h = %e\n tau = %e\n", dx, dt_p, dt_h, v_h, kappa_max / (v_h * v_h));
float cpu_time;
struct timeval t1, t2;
gettimeofday(&t1, 0);
float * T_d = new float[bt * Lx];
float * q_d = new float[bt * Lx];
memcpy(T_d, T, Lx * sizeof(float));
    memcpy(q_d, q, Lx * sizeof(float));
// main time-marching loop
uint ti = 0;
for(uint n = 0; n < Lt; n++){
if(fickian){
heat_1d_cpu_parabolic_step(T, T_d, q_d, x, n);
} else {
heat_1d_cpu_hyperbolic_step(T, T_d, q_d, x, n);
}
if((n % wt) == 0){
// memcpy(T + (ti * Lx), T_d, Lx * sizeof(float));
// memcpy(q + (ti * Lx), q_d, Lx * sizeof(float));
for(int i = 0; i < Lx; i++){
T[ti * Lx + i] = T_d[(n % bt) * Lx + i];
q[ti * Lx + i] = q_d[(n % bt) * Lx + i];
}
ti++;
}
}
// // main time-marching loop
// for(uint ti = 0; ti < Wt; ti++){ // downsampled resolution
//
// for(uint tj = 0; tj < wt; tj++){ // original resolution
//
//
//
// if(fickian){
// heat_1d_cpu_parabolic_step(T, T_d, q_d, x, tj);
// } else {
// heat_1d_cpu_hyperbolic_step(T, T_d, q_d, x, tj);
// }
//
// if(tj == 0 and ti > 0) {
// // memcpy(T + (ti * Lx), T_d, Lx * sizeof(float));
// // memcpy(q + (ti * Lx), q_d, Lx * sizeof(float));
// for(int i = 0; i < Lx; i++){
// T[ti * Lx + i] = T_d[i];
// q[ti * Lx + i] = q_d[i];
// }
// }
//
// //
//
// }
//
// }
gettimeofday(&t2, 0);
cpu_time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
save_results(path, T, q, x);
return cpu_time;
}
void heat_1d_cpu_hyperbolic_step(float * T, float * T_d, float * q, float * x, uint n){
// printf("________________\n");
// perform timestep
uint i;
for(i = 0; i < (Lx - 1); i++){
// Load stencil
float T0 = T_d[(n % bt) * Lx + (i + 0)];
float T1 = T_d[(n % bt) * Lx + (i + 1)];
float q0 = q[(n % bt) * Lx + (i + 0)];
float qz = q[((n - 1) % bt) * Lx + (i + 0)];
// Load position grid
float x0 = x[i + 0];
float x1 = x[i + 1];
float v2 = v_h * v_h;
float kappa_0 = T0 * T0 * sqrt(T0);
float kappa_1 = T1 * T1 * sqrt(T1);
float kappa = (kappa_0 + kappa_1) / 2.0;
// float kappa = 1.0;
// compute hyperbolic timescale
// float tau = g*kappa;
float tau = kappa / v_h;
// float tau = dt_p;
float c2 = kappa / tau;
float dt = dt_h;
tau = max(tau, 4 * dt);
// if(i == Lx - 2){
// printf("%e\n", q0);
// }
//
// printf("n = %d\n",n);
// printf("tau = %e\n", tau);
// printf("q0 - ((c2 * (T1 - T0) * dt) / (x1 - x0)) - (q0 * dt / tau)\n");
// printf("%e - ((%e * (%e - %e) * %e) / (%e - %e)) - (%e * %e / %e)\n", q0, c2, T1,T0, dt, x1, x0, q0, dt, tau);
// printf("%e - %e - %e\n", q0, ((c2 * (T1 - T0) * dt) / (x1 - x0)), (q0 * dt / tau));
// }
float R = (tau - dt) / tau;
float S = (kappa * dt) / (tau * dx);
// float qa = - tau * (q0 - qz) - kappa * (T1 - T0) / (x1 - x0);
float qa = R * q0 - S * (T1 - T0);
// float qa = q0 - dt * (q0 + kappa * (T1 - T0) / dx) / tau;
// float qa = dt * (q0 * tau / dt - kappa * (T1 - T0) / dx) / (tau + dt);
// float qa = ((2 * tau - dt) * q0 - dt * kappa * (T1 - T0) / dx) / (2 * tau + dt);
q[((n + 1) % bt) * Lx + i] = qa;
if(i > 0) {
float q9 = q[(n % bt) * Lx + (i - 1)];
float x9 = x[i - 1];
float Ta = T0 - ((q0 - q9) * dt / (x0 - x9));
T_d[((n + 1) % bt) * Lx + i] = Ta;
// T_d[((n + 1) % bt) * Lx + i] = T0;
if(i > Lx - 5){
// printf("S = (%f * %f) / (%f * %f)\n", kappa, dt, tau, dx);
// printf(" = %f / %f\n", kappa * dt, tau * dx);
// printf(" = %f\n", S);
// printf("------------------\n");
// printf("qa_%d = %f * %f - %f * (%f - %f)\n",i, R, q0, S, T1, T0);
// printf(" = %f * %f - %f * %f\n", R, q0, S, T1 - T0);
// printf(" = %f - %f\n", R * q0, S * (T1 - T0));
// printf(" = %f\n", qa);
// printf("------------------\n");
// printf("Ta_%d = %f - %f * (%f - %f)\n",i, T0, dt / dx, q0, q9);
// printf(" = %f - %f * %f\n", T0, dt / dx, q0 - q9);
// printf(" = %f - %f\n", T0, dt / dx * (q0 - q9));
// printf(" = %f\n", Ta);
}
}
}
// printf("_______________________________________________________\n");
// apply left boundary conditions
i = 0;
T_d[((n + 1) % bt) * Lx + i] = T_left;
// apply right boundary conditions
i = Lx - 1;
T_d[((n + 1) % bt) * Lx + i] = T_right;
// i = Lx - 2;
// q[((n + 1) % bt) * Lx + i] = 0;
}
void heat_1d_cpu_parabolic_step(float * T, float * T_d, float * q, float * x, uint n){
// perform timestep
uint i;
for(i = 0; i < Lx - 1; i++){
// Load stencil
float T0 = T_d[(n % bt) * Lx + (i + 0)];
float T1 = T_d[(n % bt) * Lx + (i + 1)];
float q0 = q[(n % bt) * Lx + (i + 0)];
// Load position grid
float x0 = x[i + 0];
float x1 = x[i + 1];
float kappa = (T0 * T0 * sqrt(T0) + T1 * T1 * sqrt(T1)) / 2.0;
// float kappa = 1.0;
float dt = dt_p;
float qa = -kappa * (T1 - T0) / (x1 - x0);
q[((n + 1) % bt) * Lx + i] = qa;
if(i > 0) {
float q9 = q[(n % bt) * Lx + (i - 1)];
float x9 = x[i - 1];
float Ta = T0 - ((q0 - q9) * dt / (x0 - x9));
T_d[((n + 1) % bt) * Lx + i] = Ta;
}
}
// apply left boundary conditions
i = 0;
T_d[((n + 1) % bt) * Lx + i] = T_left;
// apply right boundary conditions
i = Lx - 1;
T_d[((n + 1) % bt) * Lx + i] = T_right;
}
void initial_conditions(float * T, float * q, float * x){
int n = 0;
// initialize host memory
    for(int i = 0; i < Lx; i++){ // Initial condition for dependent variable
        float x0 = x[i];
        float T0 = 0.1 + 0.9 * pow(x0,5);
        T[(n % wt) * Lx + i] = T0;
        if(i < (Lx - 1)){
            float x1 = x[i + 1]; // read x[i + 1] only while it stays in bounds
            float T1 = 0.1 + 0.9 * pow(x1,5);
            float kappa = (T0 * T0 * sqrt(T0) + T1 * T1 * sqrt(T1)) / 2.0;
            (void)kappa; // kept for the commented flux initialisation below
            // q[n * Lx + i] = -kappa * (T1 - T0) / (x1 - x0);
            q[n * Lx + i] = 0;
        }
    }
    printf("x[Lx-1] = %e\n", x[Lx-1]);
}
void initial_grid(float * x){
for(int i = 0; i < Lx; i++) x[i] = i * dx; // initialize rectangular grid in x
}
void save_results(std::string path, float * T, float * q, float * x){
// open files
FILE * meta_f = fopen(("output/" + path + "meta.dat").c_str(), "wb");
FILE * T_f = fopen(("output/" + path + "T.dat").c_str(), "wb");
FILE * q_f = fopen(("output/" + path + "q.dat").c_str(), "wb");
FILE * x_f = fopen(("output/" + path + "x.dat").c_str(), "wb");
// save state variables
fwrite(&Wt, sizeof(uint), 1, meta_f);
fwrite(&wt, sizeof(uint), 1, meta_f);
fwrite(&Lx, sizeof(uint), 1, meta_f);
fwrite(&dx, sizeof(float), 1, meta_f);
fwrite(&dt_p, sizeof(float), 1, meta_f);
// write data
fwrite(T, sizeof(float), L, T_f);
fwrite(q, sizeof(float), L, q_f);
fwrite(x, sizeof(float), Lx, x_f);
// close files
fclose(meta_f);
fclose(T_f);
fclose(q_f);
fclose(x_f);
}
|
a750b4756d82e58c211a06e803d0c962f41dbecb.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<cuda.h>
void printDevProp(hipDeviceProp_t devProp) {
printf("%s\n", devProp.name);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
    printf("Total global memory: %zu", devProp.totalGlobalMem);
    printf(" bytes\n");
    printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block: %d\n", devProp.regsPerBlock);
    printf("Warp size: %d\n", devProp.warpSize);
    printf("Maximum memory pitch: %zu\n", devProp.memPitch);
    printf("Total constant memory: %zu\n", devProp.totalConstMem);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Maximum threads per dimension: %d,%d,%d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
return;
}
int main(void) {
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printDevProp(deviceProp);
}
} | a750b4756d82e58c211a06e803d0c962f41dbecb.cu | #include<stdio.h>
#include<cuda.h>
void printDevProp(cudaDeviceProp devProp) {
printf("%s\n", devProp.name);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
    printf("Total global memory: %zu", devProp.totalGlobalMem);
    printf(" bytes\n");
    printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block: %d\n", devProp.regsPerBlock);
    printf("Warp size: %d\n", devProp.warpSize);
    printf("Maximum memory pitch: %zu\n", devProp.memPitch);
    printf("Total constant memory: %zu\n", devProp.totalConstMem);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Maximum threads per dimension: %d,%d,%d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
return;
}
int main(void) {
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printDevProp(deviceProp);
}
} |
4313f5bdf10dd1171bb82385c487a82a92af8516.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand_kernel.h"
#include "vec_kernels.cuh"
#include "matrixFunctions.cuh"
#include <math.h>
#include <time.h>
#include <stdio.h>
//Ax is the number of rows
//Ay is the number of cols
__global__ void meanOfFeatures(double *A, double *means, int Ax, int Ay) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
means[x] = 0;
int i;
for (i = 0; i < Ax; i++) {
means[x] = means[x] + A[x + (i*Ay)];
}
means[x] = means[x] / (double)Ax;
}
//allocate one thread for each element of Matrix A
__global__ void subMeansFromCols(double *A, double *B, double *means, int Ax, int Ay) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int col = x % Ay;
B[x] = A[x] - means[col];
}
//threads allotted is equal to size of covariance matrix
//which is Ay x Ay
__global__ void calcCovMatrix(double *A, double *cov, int Ax, int Ay) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
//feature 1 and feature 2 to correlate
//corresponds to row and col in covariance matrix
int f1 = x / Ay;
int f2 = x % Ay;
    int i;
    cov[x] = 0; // clear the accumulator: device memory is not guaranteed zeroed
    for (i = 0; i < Ax; i++) {
        cov[x] += (A[f1 + (i * Ay)] * A[f2 + (i * Ay)]);
}
cov[x] /= (Ax - 1);
}
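/* Editor note: with A mean-centered by subMeansFromCols, this kernel computes
   the sample covariance cov = A^T A / (Ax - 1), one entry per thread.
   Hypothetical launch for a small feature count (assumes Ay*Ay <= 1024):
       calcCovMatrix<<<1, Ay * Ay>>>(zero_mean_data_d, cov_d, Ax, Ay);
   larger feature counts would need a multi-block grid to cover Ay*Ay threads. */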
//https://en.wikipedia.org/wiki/Principal_component_analysis#Covariance-free_computation
//random vector that is of length num_features
__global__ void genRandVector(double *r, hiprandState_t *state, unsigned long clock_seed) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long seed = 132241684309342543 + clock_seed;
hiprand_init (seed, x, 0, &state[x]);
hiprandState_t localState = state[x];
r[x] = (double)hiprand_uniform(&localState);
state[x] = localState;
}
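/* Editor sketch of the covariance-free power iteration these kernels support
   (see the Wikipedia reference above); r is the current eigenvector estimate:
       s = 0
       for each data row x:   s += (x . r) * x
       eigenvalue = r . s
       r = s / |s|
   clearSVector resets s, getRow fetches a row, vec_dot_product forms (x . r),
   and vec_scalar_mul scales the row before it is accumulated into s. */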
__global__ void clearSVector(double *s, int n) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
s[x] = 0;
}
//allocate a thread for each element of row
__global__ void getRow(double *X, double *out, int row_index, int cols) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
out[x] = X[x + (row_index * cols)];
//out[x] = row_index;
}
__global__ void MatrixMul(double * A, double * B, double * C, int Ax, int Ay, int Bx, int By) {
if (Ax == By) {
// total array position
int x = blockIdx.x * blockDim.x + threadIdx.x;
// reset C array
C[x] = 0;
__syncthreads();
int count;
int Aindex, Bindex;
double prod;
for (count = 0; count < Ax; count++) {
// row of C matrix
Aindex = (x / Bx) * Ax + count;
// column of C matrix
Bindex = (x % Bx) + Bx * count;
prod = A[Aindex] * B[Bindex];
C[x] += prod;
}
}
}
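/* Editor note: thread x owns C[r][c] with r = x / Bx and c = x % Bx, and forms
       C[r*Bx + c] = sum_k A[r*Ax + k] * B[k*Bx + c]
   so here Ax is used as A's row length (the inner dimension), unlike the
   rows/cols convention stated at the top of the file. */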
__global__
void vec_add(double *a, double *b, double *out, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = a[idx] + b[idx];
}
__global__
void vec_dot_product(double *a, double *b, double *out, size_t stride, size_t n)
{
extern __shared__ double temp[];
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
temp[tid] = (idx < n) ? a[idx] * b[idx] : 0;
__syncthreads();
for (size_t shf = blockDim.x / 2; shf > 0; shf >>= 1) {
if (tid < shf) {
temp[tid] += temp[tid + shf];
}
__syncthreads();
}
if (tid == 0)
out[blockIdx.x] = temp[0];
}
__global__
void vec_scalar_mul(double *a, double *out, double c, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = a[idx] * c;
}
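// For reference, a minimal single-threaded host sketch of the
// covariance-free power iteration cited above (illustrative only; this
// hypothetical helper is not called by pca() below). X is zero-mean,
// row-major, rows x cols; r holds a unit vector of length cols on entry
// and the dominant-eigenvector estimate on exit.
static void power_iteration_host(const double *X, double *r, int rows, int cols, int iters)
{
    double *s = (double *)calloc(cols, sizeof(double));
    for (int it = 0; it < iters; it++) {
        for (int k = 0; k < cols; k++) s[k] = 0.0;
        for (int i = 0; i < rows; i++) { // s += (x_i . r) * x_i
            const double *x = X + (size_t)i * cols;
            double dot = 0.0;
            for (int k = 0; k < cols; k++) dot += x[k] * r[k];
            for (int k = 0; k < cols; k++) s[k] += dot * x[k];
        }
        double len = 0.0; // r = s / |s|
        for (int k = 0; k < cols; k++) len += s[k] * s[k];
        len = sqrt(len);
        for (int k = 0; k < cols; k++) r[k] = s[k] / len;
    }
    free(s);
}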
double *pca(double *data, int rows, int cols, int iterations) {
//host
volatile int i, j, k;
double dot_prod = 0;
double *r = (double *)calloc(cols, sizeof(double));
double *s = (double *)calloc(cols, sizeof(double));
double *eigenvalues = (double *)malloc(cols * sizeof(double));
unsigned long clock_seed = (unsigned long)clock();
//device
double *data_d, *means_d, *zero_mean_data_d, *r_d, *r_unit_d, *s_d, *x_row_d, *r_x_dot_d, *add_to_s_d, *eigenvalues_d;
hiprandState_t *state_d;
hipMalloc((void**)&data_d, rows * cols * sizeof(double));
hipMalloc((void**)&means_d, cols * sizeof(double));
hipMalloc((void**)&zero_mean_data_d, rows * cols * sizeof(double));
hipMalloc((void**)&r_d, cols * sizeof(double));
hipMalloc((void**)&r_unit_d, cols * sizeof(double));
hipMalloc((void**)&s_d, cols * sizeof(double));
hipMalloc((void**)&x_row_d, cols * sizeof(double));
hipMalloc((void**)&r_x_dot_d, cols * sizeof(double));
hipMalloc((void**)&add_to_s_d, cols * sizeof(double));
hipMalloc((void**)&eigenvalues_d, cols * sizeof(double));
hipMalloc((void**)&state_d, cols * sizeof(hiprandState_t));
printf("Data:\n");
for (k = 0; k < 6; k++) {
printf("%f ", data[k]);
if (k % 3 == 2) {
printf("\n");
}
}
printf("\n\n");
hipMemcpy(r_d, r, cols * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(data_d, data, rows * cols * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( meanOfFeatures) , dim3(1), dim3(cols), 0, 0, data_d, means_d, rows, cols);
hipMemcpy(r, means_d, cols * sizeof(double), hipMemcpyDeviceToHost);
printf("Means:\n");
for (k = 0; k < 3; k++) {
printf("%f ", r[k]);
}
printf("\n\n");
hipLaunchKernelGGL(( subMeansFromCols) , dim3(rows), dim3(cols), 0, 0, data_d, zero_mean_data_d, means_d, rows, cols);
hipMemcpy(data, zero_mean_data_d, rows * cols * sizeof(double), hipMemcpyDeviceToHost);
printf("Zero Mean Data:\n");
for (k = 0; k < 6; k++) {
printf("%f ", data[k]);
if (k % 3 == 2) {
printf("\n");
}
}
printf("\n\n");
hipLaunchKernelGGL(( genRandVector) , dim3(1), dim3(cols), 0, 0, r_d, state_d, clock_seed);
hipDeviceSynchronize();
hipMemcpy(r, r_d, cols * sizeof(double), hipMemcpyDeviceToHost);
printf("Random Values:\n");
for (k = 0; k < 3; k++) {
printf("%f ", r[k]);
}
printf("\n\n");
double r_len = 0;
for (i = 0; i < cols; i++) {
r_len += (r[i] * r[i]);
}
r_len = 1/sqrt(r_len);
printf("Rlen: %f\n\n", r_len);
hipLaunchKernelGGL(( vec_scalar_mul) , dim3(1), dim3(cols), 0, 0, r_d, r_unit_d, r_len, 1, cols);
hipDeviceSynchronize();
hipMemcpy(r, r_unit_d, cols * sizeof(double), hipMemcpyDeviceToHost);
printf("Random Unit Vector:\n");
for (k = 0; k < 3; k++) {
printf("%f ", r[k]);
}
printf("\n\n");
double s_len;
for (i = 0; i < iterations; i++) {
printf("Iteration: %i\n", i);
//set s to 0
hipLaunchKernelGGL(( clearSVector) , dim3(1), dim3(cols), 0, 0, s_d, cols);
for (j = 0; j < rows; j++) {
printf("Row: %i\n", j);
hipDeviceSynchronize();
hipLaunchKernelGGL(( getRow) , dim3(1), dim3(cols), 0, 0, zero_mean_data_d, x_row_d, j, cols);
hipDeviceSynchronize();
hipMemcpy(s, x_row_d, cols * sizeof(double), hipMemcpyDeviceToHost);
printf("Row:\n");
for (k = 0; k < 3; k++) {
printf("%f ", s[k]);
}
printf("\n\n");
//printf("Got row\n");
//output is at index 0 of data_r_d
hipLaunchKernelGGL(( vec_dot_product) , dim3(1), dim3(cols), cols * sizeof(double), 0, x_row_d, r_unit_d, r_x_dot_d, 1, cols); // extern __shared__ temp[] needs cols doubles of dynamic shared memory
//printf("Got dot product\n");
hipDeviceSynchronize();
hipMemcpy(&dot_prod, r_x_dot_d, sizeof(double), hipMemcpyDeviceToHost);
//printf("Isolated scalar value");
hipLaunchKernelGGL(( vec_scalar_mul) , dim3(1), dim3(cols), 0, 0, x_row_d, add_to_s_d, dot_prod, 1, cols);
//printf("Did scalar mult\n");
hipLaunchKernelGGL(( vec_add) , dim3(1), dim3(cols), 0, 0, s_d, add_to_s_d, s_d, 1, cols);
//printf("Did vector add\n");
}
hipDeviceSynchronize();
hipMemcpy(s, s_d, cols * sizeof(double), hipMemcpyDeviceToHost);
printf("S:\n");
for (k = 0; k < 3; k++) {
printf("%f ", s[k]);
}
printf("\n");
hipLaunchKernelGGL(( MatrixMul) , dim3(1), dim3(1), 0, 0, r_unit_d, s_d, eigenvalues_d, cols, 1, 1, cols); // r^T s is 1x1, so a single thread writes eigenvalues_d[0]
hipDeviceSynchronize();
hipMemcpy(eigenvalues, eigenvalues_d, cols * sizeof(double), hipMemcpyDeviceToHost);
printf("Eigenvalues:\n");
for (k = 0; k < 3; k++) {
printf("%f ", eigenvalues[k]);
}
printf("\n");
//printf("Did matrix mult\n");
hipDeviceSynchronize();
hipMemcpy(s, s_d, cols * sizeof(double), hipMemcpyDeviceToHost);
s_len = 0;
for (k = 0; k < cols; k++) {
s_len += (s[k] * s[k]);
}
s_len = 1/sqrt(s_len);
hipLaunchKernelGGL(( vec_scalar_mul) , dim3(1), dim3(cols), 0, 0, s_d, r_unit_d, s_len, 1, cols); // power-iteration update: r = s / |s|
}
hipDeviceSynchronize();
printf("Finished calculating eigenvalues\n");
hipMemcpy(eigenvalues, eigenvalues_d, cols * sizeof(double), hipMemcpyDeviceToHost);
printf("Eigenvalues:\n");
for (i = 0; i < 3; i++) {
printf("%f ", eigenvalues[i]);
}
printf("\n");
free(r);
free(s);
hipFree(data_d);
hipFree(means_d);
hipFree(zero_mean_data_d);
hipFree(r_d);
hipFree(r_unit_d);
hipFree(s_d);
hipFree(x_row_d);
hipFree(r_x_dot_d);
hipFree(add_to_s_d);
hipFree(eigenvalues_d);
hipFree(state_d);
return eigenvalues;
}
#define SIZE 5
int main() {
double data[6] = {1, 2, 3, 4, 5, 6};
double *eigenvalues = pca(data, 2, 3, 2);
int i;
for (i = 0; i < 3; i++) {
printf("%f ", eigenvalues[i]);
}
printf("\n");
/*
double r[5] = {0, 0, 0, 0, 0};
unsigned long clock_seed = (unsigned long)clock();
double *r_d;
hiprandState_t *state_d;
unsigned long *clock_seed_d;
hipMalloc((void**)&r_d, SIZE * sizeof(double));
hipMalloc((void**)&state_d, SIZE * sizeof(hiprandState_t));
hipMalloc((void**)&clock_seed_d, sizeof(unsigned long));
hipMemcpy(r_d, r, SIZE * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(clock_seed_d, &clock_seed, sizeof(unsigned long), hipMemcpyHostToDevice);
genRandVector<<<5, 1>>>(r_d, state_d, (double)clock());
hipMemcpy(r, r_d, SIZE * sizeof(double), hipMemcpyDeviceToHost);
int i;
for (i = 0; i < SIZE; i++) {
printf("%f ", r[i]);
}
printf("\n");
*/
}
/*
vec_dot_mat(zero_mean_data_d, r_unit_d, data_r_d, rows, cols)
MatrixMul(zero_mean_data_d, data_r_d, add_to_s, cols, rows, 1, cols);
vec_add(s_d, add_to_s, new_s_d, 1, cols);
MatrixMul(r_unit_d, , double * C, int Ax, int Ay, int Bx, int By)
*/
| 4313f5bdf10dd1171bb82385c487a82a92af8516.cu | #include "cuda_runtime.h"
#include "curand_kernel.h"
#include "vec_kernels.cuh"
#include "matrixFunctions.cuh"
#include <math.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h> // malloc/calloc/free are used below
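// A minimal sketch of runtime error checking (an illustrative addition,
// assuming only standard CUDA runtime calls); the demo below does not use
// it, but wrapping calls such as cudaMemcpy or cudaDeviceSynchronize in
// CHECK(...) surfaces launch and copy failures early.
#define CHECK(call) do { \
    cudaError_t err_ = (call); \
    if (err_ != cudaSuccess) { \
        fprintf(stderr, "CUDA error %s at %s:%d\n", \
                cudaGetErrorString(err_), __FILE__, __LINE__); \
        exit(1); \
    } \
} while (0)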
//Ax is the number of rows
//Ay is the number of cols
__global__ void meanOfFeatures(double *A, double *means, int Ax, int Ay) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
means[x] = 0;
int i;
for (i = 0; i < Ax; i++) {
means[x] = means[x] + A[x + (i*Ay)];
}
means[x] = means[x] / (double)Ax;
}
//allocate one thread for each element of Matrix A
__global__ void subMeansFromCols(double *A, double *B, double *means, int Ax, int Ay) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int col = x % Ay;
B[x] = A[x] - means[col];
}
//threads allotted is equal to size of covariance matrix
//which is Ay x Ay
__global__ void calcCovMatrix(double *A, double *cov, int Ax, int Ay) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
//feature 1 and feature 2 to correlate
//corresponds to row and col in covariance matrix
int f1 = x / Ay;
int f2 = x % Ay;
int i;
for (i = 0; i < Ax; i++) {
cov[x] += (A[f1 + (i * Ay)] * A[f2 + (i * Ay)]);
}
cov[x] /= (Ax - 1);
}
//https://en.wikipedia.org/wiki/Principal_component_analysis#Covariance-free_computation
//random vector that is of length num_features
__global__ void genRandVector(double *r, curandState *state, unsigned long clock_seed) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long seed = 132241684309342543 + clock_seed;
curand_init (seed, x, 0, &state[x]);
curandState localState = state[x];
r[x] = (double)curand_uniform(&localState);
state[x] = localState;
}
__global__ void clearSVector(double *s, int n) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
s[x] = 0;
}
//allocate a thread for each element of row
__global__ void getRow(double *X, double *out, int row_index, int cols) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
out[x] = X[x + (row_index * cols)];
//out[x] = row_index;
}
__global__ void MatrixMul(double * A, double * B, double * C, int Ax, int Ay, int Bx, int By) {
if (Ax == By) {
// total array position
int x = blockIdx.x * blockDim.x + threadIdx.x;
// reset C array
C[x] = 0;
__syncthreads();
int count;
int Aindex, Bindex;
double prod;
for (count = 0; count < Ax; count++) {
// row of C matrix
Aindex = (x / Bx) * Ax + count;
// column of C matrix
Bindex = (x % Bx) + Bx * count;
prod = A[Aindex] * B[Bindex];
C[x] += prod;
}
}
}
__global__
void vec_add(double *a, double *b, double *out, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = a[idx] + b[idx];
}
__global__
void vec_dot_product(double *a, double *b, double *out, size_t stride, size_t n)
{
extern __shared__ double temp[];
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
temp[tid] = (idx < n) ? a[idx] * b[idx] : 0;
__syncthreads();
// tree-reduce the per-thread products; (width + 1) / 2 also handles
// block sizes that are not powers of two (e.g. cols == 3 in main)
for (size_t width = blockDim.x; width > 1; ) {
size_t half = (width + 1) / 2;
if (tid + half < width) {
temp[tid] += temp[tid + half];
}
__syncthreads();
width = half;
}
if (tid == 0)
out[blockIdx.x] = temp[0];
}
__global__
void vec_scalar_mul(double *a, double *out, double c, size_t stride, size_t n)
{
size_t tid = threadIdx.x;
size_t gid = blockIdx.x * blockDim.x + tid;
size_t idx = gid * stride;
if (idx < n)
out[idx] = a[idx] * c;
}
double *pca(double *data, int rows, int cols, int iterations) {
//host
volatile int i, j, k;
double dot_prod = 0;
double *r = (double *)calloc(cols, sizeof(double));
double *s = (double *)calloc(cols, sizeof(double));
double *eigenvalues = (double *)malloc(cols * sizeof(double));
unsigned long clock_seed = (unsigned long)clock();
//device
double *data_d, *means_d, *zero_mean_data_d, *r_d, *r_unit_d, *s_d, *x_row_d, *r_x_dot_d, *add_to_s_d, *eigenvalues_d;
curandState *state_d;
cudaMalloc((void**)&data_d, rows * cols * sizeof(double));
cudaMalloc((void**)&means_d, cols * sizeof(double));
cudaMalloc((void**)&zero_mean_data_d, rows * cols * sizeof(double));
cudaMalloc((void**)&r_d, cols * sizeof(double));
cudaMalloc((void**)&r_unit_d, cols * sizeof(double));
cudaMalloc((void**)&s_d, cols * sizeof(double));
cudaMalloc((void**)&x_row_d, cols * sizeof(double));
cudaMalloc((void**)&r_x_dot_d, cols * sizeof(double));
cudaMalloc((void**)&add_to_s_d, cols * sizeof(double));
cudaMalloc((void**)&eigenvalues_d, cols * sizeof(double));
cudaMalloc((void**)&state_d, cols * sizeof(curandState));
printf("Data:\n");
for (k = 0; k < 6; k++) {
printf("%f ", data[k]);
if (k % 3 == 2) {
printf("\n");
}
}
printf("\n\n");
cudaMemcpy(r_d, r, cols * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(data_d, data, rows * cols * sizeof(double), cudaMemcpyHostToDevice);
meanOfFeatures <<<1, cols>>> (data_d, means_d, rows, cols);
cudaMemcpy(r, means_d, cols * sizeof(double), cudaMemcpyDeviceToHost);
printf("Means:\n");
for (k = 0; k < 3; k++) {
printf("%f ", r[k]);
}
printf("\n\n");
subMeansFromCols <<<rows, cols>>> (data_d, zero_mean_data_d, means_d, rows, cols);
cudaMemcpy(data, zero_mean_data_d, rows * cols * sizeof(double), cudaMemcpyDeviceToHost);
printf("Zero Mean Data:\n");
for (k = 0; k < 6; k++) {
printf("%f ", data[k]);
if (k % 3 == 2) {
printf("\n");
}
}
printf("\n\n");
genRandVector <<<1, cols>>> (r_d, state_d, clock_seed);
cudaDeviceSynchronize();
cudaMemcpy(r, r_d, cols * sizeof(double), cudaMemcpyDeviceToHost);
printf("Random Values:\n");
for (k = 0; k < 3; k++) {
printf("%f ", r[k]);
}
printf("\n\n");
double r_len = 0;
for (i = 0; i < cols; i++) {
r_len += (r[i] * r[i]);
}
r_len = 1/sqrt(r_len);
printf("Rlen: %f\n\n", r_len);
vec_scalar_mul <<<1, cols>>> (r_d, r_unit_d, r_len, 1, cols);
cudaDeviceSynchronize();
cudaMemcpy(r, r_unit_d, cols * sizeof(double), cudaMemcpyDeviceToHost);
printf("Random Unit Vector:\n");
for (k = 0; k < 3; k++) {
printf("%f ", r[k]);
}
printf("\n\n");
double s_len;
for (i = 0; i < iterations; i++) {
printf("Iteration: %i\n", i);
//set s to 0
clearSVector <<<1, cols>>> (s_d, cols);
for (j = 0; j < rows; j++) {
printf("Row: %i\n", j);
cudaDeviceSynchronize();
getRow <<<1, cols>>> (zero_mean_data_d, x_row_d, j, cols);
cudaDeviceSynchronize();
cudaMemcpy(s, x_row_d, cols * sizeof(double), cudaMemcpyDeviceToHost);
printf("Row:\n");
for (k = 0; k < 3; k++) {
printf("%f ", s[k]);
}
printf("\n\n");
//printf("Got row\n");
//output is at index 0 of data_r_d
vec_dot_product <<<1, cols, cols * sizeof(double)>>> (x_row_d, r_unit_d, r_x_dot_d, 1, cols); // extern __shared__ temp[] needs cols doubles of dynamic shared memory
//printf("Got dot product\n");
cudaDeviceSynchronize();
cudaMemcpy(&dot_prod, r_x_dot_d, sizeof(double), cudaMemcpyDeviceToHost);
//printf("Isolated scalar value");
vec_scalar_mul <<<1, cols>>> (x_row_d, add_to_s_d, dot_prod, 1, cols);
//printf("Did scalar mult\n");
vec_add <<<1, cols>>> (s_d, add_to_s_d, s_d, 1, cols);
//printf("Did vector add\n");
}
cudaDeviceSynchronize();
cudaMemcpy(s, s_d, cols * sizeof(double), cudaMemcpyDeviceToHost);
printf("S:\n");
for (k = 0; k < 3; k++) {
printf("%f ", s[k]);
}
printf("\n");
MatrixMul <<<1, 1>>> (r_unit_d, s_d, eigenvalues_d, cols, 1, 1, cols); // r^T s is 1x1, so a single thread writes eigenvalues_d[0]
cudaDeviceSynchronize();
cudaMemcpy(eigenvalues, eigenvalues_d, cols * sizeof(double), cudaMemcpyDeviceToHost);
printf("Eigenvalues:\n");
for (k = 0; k < 3; k++) {
printf("%f ", eigenvalues[k]);
}
printf("\n");
//printf("Did matrix mult\n");
cudaDeviceSynchronize();
cudaMemcpy(s, s_d, cols * sizeof(double), cudaMemcpyDeviceToHost);
s_len = 0;
for (k = 0; k < cols; k++) {
s_len += (s[k] * s[k]);
}
s_len = 1/sqrt(s_len);
vec_scalar_mul <<<1, cols>>> (s_d, r_unit_d, s_len, 1, cols); // power-iteration update: r = s / |s|
}
cudaDeviceSynchronize();
printf("Finished calculating eigenvalues\n");
cudaMemcpy(eigenvalues, eigenvalues_d, cols * sizeof(double), cudaMemcpyDeviceToHost);
printf("Eigenvalues:\n");
for (i = 0; i < 3; i++) {
printf("%f ", eigenvalues[i]);
}
printf("\n");
free(r);
free(s);
cudaFree(data_d);
cudaFree(means_d);
cudaFree(zero_mean_data_d);
cudaFree(r_d);
cudaFree(r_unit_d);
cudaFree(s_d);
cudaFree(x_row_d);
cudaFree(r_x_dot_d);
cudaFree(add_to_s_d);
cudaFree(eigenvalues_d);
cudaFree(state_d);
return eigenvalues;
}
#define SIZE 5
int main() {
double data[6] = {1, 2, 3, 4, 5, 6};
double *eigenvalues = pca(data, 2, 3, 2);
int i;
for (i = 0; i < 3; i++) {
printf("%f ", eigenvalues[i]);
}
printf("\n");
/*
double r[5] = {0, 0, 0, 0, 0};
unsigned long clock_seed = (unsigned long)clock();
double *r_d;
curandState *state_d;
unsigned long *clock_seed_d;
cudaMalloc((void**)&r_d, SIZE * sizeof(double));
cudaMalloc((void**)&state_d, SIZE * sizeof(curandState));
cudaMalloc((void**)&clock_seed_d, sizeof(unsigned long));
cudaMemcpy(r_d, r, SIZE * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(clock_seed_d, &clock_seed, sizeof(unsigned long), cudaMemcpyHostToDevice);
genRandVector<<<5, 1>>>(r_d, state_d, (double)clock());
cudaMemcpy(r, r_d, SIZE * sizeof(double), cudaMemcpyDeviceToHost);
int i;
for (i = 0; i < SIZE; i++) {
printf("%f ", r[i]);
}
printf("\n");
*/
}
/*
vec_dot_mat(zero_mean_data_d, r_unit_d, data_r_d, rows, cols)
MatrixMul(zero_mean_data_d, data_r_d, add_to_s, cols, rows, 1, cols);
vec_add(s_d, add_to_s, new_s_d, 1, cols);
MatrixMul(r_unit_d, , double * C, int Ax, int Ay, int Bx, int By)
*/
|
efa880010d63b88e1c3cb0dbaefd785902633c73.hip | // !!! This is a file automatically generated by hipify!!!
/*------------------------------------------------------------------------------
Copyright © 2016 by Nicola Bombieri
Mangrove is provided under the terms of The MIT License (MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------*/
/**
* @author Federico Busato
* University of Verona, Dept. of Computer Science
* [email protected]
*/
#include <iostream>
#include "XLib.hpp"
#include "mangrove.hpp"
#include "Utility.hpp"
using namespace mangrove;
using namespace mangrove_support;
template<typename T>
void Processing(GenerateParamStr& GenerateParam, ReadParamStr& ReadParam,
HostParamStr& HostParam, GPUParamStr& GPUParam,
ParameterStr& Parameters);
//------------------------------------------------------------------------------
int main(int argc, char *argv[]) {
GenerateParamStr GenerateParam;
ReadParamStr ReadParam;
HostParamStr HostParam;
GPUParamStr GPUParam;
ParameterStr Parameters;
setParameters(GenerateParam, ReadParam, HostParam, GPUParam, Parameters,
argc, argv);
if (Parameters.mining_type == MINING_TYPE::BOOL) {
std::cout << " Boolean Mining" << std::endl;
Processing<bool>(GenerateParam, ReadParam, HostParam,
GPUParam, Parameters);
}
else if (Parameters.mining_type == MINING_TYPE::NUMERIC) {
std::cout << " Numeric Mining" << std::endl;
Processing<numeric_t>(GenerateParam, ReadParam, HostParam,
GPUParam, Parameters);
}
#if defined(__NVCC__)
hipDeviceReset();
#endif
}
//------------------------------------------------------------------------------
template<typename T>
void Processing(GenerateParamStr& GenerateParam, ReadParamStr& ReadParam,
HostParamStr& HostParam, GPUParamStr& GPUParam,
ParameterStr& Parameters) {
xlib::ThousandSep TS;
TracePropSTR<T> TraceProp;
if (ReadParam.trace_file != nullptr)
readTrace(ReadParam, TraceProp);
else
generateMain(GenerateParam, TraceProp);
// Reading variables' names
if (Parameters.host)
getVariables(Parameters.var_file, TraceProp.vars, HostParam);
else
getVariables(Parameters.var_file, TraceProp.vars, GPUParam);
std::cout << std::endl
<< " N. of threads "
<< std::thread::hardware_concurrency() << std::endl
<< " Trace variables "
<< TraceProp.vars << std::endl
<< " Simulation Instants "
<< TraceProp.sim_instants << std::endl;
if (std::is_same<T, bool>::value) {
std::cout << " Trace length "
<< TraceProp.trace_length << std::endl << std::endl;
}
if (Parameters.host) HostMiningFixed(HostParam, TraceProp);
#if defined(__NVCC__)
if (Parameters.gpu) GPUMiningFixed(GPUParam, TraceProp);
#endif
std::cout << std::endl;
}
| efa880010d63b88e1c3cb0dbaefd785902633c73.cu | /*------------------------------------------------------------------------------
Copyright © 2016 by Nicola Bombieri
Mangrove is provided under the terms of The MIT License (MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------*/
/**
* @author Federico Busato
* University of Verona, Dept. of Computer Science
* [email protected]
*/
#include <iostream>
#include "XLib.hpp"
#include "mangrove.hpp"
#include "Utility.hpp"
using namespace mangrove;
using namespace mangrove_support;
template<typename T>
void Processing(GenerateParamStr& GenerateParam, ReadParamStr& ReadParam,
HostParamStr& HostParam, GPUParamStr& GPUParam,
ParameterStr& Parameters);
//------------------------------------------------------------------------------
int main(int argc, char *argv[]) {
GenerateParamStr GenerateParam;
ReadParamStr ReadParam;
HostParamStr HostParam;
GPUParamStr GPUParam;
ParameterStr Parameters;
setParameters(GenerateParam, ReadParam, HostParam, GPUParam, Parameters,
argc, argv);
if (Parameters.mining_type == MINING_TYPE::BOOL) {
std::cout << " Boolean Mining" << std::endl;
Processing<bool>(GenerateParam, ReadParam, HostParam,
GPUParam, Parameters);
}
else if (Parameters.mining_type == MINING_TYPE::NUMERIC) {
std::cout << " Numeric Mining" << std::endl;
Processing<numeric_t>(GenerateParam, ReadParam, HostParam,
GPUParam, Parameters);
}
#if defined(__NVCC__)
cudaDeviceReset();
#endif
}
//------------------------------------------------------------------------------
template<typename T>
void Processing(GenerateParamStr& GenerateParam, ReadParamStr& ReadParam,
HostParamStr& HostParam, GPUParamStr& GPUParam,
ParameterStr& Parameters) {
xlib::ThousandSep TS;
TracePropSTR<T> TraceProp;
if (ReadParam.trace_file != nullptr)
readTrace(ReadParam, TraceProp);
else
generateMain(GenerateParam, TraceProp);
// Reading variables' names
if (Parameters.host)
getVariables(Parameters.var_file, TraceProp.vars, HostParam);
else
getVariables(Parameters.var_file, TraceProp.vars, GPUParam);
std::cout << std::endl
<< " N. of threads "
<< std::thread::hardware_concurrency() << std::endl
<< " Trace variables "
<< TraceProp.vars << std::endl
<< " Simulation Instants "
<< TraceProp.sim_instants << std::endl;
if (std::is_same<T, bool>::value) {
std::cout << " Trace length "
<< TraceProp.trace_length << std::endl << std::endl;
}
if (Parameters.host) HostMiningFixed(HostParam, TraceProp);
#if defined(__NVCC__)
if (Parameters.gpu) GPUMiningFixed(GPUParam, TraceProp);
#endif
std::cout << std::endl;
}
|
17fd1478510a5156cedd83231a47af1ca928f15d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// map each thread onto one array element, wrapping at ARRAY_SIZE so many threads share each element
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
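// why the naive version miscounts: g[i] = g[i] + 1 is a separate read,
// add, and write; two threads can both read the same old value and both
// store old + 1, losing one increment. With NUM_THREADS / ARRAY_SIZE =
// 10000 threads per element, the printed counts typically fall well short
// of 10000, while the atomic version below always reaches it exactly.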
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// map each thread onto one array element, wrapping at ARRAY_SIZE so many threads share each element
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
hipMalloc((void **) &d_array, ARRAY_BYTES);
hipMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// Instructions: This program is needed for the next quiz
// uncomment increment_naive to measure speed and accuracy
// of non-atomic increments or uncomment increment_atomic to
// measure speed and accuracy of atomic increments
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
hipLaunchKernelGGL(( increment_atomic), dim3(NUM_THREADS/BLOCK_WIDTH), dim3(BLOCK_WIDTH), 0, 0, d_array);
timer.Stop();
// copy back the array of sums from GPU and print
hipMemcpy(h_array, d_array, ARRAY_BYTES, hipMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
hipFree(d_array);
return 0;
}
| 17fd1478510a5156cedd83231a47af1ca928f15d.cu | #include <stdio.h>
#include "gputimer.h"
#define NUM_THREADS 1000000
#define ARRAY_SIZE 100
#define BLOCK_WIDTH 1000
void print_array(int *array, int size)
{
printf("{ ");
for (int i = 0; i < size; i++) { printf("%d ", array[i]); }
printf("}\n");
}
__global__ void increment_naive(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// map each thread onto one array element, wrapping at ARRAY_SIZE so many threads share each element
i = i % ARRAY_SIZE;
g[i] = g[i] + 1;
}
__global__ void increment_atomic(int *g)
{
// which thread is this?
int i = blockIdx.x * blockDim.x + threadIdx.x;
// map each thread onto one array element, wrapping at ARRAY_SIZE so many threads share each element
i = i % ARRAY_SIZE;
atomicAdd(& g[i], 1);
}
int main(int argc,char **argv)
{
GpuTimer timer;
printf("%d total threads in %d blocks writing into %d array elements\n",
NUM_THREADS, NUM_THREADS / BLOCK_WIDTH, ARRAY_SIZE);
// declare and allocate host memory
int h_array[ARRAY_SIZE];
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// declare, allocate, and zero out GPU memory
int * d_array;
cudaMalloc((void **) &d_array, ARRAY_BYTES);
cudaMemset((void *) d_array, 0, ARRAY_BYTES);
// launch the kernel - comment out one of these
timer.Start();
// Instructions: This program is needed for the next quiz
// uncomment increment_naive to measure speed and accuracy
// of non-atomic increments or uncomment increment_atomic to
// measure speed and accuracy of atomic increments
// increment_naive<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
increment_atomic<<<NUM_THREADS/BLOCK_WIDTH, BLOCK_WIDTH>>>(d_array);
timer.Stop();
// copy back the array of sums from GPU and print
cudaMemcpy(h_array, d_array, ARRAY_BYTES, cudaMemcpyDeviceToHost);
print_array(h_array, ARRAY_SIZE);
printf("Time elapsed = %g ms\n", timer.Elapsed());
// free GPU memory allocation and exit
cudaFree(d_array);
return 0;
}
|
fffdc3c67e5d25015d37a2217e3b427816c97b92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "box3d4r-32x16-1-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 8;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.240f) * (__REGREF(__a, 0, 0))) + (0.0010f * (__SBREF(__a_sb, -4, -4)))) + (0.0020f * (__SBREF(__a_sb, -4, -3)))) + (0.0030f * (__SBREF(__a_sb, -4, -2)))) + (0.0040f * (__SBREF(__a_sb, -4, -1)))) + (0.0050f * (__SBREF(__a_sb, -4, 0)))) + (0.0060f * (__SBREF(__a_sb, -4, 1)))) + (0.0070f * (__SBREF(__a_sb, -4, 2)))) + (0.0080f * (__SBREF(__a_sb, -4, 3)))) + (0.0090f * (__SBREF(__a_sb, -4, 4)))) + (0.0100f * (__SBREF(__a_sb, -3, -4)))) + (0.0110f * (__SBREF(__a_sb, -3, -3)))) + (0.0120f * (__SBREF(__a_sb, -3, -2)))) + (0.0130f * (__SBREF(__a_sb, -3, -1)))) + (0.0140f * (__SBREF(__a_sb, -3, 0)))) + (0.0150f * (__SBREF(__a_sb, -3, 1)))) + (0.0160f * (__SBREF(__a_sb, -3, 2)))) + (0.0170f * (__SBREF(__a_sb, -3, 3)))) + (0.0180f * (__SBREF(__a_sb, -3, 4)))) + (0.0190f * (__SBREF(__a_sb, -2, -4)))) + (0.0200f * (__SBREF(__a_sb, -2, -3)))) + (0.0210f * (__SBREF(__a_sb, -2, -2)))) + (0.0220f * (__SBREF(__a_sb, -2, -1)))) + (0.0230f * (__SBREF(__a_sb, -2, 0)))) + (0.0240f * (__SBREF(__a_sb, -2, 1)))) + (0.0250f * (__SBREF(__a_sb, -2, 2)))) + (0.0260f * (__SBREF(__a_sb, -2, 3)))) + (0.0270f * (__SBREF(__a_sb, -2, 4)))) + (0.0280f * (__SBREF(__a_sb, -1, -4)))) + (0.0290f * (__SBREF(__a_sb, -1, -3)))) + (0.0300f * (__SBREF(__a_sb, -1, -2)))) + (0.0310f * (__SBREF(__a_sb, -1, -1)))) + (0.0320f * (__SBREF(__a_sb, -1, 0)))) + (0.0330f * (__SBREF(__a_sb, -1, 1)))) + (0.0340f * (__SBREF(__a_sb, -1, 2)))) + (0.0350f * (__SBREF(__a_sb, -1, 3)))) + (0.0360f * (__SBREF(__a_sb, -1, 4)))) + (0.0370f * (__SBREF(__a_sb, 0, -4)))) + (0.0380f * (__SBREF(__a_sb, 0, -3)))) + (0.0390f * (__SBREF(__a_sb, 0, -2)))) + (0.0400f * (__SBREF(__a_sb, 0, -1)))) + (0.0410f * (__SBREF(__a_sb, 0, 1)))) + (0.0420f * (__SBREF(__a_sb, 0, 2)))) + (0.0430f * (__SBREF(__a_sb, 0, 3)))) + (0.0440f * (__SBREF(__a_sb, 0, 4)))) + (0.0450f * (__SBREF(__a_sb, 1, -4)))) + (0.0460f * (__SBREF(__a_sb, 1, -3)))) + (0.0470f * (__SBREF(__a_sb, 1, -2)))) + (0.0480f * (__SBREF(__a_sb, 1, -1)))) + (0.0490f * (__SBREF(__a_sb, 1, 0)))) + (0.0500f * (__SBREF(__a_sb, 1, 1)))) + (0.0510f * (__SBREF(__a_sb, 1, 2)))) + (0.0520f * (__SBREF(__a_sb, 1, 3)))) + (0.0530f * (__SBREF(__a_sb, 1, 4)))) + (0.0540f * (__SBREF(__a_sb, 2, -4)))) + (0.0550f * (__SBREF(__a_sb, 2, -3)))) + (0.0560f * (__SBREF(__a_sb, 2, -2)))) + (0.0570f * (__SBREF(__a_sb, 2, -1)))) + (0.0580f * (__SBREF(__a_sb, 2, 0)))) + (0.0590f * (__SBREF(__a_sb, 2, 1)))) + (0.0600f * (__SBREF(__a_sb, 2, 2)))) + (0.0610f * (__SBREF(__a_sb, 2, 3)))) + (0.0620f * (__SBREF(__a_sb, 2, 4)))) + (0.0630f * (__SBREF(__a_sb, 3, -4)))) + (0.0640f * (__SBREF(__a_sb, 3, -3)))) + (0.0650f * (__SBREF(__a_sb, 3, -2)))) + (0.0660f * (__SBREF(__a_sb, 3, -1)))) + (0.0670f * (__SBREF(__a_sb, 3, 0)))) + (0.0680f * (__SBREF(__a_sb, 3, 1)))) + (0.0690f * 
(__SBREF(__a_sb, 3, 2)))) + (0.0700f * (__SBREF(__a_sb, 3, 3)))) + (0.0710f * (__SBREF(__a_sb, 3, 4)))) + (0.0720f * (__SBREF(__a_sb, 4, -4)))) + (0.0730f * (__SBREF(__a_sb, 4, -3)))) + (0.0740f * (__SBREF(__a_sb, 4, -2)))) + (0.0750f * (__SBREF(__a_sb, 4, -1)))) + (0.0760f * (__SBREF(__a_sb, 4, 0)))) + (0.0770f * (__SBREF(__a_sb, 4, 1)))) + (0.0780f * (__SBREF(__a_sb, 4, 2)))) + (0.0790f * (__SBREF(__a_sb, 4, 3)))) + (0.0800f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.248f) * (__REGREF(__a, 0, 0)))) + (0.0011f * (__SBREF(__a_sb, -4, -4)))) + (0.0021f * (__SBREF(__a_sb, -4, -3)))) + (0.0031f * (__SBREF(__a_sb, -4, -2)))) + (0.0041f * (__SBREF(__a_sb, -4, -1)))) + (0.0051f * (__SBREF(__a_sb, -4, 0)))) + (0.0061f * (__SBREF(__a_sb, -4, 1)))) + (0.0071f * (__SBREF(__a_sb, -4, 2)))) + (0.0081f * (__SBREF(__a_sb, -4, 3)))) + (0.0091f * (__SBREF(__a_sb, -4, 4)))) + (0.0101f * (__SBREF(__a_sb, -3, -4)))) + (0.0111f * (__SBREF(__a_sb, -3, -3)))) + (0.0121f * (__SBREF(__a_sb, -3, -2)))) + (0.0131f * (__SBREF(__a_sb, -3, -1)))) + (0.0141f * (__SBREF(__a_sb, -3, 0)))) + (0.0151f * (__SBREF(__a_sb, -3, 1)))) + (0.0161f * (__SBREF(__a_sb, -3, 2)))) + (0.0171f * (__SBREF(__a_sb, -3, 3)))) + (0.0181f * (__SBREF(__a_sb, -3, 4)))) + (0.0191f * (__SBREF(__a_sb, -2, -4)))) + (0.0201f * (__SBREF(__a_sb, -2, -3)))) + (0.0211f * (__SBREF(__a_sb, -2, -2)))) + (0.0221f * (__SBREF(__a_sb, -2, -1)))) + (0.0231f * (__SBREF(__a_sb, -2, 0)))) + (0.0241f * (__SBREF(__a_sb, -2, 1)))) + (0.0251f * (__SBREF(__a_sb, -2, 2)))) + (0.0261f * (__SBREF(__a_sb, -2, 3)))) + (0.0271f * (__SBREF(__a_sb, -2, 4)))) + (0.0281f * (__SBREF(__a_sb, -1, -4)))) + (0.0291f * (__SBREF(__a_sb, -1, -3)))) + (0.0301f * (__SBREF(__a_sb, -1, -2)))) + (0.0311f * (__SBREF(__a_sb, -1, -1)))) + (0.0321f * (__SBREF(__a_sb, -1, 0)))) + (0.0331f * (__SBREF(__a_sb, -1, 1)))) + (0.0341f * (__SBREF(__a_sb, -1, 2)))) + (0.0351f * (__SBREF(__a_sb, -1, 3)))) + (0.0361f * (__SBREF(__a_sb, -1, 4)))) + (0.0371f * (__SBREF(__a_sb, 0, -4)))) + (0.0381f * (__SBREF(__a_sb, 0, -3)))) + (0.0391f * (__SBREF(__a_sb, 0, -2)))) + (0.0401f * (__SBREF(__a_sb, 0, -1)))) + (0.0411f * (__SBREF(__a_sb, 0, 1)))) + (0.0421f * (__SBREF(__a_sb, 0, 2)))) + (0.0431f * (__SBREF(__a_sb, 0, 3)))) + (0.0441f * (__SBREF(__a_sb, 0, 4)))) + (0.0451f * (__SBREF(__a_sb, 1, -4)))) + (0.0461f * (__SBREF(__a_sb, 1, -3)))) + (0.0471f * (__SBREF(__a_sb, 1, -2)))) + (0.0481f * (__SBREF(__a_sb, 1, -1)))) + (0.0491f * (__SBREF(__a_sb, 1, 0)))) + (0.0501f * (__SBREF(__a_sb, 1, 1)))) + (0.0511f * (__SBREF(__a_sb, 1, 2)))) + (0.0521f * (__SBREF(__a_sb, 1, 3)))) + (0.0531f * (__SBREF(__a_sb, 1, 4)))) + (0.0541f * (__SBREF(__a_sb, 2, -4)))) + (0.0551f * (__SBREF(__a_sb, 2, -3)))) + (0.0561f * (__SBREF(__a_sb, 2, -2)))) + (0.0571f * (__SBREF(__a_sb, 2, -1)))) + (0.0581f * (__SBREF(__a_sb, 2, 0)))) + (0.0591f * (__SBREF(__a_sb, 2, 1)))) + (0.0601f * (__SBREF(__a_sb, 2, 2)))) + (0.0611f * (__SBREF(__a_sb, 2, 3)))) + (0.0621f * (__SBREF(__a_sb, 2, 4)))) + (0.0631f * (__SBREF(__a_sb, 3, -4)))) + (0.0641f * (__SBREF(__a_sb, 3, -3)))) + (0.0651f * (__SBREF(__a_sb, 3, -2)))) + (0.0661f * (__SBREF(__a_sb, 3, -1)))) + (0.0671f * (__SBREF(__a_sb, 3, 0)))) + (0.0681f * (__SBREF(__a_sb, 3, 1)))) + (0.0691f * (__SBREF(__a_sb, 3, 2)))) + (0.0701f * (__SBREF(__a_sb, 3, 3)))) + (0.0711f * 
(__SBREF(__a_sb, 3, 4)))) + (0.0721f * (__SBREF(__a_sb, 4, -4)))) + (0.0731f * (__SBREF(__a_sb, 4, -3)))) + (0.0741f * (__SBREF(__a_sb, 4, -2)))) + (0.0751f * (__SBREF(__a_sb, 4, -1)))) + (0.0761f * (__SBREF(__a_sb, 4, 0)))) + (0.0771f * (__SBREF(__a_sb, 4, 1)))) + (0.0781f * (__SBREF(__a_sb, 4, 2)))) + (0.0791f * (__SBREF(__a_sb, 4, 3)))) + (0.0801f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.256f) * (__REGREF(__a, 0, 0)))) + (0.0012f * (__SBREF(__a_sb, -4, -4)))) + (0.0022f * (__SBREF(__a_sb, -4, -3)))) + (0.0032f * (__SBREF(__a_sb, -4, -2)))) + (0.0042f * (__SBREF(__a_sb, -4, -1)))) + (0.0052f * (__SBREF(__a_sb, -4, 0)))) + (0.0062f * (__SBREF(__a_sb, -4, 1)))) + (0.0072f * (__SBREF(__a_sb, -4, 2)))) + (0.0082f * (__SBREF(__a_sb, -4, 3)))) + (0.0092f * (__SBREF(__a_sb, -4, 4)))) + (0.0102f * (__SBREF(__a_sb, -3, -4)))) + (0.0112f * (__SBREF(__a_sb, -3, -3)))) + (0.0122f * (__SBREF(__a_sb, -3, -2)))) + (0.0132f * (__SBREF(__a_sb, -3, -1)))) + (0.0142f * (__SBREF(__a_sb, -3, 0)))) + (0.0152f * (__SBREF(__a_sb, -3, 1)))) + (0.0162f * (__SBREF(__a_sb, -3, 2)))) + (0.0172f * (__SBREF(__a_sb, -3, 3)))) + (0.0182f * (__SBREF(__a_sb, -3, 4)))) + (0.0192f * (__SBREF(__a_sb, -2, -4)))) + (0.0202f * (__SBREF(__a_sb, -2, -3)))) + (0.0212f * (__SBREF(__a_sb, -2, -2)))) + (0.0222f * (__SBREF(__a_sb, -2, -1)))) + (0.0232f * (__SBREF(__a_sb, -2, 0)))) + (0.0242f * (__SBREF(__a_sb, -2, 1)))) + (0.0252f * (__SBREF(__a_sb, -2, 2)))) + (0.0262f * (__SBREF(__a_sb, -2, 3)))) + (0.0272f * (__SBREF(__a_sb, -2, 4)))) + (0.0282f * (__SBREF(__a_sb, -1, -4)))) + (0.0292f * (__SBREF(__a_sb, -1, -3)))) + (0.0302f * (__SBREF(__a_sb, -1, -2)))) + (0.0312f * (__SBREF(__a_sb, -1, -1)))) + (0.0322f * (__SBREF(__a_sb, -1, 0)))) + (0.0332f * (__SBREF(__a_sb, -1, 1)))) + (0.0342f * (__SBREF(__a_sb, -1, 2)))) + (0.0352f * (__SBREF(__a_sb, -1, 3)))) + (0.0362f * (__SBREF(__a_sb, -1, 4)))) + (0.0372f * (__SBREF(__a_sb, 0, -4)))) + (0.0382f * (__SBREF(__a_sb, 0, -3)))) + (0.0392f * (__SBREF(__a_sb, 0, -2)))) + (0.0402f * (__SBREF(__a_sb, 0, -1)))) + (0.0412f * (__SBREF(__a_sb, 0, 1)))) + (0.0422f * (__SBREF(__a_sb, 0, 2)))) + (0.0432f * (__SBREF(__a_sb, 0, 3)))) + (0.0442f * (__SBREF(__a_sb, 0, 4)))) + (0.0452f * (__SBREF(__a_sb, 1, -4)))) + (0.0462f * (__SBREF(__a_sb, 1, -3)))) + (0.0472f * (__SBREF(__a_sb, 1, -2)))) + (0.0482f * (__SBREF(__a_sb, 1, -1)))) + (0.0492f * (__SBREF(__a_sb, 1, 0)))) + (0.0502f * (__SBREF(__a_sb, 1, 1)))) + (0.0512f * (__SBREF(__a_sb, 1, 2)))) + (0.0522f * (__SBREF(__a_sb, 1, 3)))) + (0.0532f * (__SBREF(__a_sb, 1, 4)))) + (0.0542f * (__SBREF(__a_sb, 2, -4)))) + (0.0552f * (__SBREF(__a_sb, 2, -3)))) + (0.0562f * (__SBREF(__a_sb, 2, -2)))) + (0.0572f * (__SBREF(__a_sb, 2, -1)))) + (0.0582f * (__SBREF(__a_sb, 2, 0)))) + (0.0592f * (__SBREF(__a_sb, 2, 1)))) + (0.0602f * (__SBREF(__a_sb, 2, 2)))) + (0.0612f * (__SBREF(__a_sb, 2, 3)))) + (0.0622f * (__SBREF(__a_sb, 2, 4)))) + (0.0632f * (__SBREF(__a_sb, 3, -4)))) + (0.0642f * (__SBREF(__a_sb, 3, -3)))) + (0.0652f * (__SBREF(__a_sb, 3, -2)))) + (0.0662f * (__SBREF(__a_sb, 3, -1)))) + (0.0672f * (__SBREF(__a_sb, 3, 0)))) + (0.0682f * (__SBREF(__a_sb, 3, 1)))) + (0.0692f * (__SBREF(__a_sb, 3, 2)))) + (0.0702f * (__SBREF(__a_sb, 3, 3)))) + (0.0712f * (__SBREF(__a_sb, 3, 4)))) + (0.0722f * (__SBREF(__a_sb, 4, -4)))) + (0.0732f * 
(__SBREF(__a_sb, 4, -3)))) + (0.0742f * (__SBREF(__a_sb, 4, -2)))) + (0.0752f * (__SBREF(__a_sb, 4, -1)))) + (0.0762f * (__SBREF(__a_sb, 4, 0)))) + (0.0772f * (__SBREF(__a_sb, 4, 1)))) + (0.0782f * (__SBREF(__a_sb, 4, 2)))) + (0.0792f * (__SBREF(__a_sb, 4, 3)))) + (0.0802f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.264f) * (__REGREF(__a, 0, 0)))) + (0.0013f * (__SBREF(__a_sb, -4, -4)))) + (0.0023f * (__SBREF(__a_sb, -4, -3)))) + (0.0033f * (__SBREF(__a_sb, -4, -2)))) + (0.0043f * (__SBREF(__a_sb, -4, -1)))) + (0.0053f * (__SBREF(__a_sb, -4, 0)))) + (0.0063f * (__SBREF(__a_sb, -4, 1)))) + (0.0073f * (__SBREF(__a_sb, -4, 2)))) + (0.0083f * (__SBREF(__a_sb, -4, 3)))) + (0.0093f * (__SBREF(__a_sb, -4, 4)))) + (0.0103f * (__SBREF(__a_sb, -3, -4)))) + (0.0113f * (__SBREF(__a_sb, -3, -3)))) + (0.0123f * (__SBREF(__a_sb, -3, -2)))) + (0.0133f * (__SBREF(__a_sb, -3, -1)))) + (0.0143f * (__SBREF(__a_sb, -3, 0)))) + (0.0153f * (__SBREF(__a_sb, -3, 1)))) + (0.0163f * (__SBREF(__a_sb, -3, 2)))) + (0.0173f * (__SBREF(__a_sb, -3, 3)))) + (0.0183f * (__SBREF(__a_sb, -3, 4)))) + (0.0193f * (__SBREF(__a_sb, -2, -4)))) + (0.0203f * (__SBREF(__a_sb, -2, -3)))) + (0.0213f * (__SBREF(__a_sb, -2, -2)))) + (0.0223f * (__SBREF(__a_sb, -2, -1)))) + (0.0233f * (__SBREF(__a_sb, -2, 0)))) + (0.0243f * (__SBREF(__a_sb, -2, 1)))) + (0.0253f * (__SBREF(__a_sb, -2, 2)))) + (0.0263f * (__SBREF(__a_sb, -2, 3)))) + (0.0273f * (__SBREF(__a_sb, -2, 4)))) + (0.0283f * (__SBREF(__a_sb, -1, -4)))) + (0.0293f * (__SBREF(__a_sb, -1, -3)))) + (0.0303f * (__SBREF(__a_sb, -1, -2)))) + (0.0313f * (__SBREF(__a_sb, -1, -1)))) + (0.0323f * (__SBREF(__a_sb, -1, 0)))) + (0.0333f * (__SBREF(__a_sb, -1, 1)))) + (0.0343f * (__SBREF(__a_sb, -1, 2)))) + (0.0353f * (__SBREF(__a_sb, -1, 3)))) + (0.0363f * (__SBREF(__a_sb, -1, 4)))) + (0.0373f * (__SBREF(__a_sb, 0, -4)))) + (0.0383f * (__SBREF(__a_sb, 0, -3)))) + (0.0393f * (__SBREF(__a_sb, 0, -2)))) + (0.0403f * (__SBREF(__a_sb, 0, -1)))) + (0.0413f * (__SBREF(__a_sb, 0, 1)))) + (0.0423f * (__SBREF(__a_sb, 0, 2)))) + (0.0433f * (__SBREF(__a_sb, 0, 3)))) + (0.0443f * (__SBREF(__a_sb, 0, 4)))) + (0.0453f * (__SBREF(__a_sb, 1, -4)))) + (0.0463f * (__SBREF(__a_sb, 1, -3)))) + (0.0473f * (__SBREF(__a_sb, 1, -2)))) + (0.0483f * (__SBREF(__a_sb, 1, -1)))) + (0.0493f * (__SBREF(__a_sb, 1, 0)))) + (0.0503f * (__SBREF(__a_sb, 1, 1)))) + (0.0513f * (__SBREF(__a_sb, 1, 2)))) + (0.0523f * (__SBREF(__a_sb, 1, 3)))) + (0.0533f * (__SBREF(__a_sb, 1, 4)))) + (0.0543f * (__SBREF(__a_sb, 2, -4)))) + (0.0553f * (__SBREF(__a_sb, 2, -3)))) + (0.0563f * (__SBREF(__a_sb, 2, -2)))) + (0.0573f * (__SBREF(__a_sb, 2, -1)))) + (0.0583f * (__SBREF(__a_sb, 2, 0)))) + (0.0593f * (__SBREF(__a_sb, 2, 1)))) + (0.0603f * (__SBREF(__a_sb, 2, 2)))) + (0.0613f * (__SBREF(__a_sb, 2, 3)))) + (0.0623f * (__SBREF(__a_sb, 2, 4)))) + (0.0633f * (__SBREF(__a_sb, 3, -4)))) + (0.0643f * (__SBREF(__a_sb, 3, -3)))) + (0.0653f * (__SBREF(__a_sb, 3, -2)))) + (0.0663f * (__SBREF(__a_sb, 3, -1)))) + (0.0673f * (__SBREF(__a_sb, 3, 0)))) + (0.0683f * (__SBREF(__a_sb, 3, 1)))) + (0.0693f * (__SBREF(__a_sb, 3, 2)))) + (0.0703f * (__SBREF(__a_sb, 3, 3)))) + (0.0713f * (__SBREF(__a_sb, 3, 4)))) + (0.0723f * (__SBREF(__a_sb, 4, -4)))) + (0.0733f * (__SBREF(__a_sb, 4, -3)))) + (0.0743f * (__SBREF(__a_sb, 4, -2)))) + (0.0753f * 
(__SBREF(__a_sb, 4, -1)))) + (0.0763f * (__SBREF(__a_sb, 4, 0)))) + (0.0773f * (__SBREF(__a_sb, 4, 1)))) + (0.0783f * (__SBREF(__a_sb, 4, 2)))) + (0.0793f * (__SBREF(__a_sb, 4, 3)))) + (0.0803f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.272f) * (__REGREF(__a, 0, 0)))) + (0.0014f * (__SBREF(__a_sb, -4, -4)))) + (0.0024f * (__SBREF(__a_sb, -4, -3)))) + (0.0034f * (__SBREF(__a_sb, -4, -2)))) + (0.0044f * (__SBREF(__a_sb, -4, -1)))) + (0.0054f * (__SBREF(__a_sb, -4, 0)))) + (0.0064f * (__SBREF(__a_sb, -4, 1)))) + (0.0074f * (__SBREF(__a_sb, -4, 2)))) + (0.0084f * (__SBREF(__a_sb, -4, 3)))) + (0.0094f * (__SBREF(__a_sb, -4, 4)))) + (0.0104f * (__SBREF(__a_sb, -3, -4)))) + (0.0114f * (__SBREF(__a_sb, -3, -3)))) + (0.0124f * (__SBREF(__a_sb, -3, -2)))) + (0.0134f * (__SBREF(__a_sb, -3, -1)))) + (0.0144f * (__SBREF(__a_sb, -3, 0)))) + (0.0154f * (__SBREF(__a_sb, -3, 1)))) + (0.0164f * (__SBREF(__a_sb, -3, 2)))) + (0.0174f * (__SBREF(__a_sb, -3, 3)))) + (0.0184f * (__SBREF(__a_sb, -3, 4)))) + (0.0194f * (__SBREF(__a_sb, -2, -4)))) + (0.0204f * (__SBREF(__a_sb, -2, -3)))) + (0.0214f * (__SBREF(__a_sb, -2, -2)))) + (0.0224f * (__SBREF(__a_sb, -2, -1)))) + (0.0234f * (__SBREF(__a_sb, -2, 0)))) + (0.0244f * (__SBREF(__a_sb, -2, 1)))) + (0.0254f * (__SBREF(__a_sb, -2, 2)))) + (0.0264f * (__SBREF(__a_sb, -2, 3)))) + (0.0274f * (__SBREF(__a_sb, -2, 4)))) + (0.0284f * (__SBREF(__a_sb, -1, -4)))) + (0.0294f * (__SBREF(__a_sb, -1, -3)))) + (0.0304f * (__SBREF(__a_sb, -1, -2)))) + (0.0314f * (__SBREF(__a_sb, -1, -1)))) + (0.0324f * (__SBREF(__a_sb, -1, 0)))) + (0.0334f * (__SBREF(__a_sb, -1, 1)))) + (0.0344f * (__SBREF(__a_sb, -1, 2)))) + (0.0354f * (__SBREF(__a_sb, -1, 3)))) + (0.0364f * (__SBREF(__a_sb, -1, 4)))) + (0.0374f * (__SBREF(__a_sb, 0, -4)))) + (0.0384f * (__SBREF(__a_sb, 0, -3)))) + (0.0394f * (__SBREF(__a_sb, 0, -2)))) + (0.0404f * (__SBREF(__a_sb, 0, -1)))) + (0.0414f * (__SBREF(__a_sb, 0, 1)))) + (0.0424f * (__SBREF(__a_sb, 0, 2)))) + (0.0434f * (__SBREF(__a_sb, 0, 3)))) + (0.0444f * (__SBREF(__a_sb, 0, 4)))) + (0.0454f * (__SBREF(__a_sb, 1, -4)))) + (0.0464f * (__SBREF(__a_sb, 1, -3)))) + (0.0474f * (__SBREF(__a_sb, 1, -2)))) + (0.0484f * (__SBREF(__a_sb, 1, -1)))) + (0.0494f * (__SBREF(__a_sb, 1, 0)))) + (0.0504f * (__SBREF(__a_sb, 1, 1)))) + (0.0514f * (__SBREF(__a_sb, 1, 2)))) + (0.0524f * (__SBREF(__a_sb, 1, 3)))) + (0.0534f * (__SBREF(__a_sb, 1, 4)))) + (0.0544f * (__SBREF(__a_sb, 2, -4)))) + (0.0554f * (__SBREF(__a_sb, 2, -3)))) + (0.0564f * (__SBREF(__a_sb, 2, -2)))) + (0.0574f * (__SBREF(__a_sb, 2, -1)))) + (0.0584f * (__SBREF(__a_sb, 2, 0)))) + (0.0594f * (__SBREF(__a_sb, 2, 1)))) + (0.0604f * (__SBREF(__a_sb, 2, 2)))) + (0.0614f * (__SBREF(__a_sb, 2, 3)))) + (0.0624f * (__SBREF(__a_sb, 2, 4)))) + (0.0634f * (__SBREF(__a_sb, 3, -4)))) + (0.0644f * (__SBREF(__a_sb, 3, -3)))) + (0.0654f * (__SBREF(__a_sb, 3, -2)))) + (0.0664f * (__SBREF(__a_sb, 3, -1)))) + (0.0674f * (__SBREF(__a_sb, 3, 0)))) + (0.0684f * (__SBREF(__a_sb, 3, 1)))) + (0.0694f * (__SBREF(__a_sb, 3, 2)))) + (0.0704f * (__SBREF(__a_sb, 3, 3)))) + (0.0714f * (__SBREF(__a_sb, 3, 4)))) + (0.0724f * (__SBREF(__a_sb, 4, -4)))) + (0.0734f * (__SBREF(__a_sb, 4, -3)))) + (0.0744f * (__SBREF(__a_sb, 4, -2)))) + (0.0754f * (__SBREF(__a_sb, 4, -1)))) + (0.0764f * (__SBREF(__a_sb, 4, 0)))) + (0.0774f * 
(__SBREF(__a_sb, 4, 1)))) + (0.0784f * (__SBREF(__a_sb, 4, 2)))) + (0.0794f * (__SBREF(__a_sb, 4, 3)))) + (0.0804f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = ((-3.280f) * (__REGREF(__a, 0, 0))) + (0.0015f * (__SBREF(__a_sb, -4, -4))) + (0.0025f * (__SBREF(__a_sb, -4, -3))) + (0.0035f * (__SBREF(__a_sb, -4, -2))) + (0.0045f * (__SBREF(__a_sb, -4, -1))) + (0.0055f * (__SBREF(__a_sb, -4, 0))) + (0.0065f * (__SBREF(__a_sb, -4, 1))) + (0.0075f * (__SBREF(__a_sb, -4, 2))) + (0.0085f * (__SBREF(__a_sb, -4, 3))) + (0.0095f * (__SBREF(__a_sb, -4, 4))) + (0.0105f * (__SBREF(__a_sb, -3, -4))) + (0.0115f * (__SBREF(__a_sb, -3, -3))) + (0.0125f * (__SBREF(__a_sb, -3, -2))) + (0.0135f * (__SBREF(__a_sb, -3, -1))) + (0.0145f * (__SBREF(__a_sb, -3, 0))) + (0.0155f * (__SBREF(__a_sb, -3, 1))) + (0.0165f * (__SBREF(__a_sb, -3, 2))) + (0.0175f * (__SBREF(__a_sb, -3, 3))) + (0.0185f * (__SBREF(__a_sb, -3, 4))) + (0.0195f * (__SBREF(__a_sb, -2, -4))) + (0.0205f * (__SBREF(__a_sb, -2, -3))) + (0.0215f * (__SBREF(__a_sb, -2, -2))) + (0.0225f * (__SBREF(__a_sb, -2, -1))) + (0.0235f * (__SBREF(__a_sb, -2, 0))) + (0.0245f * (__SBREF(__a_sb, -2, 1))) + (0.0255f * (__SBREF(__a_sb, -2, 2))) + (0.0265f * (__SBREF(__a_sb, -2, 3))) + (0.0275f * (__SBREF(__a_sb, -2, 4))) + (0.0285f * (__SBREF(__a_sb, -1, -4))) + (0.0295f * (__SBREF(__a_sb, -1, -3))) + (0.0305f * (__SBREF(__a_sb, -1, -2))) + (0.0315f * (__SBREF(__a_sb, -1, -1))) + (0.0325f * (__SBREF(__a_sb, -1, 0))) + (0.0335f * (__SBREF(__a_sb, -1, 1))) + (0.0345f * (__SBREF(__a_sb, -1, 2))) + (0.0355f * (__SBREF(__a_sb, -1, 3))) + (0.0365f * (__SBREF(__a_sb, -1, 4))) + (0.0375f * (__SBREF(__a_sb, 0, -4))) + (0.0385f * (__SBREF(__a_sb, 0, -3))) + (0.0395f * (__SBREF(__a_sb, 0, -2))) + (0.0405f * (__SBREF(__a_sb, 0, -1))) + (0.0415f * (__SBREF(__a_sb, 0, 1))) + (0.0425f * (__SBREF(__a_sb, 0, 2))) + (0.0435f * (__SBREF(__a_sb, 0, 3))) + (0.0445f * (__SBREF(__a_sb, 0, 4))) + (0.0455f * (__SBREF(__a_sb, 1, -4))) + (0.0465f * (__SBREF(__a_sb, 1, -3))) + (0.0475f * (__SBREF(__a_sb, 1, -2))) + (0.0485f * (__SBREF(__a_sb, 1, -1))) + (0.0495f * (__SBREF(__a_sb, 1, 0))) + (0.0505f * (__SBREF(__a_sb, 1, 1))) + (0.0515f * (__SBREF(__a_sb, 1, 2))) + (0.0525f * (__SBREF(__a_sb, 1, 3))) + (0.0535f * (__SBREF(__a_sb, 1, 4))) + (0.0545f * (__SBREF(__a_sb, 2, -4))) + (0.0555f * (__SBREF(__a_sb, 2, -3))) + (0.0565f * (__SBREF(__a_sb, 2, -2))) + (0.0575f * (__SBREF(__a_sb, 2, -1))) + (0.0585f * (__SBREF(__a_sb, 2, 0))) + (0.0595f * (__SBREF(__a_sb, 2, 1))) + (0.0605f * (__SBREF(__a_sb, 2, 2))) + (0.0615f * (__SBREF(__a_sb, 2, 3))) + (0.0625f * (__SBREF(__a_sb, 2, 4))) + (0.0635f * (__SBREF(__a_sb, 3, -4))) + (0.0645f * (__SBREF(__a_sb, 3, -3))) + (0.0655f * (__SBREF(__a_sb, 3, -2))) + (0.0665f * (__SBREF(__a_sb, 3, -1))) + (0.0675f * (__SBREF(__a_sb, 3, 0))) + (0.0685f * (__SBREF(__a_sb, 3, 1))) + (0.0695f * (__SBREF(__a_sb, 3, 2))) + (0.0705f * (__SBREF(__a_sb, 3, 3))) + (0.0715f * (__SBREF(__a_sb, 3, 4))) + (0.0725f * (__SBREF(__a_sb, 4, -4))) + (0.0735f * (__SBREF(__a_sb, 4, -3))) + (0.0745f * (__SBREF(__a_sb, 4, -2))) + (0.0755f * (__SBREF(__a_sb, 4, -1))) + (0.0765f * (__SBREF(__a_sb, 4, 0))) + (0.0775f * (__SBREF(__a_sb, 4, 1))) + (0.0785f * (__SBREF(__a_sb, 4, 2))) + (0.0795f * (__SBREF(__a_sb, 4, 3))) + (0.0805f * (__SBREF(__a_sb, 4, 4))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((-3.288f) * (__REGREF(__a, 0, 0))) + (0.0016f * (__SBREF(__a_sb, -4, -4))) + (0.0026f * (__SBREF(__a_sb, -4, -3))) + (0.0036f * (__SBREF(__a_sb, -4, -2))) + (0.0046f * (__SBREF(__a_sb, -4, -1))) + (0.0056f * (__SBREF(__a_sb, -4, 0))) + (0.0066f * (__SBREF(__a_sb, -4, 1))) + (0.0076f * (__SBREF(__a_sb, -4, 2))) + (0.0086f * (__SBREF(__a_sb, -4, 3))) + (0.0096f * (__SBREF(__a_sb, -4, 4))) + (0.0106f * (__SBREF(__a_sb, -3, -4))) + (0.0116f * (__SBREF(__a_sb, -3, -3))) + (0.0126f * (__SBREF(__a_sb, -3, -2))) + (0.0136f * (__SBREF(__a_sb, -3, -1))) + (0.0146f * (__SBREF(__a_sb, -3, 0))) + (0.0156f * (__SBREF(__a_sb, -3, 1))) + (0.0166f * (__SBREF(__a_sb, -3, 2))) + (0.0176f * (__SBREF(__a_sb, -3, 3))) + (0.0186f * (__SBREF(__a_sb, -3, 4))) + (0.0196f * (__SBREF(__a_sb, -2, -4))) + (0.0206f * (__SBREF(__a_sb, -2, -3))) + (0.0216f * (__SBREF(__a_sb, -2, -2))) + (0.0226f * (__SBREF(__a_sb, -2, -1))) + (0.0236f * (__SBREF(__a_sb, -2, 0))) + (0.0246f * (__SBREF(__a_sb, -2, 1))) + (0.0256f * (__SBREF(__a_sb, -2, 2))) + (0.0266f * (__SBREF(__a_sb, -2, 3))) + (0.0276f * (__SBREF(__a_sb, -2, 4))) + (0.0286f * (__SBREF(__a_sb, -1, -4))) + (0.0296f * (__SBREF(__a_sb, -1, -3))) + (0.0306f * (__SBREF(__a_sb, -1, -2))) + (0.0316f * (__SBREF(__a_sb, -1, -1))) + (0.0326f * (__SBREF(__a_sb, -1, 0))) + (0.0336f * (__SBREF(__a_sb, -1, 1))) + (0.0346f * (__SBREF(__a_sb, -1, 2))) + (0.0356f * (__SBREF(__a_sb, -1, 3))) + (0.0366f * (__SBREF(__a_sb, -1, 4))) + (0.0376f * (__SBREF(__a_sb, 0, -4))) + (0.0386f * (__SBREF(__a_sb, 0, -3))) + (0.0396f * (__SBREF(__a_sb, 0, -2))) + (0.0406f * (__SBREF(__a_sb, 0, -1))) + (0.0416f * (__SBREF(__a_sb, 0, 1))) + (0.0426f * (__SBREF(__a_sb, 0, 2))) + (0.0436f * (__SBREF(__a_sb, 0, 3))) + (0.0446f * (__SBREF(__a_sb, 0, 4))) + (0.0456f * (__SBREF(__a_sb, 1, -4))) + (0.0466f * (__SBREF(__a_sb, 1, -3))) + (0.0476f * (__SBREF(__a_sb, 1, -2))) + (0.0486f * (__SBREF(__a_sb, 1, -1))) + (0.0496f * (__SBREF(__a_sb, 1, 0))) + (0.0506f * (__SBREF(__a_sb, 1, 1))) + (0.0516f * (__SBREF(__a_sb, 1, 2))) + (0.0526f * (__SBREF(__a_sb, 1, 3))) + (0.0536f * (__SBREF(__a_sb, 1, 4))) + (0.0546f * (__SBREF(__a_sb, 2, -4))) + (0.0556f * (__SBREF(__a_sb, 2, -3))) + (0.0566f * (__SBREF(__a_sb, 2, -2))) + (0.0576f * (__SBREF(__a_sb, 2, -1))) + (0.0586f * (__SBREF(__a_sb, 2, 0))) + (0.0596f * (__SBREF(__a_sb, 2, 1))) + (0.0606f * (__SBREF(__a_sb, 2, 2))) + (0.0616f * (__SBREF(__a_sb, 2, 3))) + (0.0626f * (__SBREF(__a_sb, 2, 4))) + (0.0636f * (__SBREF(__a_sb, 3, -4))) + (0.0646f * (__SBREF(__a_sb, 3, -3))) + (0.0656f * (__SBREF(__a_sb, 3, -2))) + (0.0666f * (__SBREF(__a_sb, 3, -1))) + (0.0676f * (__SBREF(__a_sb, 3, 0))) + (0.0686f * (__SBREF(__a_sb, 3, 1))) + (0.0696f * (__SBREF(__a_sb, 3, 2))) + (0.0706f * (__SBREF(__a_sb, 3, 3))) + (0.0716f * (__SBREF(__a_sb, 3, 4))) + (0.0726f * (__SBREF(__a_sb, 4, -4))) + (0.0736f * (__SBREF(__a_sb, 4, -3))) + (0.0746f * (__SBREF(__a_sb, 4, -2))) + (0.0756f * (__SBREF(__a_sb, 4, -1))) + (0.0766f * (__SBREF(__a_sb, 4, 0))) + (0.0776f * (__SBREF(__a_sb, 4, 1))) + (0.0786f * (__SBREF(__a_sb, 4, 2))) + (0.0796f * (__SBREF(__a_sb, 4, 3))) + (0.0806f * (__SBREF(__a_sb, 4, 4))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.296f) * (__REGREF(__a, 0, 0)))) + (0.0017f * (__SBREF(__a_sb, -4, -4)))) + (0.0027f * (__SBREF(__a_sb, -4, -3)))) + (0.0037f * (__SBREF(__a_sb, -4, -2)))) + (0.0047f * (__SBREF(__a_sb, -4, -1)))) + (0.0057f * (__SBREF(__a_sb, -4, 0)))) + (0.0067f * (__SBREF(__a_sb, -4, 1)))) + (0.0077f * (__SBREF(__a_sb, -4, 2)))) + (0.0087f * (__SBREF(__a_sb, -4, 3)))) + (0.0097f * (__SBREF(__a_sb, -4, 4)))) + (0.0107f * (__SBREF(__a_sb, -3, -4)))) + (0.0117f * (__SBREF(__a_sb, -3, -3)))) + (0.0127f * (__SBREF(__a_sb, -3, -2)))) + (0.0137f * (__SBREF(__a_sb, -3, -1)))) + (0.0147f * (__SBREF(__a_sb, -3, 0)))) + (0.0157f * (__SBREF(__a_sb, -3, 1)))) + (0.0167f * (__SBREF(__a_sb, -3, 2)))) + (0.0177f * (__SBREF(__a_sb, -3, 3)))) + (0.0187f * (__SBREF(__a_sb, -3, 4)))) + (0.0197f * (__SBREF(__a_sb, -2, -4)))) + (0.0207f * (__SBREF(__a_sb, -2, -3)))) + (0.0217f * (__SBREF(__a_sb, -2, -2)))) + (0.0227f * (__SBREF(__a_sb, -2, -1)))) + (0.0237f * (__SBREF(__a_sb, -2, 0)))) + (0.0247f * (__SBREF(__a_sb, -2, 1)))) + (0.0257f * (__SBREF(__a_sb, -2, 2)))) + (0.0267f * (__SBREF(__a_sb, -2, 3)))) + (0.0277f * (__SBREF(__a_sb, -2, 4)))) + (0.0287f * (__SBREF(__a_sb, -1, -4)))) + (0.0297f * (__SBREF(__a_sb, -1, -3)))) + (0.0307f * (__SBREF(__a_sb, -1, -2)))) + (0.0317f * (__SBREF(__a_sb, -1, -1)))) + (0.0327f * (__SBREF(__a_sb, -1, 0)))) + (0.0337f * (__SBREF(__a_sb, -1, 1)))) + (0.0347f * (__SBREF(__a_sb, -1, 2)))) + (0.0357f * (__SBREF(__a_sb, -1, 3)))) + (0.0367f * (__SBREF(__a_sb, -1, 4)))) + (0.0377f * (__SBREF(__a_sb, 0, -4)))) + (0.0387f * (__SBREF(__a_sb, 0, -3)))) + (0.0397f * (__SBREF(__a_sb, 0, -2)))) + (0.0407f * (__SBREF(__a_sb, 0, -1)))) + (0.0417f * (__SBREF(__a_sb, 0, 1)))) + (0.0427f * (__SBREF(__a_sb, 0, 2)))) + (0.0437f * (__SBREF(__a_sb, 0, 3)))) + (0.0447f * (__SBREF(__a_sb, 0, 4)))) + (0.0457f * (__SBREF(__a_sb, 1, -4)))) + (0.0467f * (__SBREF(__a_sb, 1, -3)))) + (0.0477f * (__SBREF(__a_sb, 1, -2)))) + (0.0487f * (__SBREF(__a_sb, 1, -1)))) + (0.0497f * (__SBREF(__a_sb, 1, 0)))) + (0.0507f * (__SBREF(__a_sb, 1, 1)))) + (0.0517f * (__SBREF(__a_sb, 1, 2)))) + (0.0527f * (__SBREF(__a_sb, 1, 3)))) + (0.0537f * (__SBREF(__a_sb, 1, 4)))) + (0.0547f * (__SBREF(__a_sb, 2, -4)))) + (0.0557f * (__SBREF(__a_sb, 2, -3)))) + (0.0567f * (__SBREF(__a_sb, 2, -2)))) + (0.0577f * (__SBREF(__a_sb, 2, -1)))) + (0.0587f * (__SBREF(__a_sb, 2, 0)))) + (0.0597f * (__SBREF(__a_sb, 2, 1)))) + (0.0607f * (__SBREF(__a_sb, 2, 2)))) + (0.0617f * (__SBREF(__a_sb, 2, 3)))) + (0.0627f * (__SBREF(__a_sb, 2, 4)))) + (0.0637f * (__SBREF(__a_sb, 3, -4)))) + (0.0647f * (__SBREF(__a_sb, 3, -3)))) + (0.0657f * (__SBREF(__a_sb, 3, -2)))) + (0.0667f * (__SBREF(__a_sb, 3, -1)))) + (0.0677f * (__SBREF(__a_sb, 3, 0)))) + (0.0687f * (__SBREF(__a_sb, 3, 1)))) + (0.0697f * (__SBREF(__a_sb, 3, 2)))) + (0.0707f * (__SBREF(__a_sb, 3, 3)))) + (0.0717f * (__SBREF(__a_sb, 3, 4)))) + (0.0727f * (__SBREF(__a_sb, 4, -4)))) + (0.0737f * (__SBREF(__a_sb, 4, -3)))) + (0.0747f * (__SBREF(__a_sb, 4, -2)))) + (0.0757f * (__SBREF(__a_sb, 4, -1)))) + (0.0767f * (__SBREF(__a_sb, 4, 0)))) + (0.0777f * (__SBREF(__a_sb, 4, 1)))) + (0.0787f * (__SBREF(__a_sb, 4, 2)))) + (0.0797f * (__SBREF(__a_sb, 4, 3)))) + (0.0807f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.304f) * (__REGREF(__a, 0, 0)))) + (0.0018f * (__SBREF(__a_sb, -4, -4)))) + (0.0028f * (__SBREF(__a_sb, -4, -3)))) + (0.0038f * (__SBREF(__a_sb, -4, -2)))) + (0.0048f * (__SBREF(__a_sb, -4, -1)))) + (0.0058f * (__SBREF(__a_sb, -4, 0)))) + (0.0068f * (__SBREF(__a_sb, -4, 1)))) + (0.0078f * (__SBREF(__a_sb, -4, 2)))) + (0.0088f * (__SBREF(__a_sb, -4, 3)))) + (0.0098f * (__SBREF(__a_sb, -4, 4)))) + (0.0108f * (__SBREF(__a_sb, -3, -4)))) + (0.0118f * (__SBREF(__a_sb, -3, -3)))) + (0.0128f * (__SBREF(__a_sb, -3, -2)))) + (0.0138f * (__SBREF(__a_sb, -3, -1)))) + (0.0148f * (__SBREF(__a_sb, -3, 0)))) + (0.0158f * (__SBREF(__a_sb, -3, 1)))) + (0.0168f * (__SBREF(__a_sb, -3, 2)))) + (0.0178f * (__SBREF(__a_sb, -3, 3)))) + (0.0188f * (__SBREF(__a_sb, -3, 4)))) + (0.0198f * (__SBREF(__a_sb, -2, -4)))) + (0.0208f * (__SBREF(__a_sb, -2, -3)))) + (0.0218f * (__SBREF(__a_sb, -2, -2)))) + (0.0228f * (__SBREF(__a_sb, -2, -1)))) + (0.0238f * (__SBREF(__a_sb, -2, 0)))) + (0.0248f * (__SBREF(__a_sb, -2, 1)))) + (0.0258f * (__SBREF(__a_sb, -2, 2)))) + (0.0268f * (__SBREF(__a_sb, -2, 3)))) + (0.0278f * (__SBREF(__a_sb, -2, 4)))) + (0.0288f * (__SBREF(__a_sb, -1, -4)))) + (0.0298f * (__SBREF(__a_sb, -1, -3)))) + (0.0308f * (__SBREF(__a_sb, -1, -2)))) + (0.0318f * (__SBREF(__a_sb, -1, -1)))) + (0.0328f * (__SBREF(__a_sb, -1, 0)))) + (0.0338f * (__SBREF(__a_sb, -1, 1)))) + (0.0348f * (__SBREF(__a_sb, -1, 2)))) + (0.0358f * (__SBREF(__a_sb, -1, 3)))) + (0.0368f * (__SBREF(__a_sb, -1, 4)))) + (0.0378f * (__SBREF(__a_sb, 0, -4)))) + (0.0388f * (__SBREF(__a_sb, 0, -3)))) + (0.0398f * (__SBREF(__a_sb, 0, -2)))) + (0.0408f * (__SBREF(__a_sb, 0, -1)))) + (0.0418f * (__SBREF(__a_sb, 0, 1)))) + (0.0428f * (__SBREF(__a_sb, 0, 2)))) + (0.0438f * (__SBREF(__a_sb, 0, 3)))) + (0.0448f * (__SBREF(__a_sb, 0, 4)))) + (0.0458f * (__SBREF(__a_sb, 1, -4)))) + (0.0468f * (__SBREF(__a_sb, 1, -3)))) + (0.0478f * (__SBREF(__a_sb, 1, -2)))) + (0.0488f * (__SBREF(__a_sb, 1, -1)))) + (0.0498f * (__SBREF(__a_sb, 1, 0)))) + (0.0508f * (__SBREF(__a_sb, 1, 1)))) + (0.0518f * (__SBREF(__a_sb, 1, 2)))) + (0.0528f * (__SBREF(__a_sb, 1, 3)))) + (0.0538f * (__SBREF(__a_sb, 1, 4)))) + (0.0548f * (__SBREF(__a_sb, 2, -4)))) + (0.0558f * (__SBREF(__a_sb, 2, -3)))) + (0.0568f * (__SBREF(__a_sb, 2, -2)))) + (0.0578f * (__SBREF(__a_sb, 2, -1)))) + (0.0588f * (__SBREF(__a_sb, 2, 0)))) + (0.0598f * (__SBREF(__a_sb, 2, 1)))) + (0.0608f * (__SBREF(__a_sb, 2, 2)))) + (0.0618f * (__SBREF(__a_sb, 2, 3)))) + (0.0628f * (__SBREF(__a_sb, 2, 4)))) + (0.0638f * (__SBREF(__a_sb, 3, -4)))) + (0.0648f * (__SBREF(__a_sb, 3, -3)))) + (0.0658f * (__SBREF(__a_sb, 3, -2)))) + (0.0668f * (__SBREF(__a_sb, 3, -1)))) + (0.0678f * (__SBREF(__a_sb, 3, 0)))) + (0.0688f * (__SBREF(__a_sb, 3, 1)))) + (0.0698f * (__SBREF(__a_sb, 3, 2)))) + (0.0708f * (__SBREF(__a_sb, 3, 3)))) + (0.0718f * (__SBREF(__a_sb, 3, 4)))) + (0.0728f * (__SBREF(__a_sb, 4, -4)))) + (0.0738f * (__SBREF(__a_sb, 4, -3)))) + (0.0748f * (__SBREF(__a_sb, 4, -2)))) + (0.0758f * (__SBREF(__a_sb, 4, -1)))) + (0.0768f * (__SBREF(__a_sb, 4, 0)))) + (0.0778f * (__SBREF(__a_sb, 4, 1)))) + (0.0788f * (__SBREF(__a_sb, 4, 2)))) + (0.0798f * (__SBREF(__a_sb, 4, 3)))) + (0.0808f * (__SBREF(__a_sb, 4, 4)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
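// Pipeline sketch (inferred from the macros above): each __LOAD fetches one c1-plane
// into __reg_0, __CALC1 scatters that plane's contribution into the nine rotating
// partial sums __reg_1_0..__reg_1_8 (one per in-flight output plane of the radius-4
// box stencil), and __STORE retires the plane whose nine contributions are complete,
// trailing the most recent load by __halo1 (= 4) planes.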
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
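// Warm-up done: both branches primed the register ring with planes 0..8 and stored
// output plane 4. The shared-memory pointer now moves to the second half of
// __a_sb_double; __DB_SWITCH() keeps ping-ponging the two halves so one plane's tile
// can still be read while the next plane is staged.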
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
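// Last c1-block: steady-state loop, unrolled 9x to match one full rotation of the
// register ring; each step loads plane __h and stores plane __h - 4, until the tail
// cases below take over.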
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
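// Tail dispatch: exactly one branch below fires, chosen by how many planes remain for
// this block; the generated `if (0) {}` is scaffolding that lets every real case be an
// `else if`. The repeated register arguments in these __CALC1 calls appear to act as
// throwaway slots for contributions that would land outside the block.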
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__STORE(__h + 7, __reg_1_7);
}
}
else
{
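// Interior c1-blocks: stream the full overlapped extent __side1LenOl, recomputing halo
// planes redundantly; the drain after this loop exits early once the overlapped range
// is exhausted.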
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
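// Drain: up to nine trailing planes, bailing out as soon as __h reaches __side1LenOl.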
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
}
}
fffdc3c67e5d25015d37a2217e3b427816c97b92.cu
#include "box3d4r-32x16-1-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 8;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
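// Tile geometry implied by the constants above: a 32x16 thread block (512 threads,
// __blockSize) covers a 24x8 output tile plus a 4-wide halo on every side
// (__side3LenOl = 32, __side2LenOl = 16) and streams __side1Len = 128 planes along c1
// per time step (__side0Len = 1).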
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_1_5;
double __reg_1_6;
double __reg_1_7;
double __reg_1_8;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
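// Halo threads take part in loads (__loadValid), but only threads at least one halo
// width inside the overlapped tile may store (__writeValid1 / __storeValid).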
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
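// The two time levels ping-pong inside A: (__c0 % 2) selects the source grid in
// __LOAD, ((c0 + 1) % 2) the destination in __DEST.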
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((-3.240f) * (__REGREF(__a, 0, 0))) + (0.0010f * (__SBREF(__a_sb, -4, -4))) + (0.0020f * (__SBREF(__a_sb, -4, -3))) + (0.0030f * (__SBREF(__a_sb, -4, -2))) + (0.0040f * (__SBREF(__a_sb, -4, -1))) + (0.0050f * (__SBREF(__a_sb, -4, 0))) + (0.0060f * (__SBREF(__a_sb, -4, 1))) + (0.0070f * (__SBREF(__a_sb, -4, 2))) + (0.0080f * (__SBREF(__a_sb, -4, 3))) + (0.0090f * (__SBREF(__a_sb, -4, 4))) + (0.0100f * (__SBREF(__a_sb, -3, -4))) + (0.0110f * (__SBREF(__a_sb, -3, -3))) + (0.0120f * (__SBREF(__a_sb, -3, -2))) + (0.0130f * (__SBREF(__a_sb, -3, -1))) + (0.0140f * (__SBREF(__a_sb, -3, 0))) + (0.0150f * (__SBREF(__a_sb, -3, 1))) + (0.0160f * (__SBREF(__a_sb, -3, 2))) + (0.0170f * (__SBREF(__a_sb, -3, 3))) + (0.0180f * (__SBREF(__a_sb, -3, 4))) + (0.0190f * (__SBREF(__a_sb, -2, -4))) + (0.0200f * (__SBREF(__a_sb, -2, -3))) + (0.0210f * (__SBREF(__a_sb, -2, -2))) + (0.0220f * (__SBREF(__a_sb, -2, -1))) + (0.0230f * (__SBREF(__a_sb, -2, 0))) + (0.0240f * (__SBREF(__a_sb, -2, 1))) + (0.0250f * (__SBREF(__a_sb, -2, 2))) + (0.0260f * (__SBREF(__a_sb, -2, 3))) + (0.0270f * (__SBREF(__a_sb, -2, 4))) + (0.0280f * (__SBREF(__a_sb, -1, -4))) + (0.0290f * (__SBREF(__a_sb, -1, -3))) + (0.0300f * (__SBREF(__a_sb, -1, -2))) + (0.0310f * (__SBREF(__a_sb, -1, -1))) + (0.0320f * (__SBREF(__a_sb, -1, 0))) + (0.0330f * (__SBREF(__a_sb, -1, 1))) + (0.0340f * (__SBREF(__a_sb, -1, 2))) + (0.0350f * (__SBREF(__a_sb, -1, 3))) + (0.0360f * (__SBREF(__a_sb, -1, 4))) + (0.0370f * (__SBREF(__a_sb, 0, -4))) + (0.0380f * (__SBREF(__a_sb, 0, -3))) + (0.0390f * (__SBREF(__a_sb, 0, -2))) + (0.0400f * (__SBREF(__a_sb, 0, -1))) + (0.0410f * (__SBREF(__a_sb, 0, 1))) + (0.0420f * (__SBREF(__a_sb, 0, 2))) + (0.0430f * (__SBREF(__a_sb, 0, 3))) + (0.0440f * (__SBREF(__a_sb, 0, 4))) + (0.0450f * (__SBREF(__a_sb, 1, -4))) + (0.0460f * (__SBREF(__a_sb, 1, -3))) + (0.0470f * (__SBREF(__a_sb, 1, -2))) + (0.0480f * (__SBREF(__a_sb, 1, -1))) + (0.0490f * (__SBREF(__a_sb, 1, 0))) + (0.0500f * (__SBREF(__a_sb, 1, 1))) + (0.0510f * (__SBREF(__a_sb, 1, 2))) + (0.0520f * (__SBREF(__a_sb, 1, 3))) + (0.0530f * (__SBREF(__a_sb, 1, 4))) + (0.0540f * (__SBREF(__a_sb, 2, -4))) + (0.0550f * (__SBREF(__a_sb, 2, -3))) + (0.0560f * (__SBREF(__a_sb, 2, -2))) + (0.0570f * (__SBREF(__a_sb, 2, -1))) + (0.0580f * (__SBREF(__a_sb, 2, 0))) + (0.0590f * (__SBREF(__a_sb, 2, 1))) + (0.0600f * (__SBREF(__a_sb, 2, 2))) + (0.0610f * (__SBREF(__a_sb, 2, 3))) + (0.0620f * (__SBREF(__a_sb, 2, 4))) + (0.0630f * (__SBREF(__a_sb, 3, -4))) + (0.0640f * (__SBREF(__a_sb, 3, -3))) + (0.0650f * (__SBREF(__a_sb, 3, -2))) + (0.0660f * (__SBREF(__a_sb, 3, -1))) + (0.0670f * (__SBREF(__a_sb, 3, 0))) + (0.0680f * (__SBREF(__a_sb, 3, 1))) + (0.0690f * (__SBREF(__a_sb, 3, 2))) + (0.0700f * (__SBREF(__a_sb, 3, 3))) + (0.0710f * (__SBREF(__a_sb, 3, 4))) + (0.0720f * (__SBREF(__a_sb, 4, -4))) + (0.0730f * (__SBREF(__a_sb, 4, -3))) + (0.0740f * (__SBREF(__a_sb, 4, -2))) + (0.0750f * (__SBREF(__a_sb, 4, -1))) + (0.0760f * (__SBREF(__a_sb, 4, 0))) + (0.0770f * (__SBREF(__a_sb, 4, 1))) + (0.0780f * (__SBREF(__a_sb, 4, 2))) + (0.0790f * (__SBREF(__a_sb, 4, 3))) + (0.0800f * (__SBREF(__a_sb, 4, 4))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((-3.248f) * (__REGREF(__a, 0, 0))) + (0.0011f * (__SBREF(__a_sb, -4, -4))) + (0.0021f * (__SBREF(__a_sb, -4, -3))) + (0.0031f * (__SBREF(__a_sb, -4, -2))) + (0.0041f * (__SBREF(__a_sb, -4, -1))) + (0.0051f * (__SBREF(__a_sb, -4, 0))) + (0.0061f * (__SBREF(__a_sb, -4, 1))) + (0.0071f * (__SBREF(__a_sb, -4, 2))) + (0.0081f * (__SBREF(__a_sb, -4, 3))) + (0.0091f * (__SBREF(__a_sb, -4, 4))) + (0.0101f * (__SBREF(__a_sb, -3, -4))) + (0.0111f * (__SBREF(__a_sb, -3, -3))) + (0.0121f * (__SBREF(__a_sb, -3, -2))) + (0.0131f * (__SBREF(__a_sb, -3, -1))) + (0.0141f * (__SBREF(__a_sb, -3, 0))) + (0.0151f * (__SBREF(__a_sb, -3, 1))) + (0.0161f * (__SBREF(__a_sb, -3, 2))) + (0.0171f * (__SBREF(__a_sb, -3, 3))) + (0.0181f * (__SBREF(__a_sb, -3, 4))) + (0.0191f * (__SBREF(__a_sb, -2, -4))) + (0.0201f * (__SBREF(__a_sb, -2, -3))) + (0.0211f * (__SBREF(__a_sb, -2, -2))) + (0.0221f * (__SBREF(__a_sb, -2, -1))) + (0.0231f * (__SBREF(__a_sb, -2, 0))) + (0.0241f * (__SBREF(__a_sb, -2, 1))) + (0.0251f * (__SBREF(__a_sb, -2, 2))) + (0.0261f * (__SBREF(__a_sb, -2, 3))) + (0.0271f * (__SBREF(__a_sb, -2, 4))) + (0.0281f * (__SBREF(__a_sb, -1, -4))) + (0.0291f * (__SBREF(__a_sb, -1, -3))) + (0.0301f * (__SBREF(__a_sb, -1, -2))) + (0.0311f * (__SBREF(__a_sb, -1, -1))) + (0.0321f * (__SBREF(__a_sb, -1, 0))) + (0.0331f * (__SBREF(__a_sb, -1, 1))) + (0.0341f * (__SBREF(__a_sb, -1, 2))) + (0.0351f * (__SBREF(__a_sb, -1, 3))) + (0.0361f * (__SBREF(__a_sb, -1, 4))) + (0.0371f * (__SBREF(__a_sb, 0, -4))) + (0.0381f * (__SBREF(__a_sb, 0, -3))) + (0.0391f * (__SBREF(__a_sb, 0, -2))) + (0.0401f * (__SBREF(__a_sb, 0, -1))) + (0.0411f * (__SBREF(__a_sb, 0, 1))) + (0.0421f * (__SBREF(__a_sb, 0, 2))) + (0.0431f * (__SBREF(__a_sb, 0, 3))) + (0.0441f * (__SBREF(__a_sb, 0, 4))) + (0.0451f * (__SBREF(__a_sb, 1, -4))) + (0.0461f * (__SBREF(__a_sb, 1, -3))) + (0.0471f * (__SBREF(__a_sb, 1, -2))) + (0.0481f * (__SBREF(__a_sb, 1, -1))) + (0.0491f * (__SBREF(__a_sb, 1, 0))) + (0.0501f * (__SBREF(__a_sb, 1, 1))) + (0.0511f * (__SBREF(__a_sb, 1, 2))) + (0.0521f * (__SBREF(__a_sb, 1, 3))) + (0.0531f * (__SBREF(__a_sb, 1, 4))) + (0.0541f * (__SBREF(__a_sb, 2, -4))) + (0.0551f * (__SBREF(__a_sb, 2, -3))) + (0.0561f * (__SBREF(__a_sb, 2, -2))) + (0.0571f * (__SBREF(__a_sb, 2, -1))) + (0.0581f * (__SBREF(__a_sb, 2, 0))) + (0.0591f * (__SBREF(__a_sb, 2, 1))) + (0.0601f * (__SBREF(__a_sb, 2, 2))) + (0.0611f * (__SBREF(__a_sb, 2, 3))) + (0.0621f * (__SBREF(__a_sb, 2, 4))) + (0.0631f * (__SBREF(__a_sb, 3, -4))) + (0.0641f * (__SBREF(__a_sb, 3, -3))) + (0.0651f * (__SBREF(__a_sb, 3, -2))) + (0.0661f * (__SBREF(__a_sb, 3, -1))) + (0.0671f * (__SBREF(__a_sb, 3, 0))) + (0.0681f * (__SBREF(__a_sb, 3, 1))) + (0.0691f * (__SBREF(__a_sb, 3, 2))) + (0.0701f * (__SBREF(__a_sb, 3, 3))) + (0.0711f * (__SBREF(__a_sb, 3, 4))) + (0.0721f * (__SBREF(__a_sb, 4, -4))) + (0.0731f * (__SBREF(__a_sb, 4, -3))) + (0.0741f * (__SBREF(__a_sb, 4, -2))) + (0.0751f * (__SBREF(__a_sb, 4, -1))) + (0.0761f * (__SBREF(__a_sb, 4, 0))) + (0.0771f * (__SBREF(__a_sb, 4, 1))) + (0.0781f * (__SBREF(__a_sb, 4, 2))) + (0.0791f * (__SBREF(__a_sb, 4, 3))) + (0.0801f * (__SBREF(__a_sb, 4, 4))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((-3.256f) * (__REGREF(__a, 0, 0))) + (0.0012f * (__SBREF(__a_sb, -4, -4))) + (0.0022f * (__SBREF(__a_sb, -4, -3))) + (0.0032f * (__SBREF(__a_sb, -4, -2))) + (0.0042f * (__SBREF(__a_sb, -4, -1))) + (0.0052f * (__SBREF(__a_sb, -4, 0))) + (0.0062f * (__SBREF(__a_sb, -4, 1))) + (0.0072f * (__SBREF(__a_sb, -4, 2))) + (0.0082f * (__SBREF(__a_sb, -4, 3))) + (0.0092f * (__SBREF(__a_sb, -4, 4))) + (0.0102f * (__SBREF(__a_sb, -3, -4))) + (0.0112f * (__SBREF(__a_sb, -3, -3))) + (0.0122f * (__SBREF(__a_sb, -3, -2))) + (0.0132f * (__SBREF(__a_sb, -3, -1))) + (0.0142f * (__SBREF(__a_sb, -3, 0))) + (0.0152f * (__SBREF(__a_sb, -3, 1))) + (0.0162f * (__SBREF(__a_sb, -3, 2))) + (0.0172f * (__SBREF(__a_sb, -3, 3))) + (0.0182f * (__SBREF(__a_sb, -3, 4))) + (0.0192f * (__SBREF(__a_sb, -2, -4))) + (0.0202f * (__SBREF(__a_sb, -2, -3))) + (0.0212f * (__SBREF(__a_sb, -2, -2))) + (0.0222f * (__SBREF(__a_sb, -2, -1))) + (0.0232f * (__SBREF(__a_sb, -2, 0))) + (0.0242f * (__SBREF(__a_sb, -2, 1))) + (0.0252f * (__SBREF(__a_sb, -2, 2))) + (0.0262f * (__SBREF(__a_sb, -2, 3))) + (0.0272f * (__SBREF(__a_sb, -2, 4))) + (0.0282f * (__SBREF(__a_sb, -1, -4))) + (0.0292f * (__SBREF(__a_sb, -1, -3))) + (0.0302f * (__SBREF(__a_sb, -1, -2))) + (0.0312f * (__SBREF(__a_sb, -1, -1))) + (0.0322f * (__SBREF(__a_sb, -1, 0))) + (0.0332f * (__SBREF(__a_sb, -1, 1))) + (0.0342f * (__SBREF(__a_sb, -1, 2))) + (0.0352f * (__SBREF(__a_sb, -1, 3))) + (0.0362f * (__SBREF(__a_sb, -1, 4))) + (0.0372f * (__SBREF(__a_sb, 0, -4))) + (0.0382f * (__SBREF(__a_sb, 0, -3))) + (0.0392f * (__SBREF(__a_sb, 0, -2))) + (0.0402f * (__SBREF(__a_sb, 0, -1))) + (0.0412f * (__SBREF(__a_sb, 0, 1))) + (0.0422f * (__SBREF(__a_sb, 0, 2))) + (0.0432f * (__SBREF(__a_sb, 0, 3))) + (0.0442f * (__SBREF(__a_sb, 0, 4))) + (0.0452f * (__SBREF(__a_sb, 1, -4))) + (0.0462f * (__SBREF(__a_sb, 1, -3))) + (0.0472f * (__SBREF(__a_sb, 1, -2))) + (0.0482f * (__SBREF(__a_sb, 1, -1))) + (0.0492f * (__SBREF(__a_sb, 1, 0))) + (0.0502f * (__SBREF(__a_sb, 1, 1))) + (0.0512f * (__SBREF(__a_sb, 1, 2))) + (0.0522f * (__SBREF(__a_sb, 1, 3))) + (0.0532f * (__SBREF(__a_sb, 1, 4))) + (0.0542f * (__SBREF(__a_sb, 2, -4))) + (0.0552f * (__SBREF(__a_sb, 2, -3))) + (0.0562f * (__SBREF(__a_sb, 2, -2))) + (0.0572f * (__SBREF(__a_sb, 2, -1))) + (0.0582f * (__SBREF(__a_sb, 2, 0))) + (0.0592f * (__SBREF(__a_sb, 2, 1))) + (0.0602f * (__SBREF(__a_sb, 2, 2))) + (0.0612f * (__SBREF(__a_sb, 2, 3))) + (0.0622f * (__SBREF(__a_sb, 2, 4))) + (0.0632f * (__SBREF(__a_sb, 3, -4))) + (0.0642f * (__SBREF(__a_sb, 3, -3))) + (0.0652f * (__SBREF(__a_sb, 3, -2))) + (0.0662f * (__SBREF(__a_sb, 3, -1))) + (0.0672f * (__SBREF(__a_sb, 3, 0))) + (0.0682f * (__SBREF(__a_sb, 3, 1))) + (0.0692f * (__SBREF(__a_sb, 3, 2))) + (0.0702f * (__SBREF(__a_sb, 3, 3))) + (0.0712f * (__SBREF(__a_sb, 3, 4))) + (0.0722f * (__SBREF(__a_sb, 4, -4))) + (0.0732f * (__SBREF(__a_sb, 4, -3))) + (0.0742f * (__SBREF(__a_sb, 4, -2))) + (0.0752f * (__SBREF(__a_sb, 4, -1))) + (0.0762f * (__SBREF(__a_sb, 4, 0))) + (0.0772f * (__SBREF(__a_sb, 4, 1))) + (0.0782f * (__SBREF(__a_sb, 4, 2))) + (0.0792f * (__SBREF(__a_sb, 4, 3))) + (0.0802f * (__SBREF(__a_sb, 4, 4))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((-3.264f) * (__REGREF(__a, 0, 0))) + (0.0013f * (__SBREF(__a_sb, -4, -4))) + (0.0023f * (__SBREF(__a_sb, -4, -3))) + (0.0033f * (__SBREF(__a_sb, -4, -2))) + (0.0043f * (__SBREF(__a_sb, -4, -1))) + (0.0053f * (__SBREF(__a_sb, -4, 0))) + (0.0063f * (__SBREF(__a_sb, -4, 1))) + (0.0073f * (__SBREF(__a_sb, -4, 2))) + (0.0083f * (__SBREF(__a_sb, -4, 3))) + (0.0093f * (__SBREF(__a_sb, -4, 4))) + (0.0103f * (__SBREF(__a_sb, -3, -4))) + (0.0113f * (__SBREF(__a_sb, -3, -3))) + (0.0123f * (__SBREF(__a_sb, -3, -2))) + (0.0133f * (__SBREF(__a_sb, -3, -1))) + (0.0143f * (__SBREF(__a_sb, -3, 0))) + (0.0153f * (__SBREF(__a_sb, -3, 1))) + (0.0163f * (__SBREF(__a_sb, -3, 2))) + (0.0173f * (__SBREF(__a_sb, -3, 3))) + (0.0183f * (__SBREF(__a_sb, -3, 4))) + (0.0193f * (__SBREF(__a_sb, -2, -4))) + (0.0203f * (__SBREF(__a_sb, -2, -3))) + (0.0213f * (__SBREF(__a_sb, -2, -2))) + (0.0223f * (__SBREF(__a_sb, -2, -1))) + (0.0233f * (__SBREF(__a_sb, -2, 0))) + (0.0243f * (__SBREF(__a_sb, -2, 1))) + (0.0253f * (__SBREF(__a_sb, -2, 2))) + (0.0263f * (__SBREF(__a_sb, -2, 3))) + (0.0273f * (__SBREF(__a_sb, -2, 4))) + (0.0283f * (__SBREF(__a_sb, -1, -4))) + (0.0293f * (__SBREF(__a_sb, -1, -3))) + (0.0303f * (__SBREF(__a_sb, -1, -2))) + (0.0313f * (__SBREF(__a_sb, -1, -1))) + (0.0323f * (__SBREF(__a_sb, -1, 0))) + (0.0333f * (__SBREF(__a_sb, -1, 1))) + (0.0343f * (__SBREF(__a_sb, -1, 2))) + (0.0353f * (__SBREF(__a_sb, -1, 3))) + (0.0363f * (__SBREF(__a_sb, -1, 4))) + (0.0373f * (__SBREF(__a_sb, 0, -4))) + (0.0383f * (__SBREF(__a_sb, 0, -3))) + (0.0393f * (__SBREF(__a_sb, 0, -2))) + (0.0403f * (__SBREF(__a_sb, 0, -1))) + (0.0413f * (__SBREF(__a_sb, 0, 1))) + (0.0423f * (__SBREF(__a_sb, 0, 2))) + (0.0433f * (__SBREF(__a_sb, 0, 3))) + (0.0443f * (__SBREF(__a_sb, 0, 4))) + (0.0453f * (__SBREF(__a_sb, 1, -4))) + (0.0463f * (__SBREF(__a_sb, 1, -3))) + (0.0473f * (__SBREF(__a_sb, 1, -2))) + (0.0483f * (__SBREF(__a_sb, 1, -1))) + (0.0493f * (__SBREF(__a_sb, 1, 0))) + (0.0503f * (__SBREF(__a_sb, 1, 1))) + (0.0513f * (__SBREF(__a_sb, 1, 2))) + (0.0523f * (__SBREF(__a_sb, 1, 3))) + (0.0533f * (__SBREF(__a_sb, 1, 4))) + (0.0543f * (__SBREF(__a_sb, 2, -4))) + (0.0553f * (__SBREF(__a_sb, 2, -3))) + (0.0563f * (__SBREF(__a_sb, 2, -2))) + (0.0573f * (__SBREF(__a_sb, 2, -1))) + (0.0583f * (__SBREF(__a_sb, 2, 0))) + (0.0593f * (__SBREF(__a_sb, 2, 1))) + (0.0603f * (__SBREF(__a_sb, 2, 2))) + (0.0613f * (__SBREF(__a_sb, 2, 3))) + (0.0623f * (__SBREF(__a_sb, 2, 4))) + (0.0633f * (__SBREF(__a_sb, 3, -4))) + (0.0643f * (__SBREF(__a_sb, 3, -3))) + (0.0653f * (__SBREF(__a_sb, 3, -2))) + (0.0663f * (__SBREF(__a_sb, 3, -1))) + (0.0673f * (__SBREF(__a_sb, 3, 0))) + (0.0683f * (__SBREF(__a_sb, 3, 1))) + (0.0693f * (__SBREF(__a_sb, 3, 2))) + (0.0703f * (__SBREF(__a_sb, 3, 3))) + (0.0713f * (__SBREF(__a_sb, 3, 4))) + (0.0723f * (__SBREF(__a_sb, 4, -4))) + (0.0733f * (__SBREF(__a_sb, 4, -3))) + (0.0743f * (__SBREF(__a_sb, 4, -2))) + (0.0753f * (__SBREF(__a_sb, 4, -1))) + (0.0763f * (__SBREF(__a_sb, 4, 0))) + (0.0773f * (__SBREF(__a_sb, 4, 1))) + (0.0783f * (__SBREF(__a_sb, 4, 2))) + (0.0793f * (__SBREF(__a_sb, 4, 3))) + (0.0803f * (__SBREF(__a_sb, 4, 4))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.272f) * (__REGREF(__a, 0, 0)))) + (0.0014f * (__SBREF(__a_sb, -4, -4)))) + (0.0024f * (__SBREF(__a_sb, -4, -3)))) + (0.0034f * (__SBREF(__a_sb, -4, -2)))) + (0.0044f * (__SBREF(__a_sb, -4, -1)))) + (0.0054f * (__SBREF(__a_sb, -4, 0)))) + (0.0064f * (__SBREF(__a_sb, -4, 1)))) + (0.0074f * (__SBREF(__a_sb, -4, 2)))) + (0.0084f * (__SBREF(__a_sb, -4, 3)))) + (0.0094f * (__SBREF(__a_sb, -4, 4)))) + (0.0104f * (__SBREF(__a_sb, -3, -4)))) + (0.0114f * (__SBREF(__a_sb, -3, -3)))) + (0.0124f * (__SBREF(__a_sb, -3, -2)))) + (0.0134f * (__SBREF(__a_sb, -3, -1)))) + (0.0144f * (__SBREF(__a_sb, -3, 0)))) + (0.0154f * (__SBREF(__a_sb, -3, 1)))) + (0.0164f * (__SBREF(__a_sb, -3, 2)))) + (0.0174f * (__SBREF(__a_sb, -3, 3)))) + (0.0184f * (__SBREF(__a_sb, -3, 4)))) + (0.0194f * (__SBREF(__a_sb, -2, -4)))) + (0.0204f * (__SBREF(__a_sb, -2, -3)))) + (0.0214f * (__SBREF(__a_sb, -2, -2)))) + (0.0224f * (__SBREF(__a_sb, -2, -1)))) + (0.0234f * (__SBREF(__a_sb, -2, 0)))) + (0.0244f * (__SBREF(__a_sb, -2, 1)))) + (0.0254f * (__SBREF(__a_sb, -2, 2)))) + (0.0264f * (__SBREF(__a_sb, -2, 3)))) + (0.0274f * (__SBREF(__a_sb, -2, 4)))) + (0.0284f * (__SBREF(__a_sb, -1, -4)))) + (0.0294f * (__SBREF(__a_sb, -1, -3)))) + (0.0304f * (__SBREF(__a_sb, -1, -2)))) + (0.0314f * (__SBREF(__a_sb, -1, -1)))) + (0.0324f * (__SBREF(__a_sb, -1, 0)))) + (0.0334f * (__SBREF(__a_sb, -1, 1)))) + (0.0344f * (__SBREF(__a_sb, -1, 2)))) + (0.0354f * (__SBREF(__a_sb, -1, 3)))) + (0.0364f * (__SBREF(__a_sb, -1, 4)))) + (0.0374f * (__SBREF(__a_sb, 0, -4)))) + (0.0384f * (__SBREF(__a_sb, 0, -3)))) + (0.0394f * (__SBREF(__a_sb, 0, -2)))) + (0.0404f * (__SBREF(__a_sb, 0, -1)))) + (0.0414f * (__SBREF(__a_sb, 0, 1)))) + (0.0424f * (__SBREF(__a_sb, 0, 2)))) + (0.0434f * (__SBREF(__a_sb, 0, 3)))) + (0.0444f * (__SBREF(__a_sb, 0, 4)))) + (0.0454f * (__SBREF(__a_sb, 1, -4)))) + (0.0464f * (__SBREF(__a_sb, 1, -3)))) + (0.0474f * (__SBREF(__a_sb, 1, -2)))) + (0.0484f * (__SBREF(__a_sb, 1, -1)))) + (0.0494f * (__SBREF(__a_sb, 1, 0)))) + (0.0504f * (__SBREF(__a_sb, 1, 1)))) + (0.0514f * (__SBREF(__a_sb, 1, 2)))) + (0.0524f * (__SBREF(__a_sb, 1, 3)))) + (0.0534f * (__SBREF(__a_sb, 1, 4)))) + (0.0544f * (__SBREF(__a_sb, 2, -4)))) + (0.0554f * (__SBREF(__a_sb, 2, -3)))) + (0.0564f * (__SBREF(__a_sb, 2, -2)))) + (0.0574f * (__SBREF(__a_sb, 2, -1)))) + (0.0584f * (__SBREF(__a_sb, 2, 0)))) + (0.0594f * (__SBREF(__a_sb, 2, 1)))) + (0.0604f * (__SBREF(__a_sb, 2, 2)))) + (0.0614f * (__SBREF(__a_sb, 2, 3)))) + (0.0624f * (__SBREF(__a_sb, 2, 4)))) + (0.0634f * (__SBREF(__a_sb, 3, -4)))) + (0.0644f * (__SBREF(__a_sb, 3, -3)))) + (0.0654f * (__SBREF(__a_sb, 3, -2)))) + (0.0664f * (__SBREF(__a_sb, 3, -1)))) + (0.0674f * (__SBREF(__a_sb, 3, 0)))) + (0.0684f * (__SBREF(__a_sb, 3, 1)))) + (0.0694f * (__SBREF(__a_sb, 3, 2)))) + (0.0704f * (__SBREF(__a_sb, 3, 3)))) + (0.0714f * (__SBREF(__a_sb, 3, 4)))) + (0.0724f * (__SBREF(__a_sb, 4, -4)))) + (0.0734f * (__SBREF(__a_sb, 4, -3)))) + (0.0744f * (__SBREF(__a_sb, 4, -2)))) + (0.0754f * (__SBREF(__a_sb, 4, -1)))) + (0.0764f * (__SBREF(__a_sb, 4, 0)))) + (0.0774f * (__SBREF(__a_sb, 4, 1)))) + (0.0784f * (__SBREF(__a_sb, 4, 2)))) + (0.0794f * (__SBREF(__a_sb, 4, 3)))) + (0.0804f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.280f) * (__REGREF(__a, 0, 0)))) + (0.0015f * (__SBREF(__a_sb, -4, -4)))) + (0.0025f * (__SBREF(__a_sb, -4, -3)))) + (0.0035f * (__SBREF(__a_sb, -4, -2)))) + (0.0045f * (__SBREF(__a_sb, -4, -1)))) + (0.0055f * (__SBREF(__a_sb, -4, 0)))) + (0.0065f * (__SBREF(__a_sb, -4, 1)))) + (0.0075f * (__SBREF(__a_sb, -4, 2)))) + (0.0085f * (__SBREF(__a_sb, -4, 3)))) + (0.0095f * (__SBREF(__a_sb, -4, 4)))) + (0.0105f * (__SBREF(__a_sb, -3, -4)))) + (0.0115f * (__SBREF(__a_sb, -3, -3)))) + (0.0125f * (__SBREF(__a_sb, -3, -2)))) + (0.0135f * (__SBREF(__a_sb, -3, -1)))) + (0.0145f * (__SBREF(__a_sb, -3, 0)))) + (0.0155f * (__SBREF(__a_sb, -3, 1)))) + (0.0165f * (__SBREF(__a_sb, -3, 2)))) + (0.0175f * (__SBREF(__a_sb, -3, 3)))) + (0.0185f * (__SBREF(__a_sb, -3, 4)))) + (0.0195f * (__SBREF(__a_sb, -2, -4)))) + (0.0205f * (__SBREF(__a_sb, -2, -3)))) + (0.0215f * (__SBREF(__a_sb, -2, -2)))) + (0.0225f * (__SBREF(__a_sb, -2, -1)))) + (0.0235f * (__SBREF(__a_sb, -2, 0)))) + (0.0245f * (__SBREF(__a_sb, -2, 1)))) + (0.0255f * (__SBREF(__a_sb, -2, 2)))) + (0.0265f * (__SBREF(__a_sb, -2, 3)))) + (0.0275f * (__SBREF(__a_sb, -2, 4)))) + (0.0285f * (__SBREF(__a_sb, -1, -4)))) + (0.0295f * (__SBREF(__a_sb, -1, -3)))) + (0.0305f * (__SBREF(__a_sb, -1, -2)))) + (0.0315f * (__SBREF(__a_sb, -1, -1)))) + (0.0325f * (__SBREF(__a_sb, -1, 0)))) + (0.0335f * (__SBREF(__a_sb, -1, 1)))) + (0.0345f * (__SBREF(__a_sb, -1, 2)))) + (0.0355f * (__SBREF(__a_sb, -1, 3)))) + (0.0365f * (__SBREF(__a_sb, -1, 4)))) + (0.0375f * (__SBREF(__a_sb, 0, -4)))) + (0.0385f * (__SBREF(__a_sb, 0, -3)))) + (0.0395f * (__SBREF(__a_sb, 0, -2)))) + (0.0405f * (__SBREF(__a_sb, 0, -1)))) + (0.0415f * (__SBREF(__a_sb, 0, 1)))) + (0.0425f * (__SBREF(__a_sb, 0, 2)))) + (0.0435f * (__SBREF(__a_sb, 0, 3)))) + (0.0445f * (__SBREF(__a_sb, 0, 4)))) + (0.0455f * (__SBREF(__a_sb, 1, -4)))) + (0.0465f * (__SBREF(__a_sb, 1, -3)))) + (0.0475f * (__SBREF(__a_sb, 1, -2)))) + (0.0485f * (__SBREF(__a_sb, 1, -1)))) + (0.0495f * (__SBREF(__a_sb, 1, 0)))) + (0.0505f * (__SBREF(__a_sb, 1, 1)))) + (0.0515f * (__SBREF(__a_sb, 1, 2)))) + (0.0525f * (__SBREF(__a_sb, 1, 3)))) + (0.0535f * (__SBREF(__a_sb, 1, 4)))) + (0.0545f * (__SBREF(__a_sb, 2, -4)))) + (0.0555f * (__SBREF(__a_sb, 2, -3)))) + (0.0565f * (__SBREF(__a_sb, 2, -2)))) + (0.0575f * (__SBREF(__a_sb, 2, -1)))) + (0.0585f * (__SBREF(__a_sb, 2, 0)))) + (0.0595f * (__SBREF(__a_sb, 2, 1)))) + (0.0605f * (__SBREF(__a_sb, 2, 2)))) + (0.0615f * (__SBREF(__a_sb, 2, 3)))) + (0.0625f * (__SBREF(__a_sb, 2, 4)))) + (0.0635f * (__SBREF(__a_sb, 3, -4)))) + (0.0645f * (__SBREF(__a_sb, 3, -3)))) + (0.0655f * (__SBREF(__a_sb, 3, -2)))) + (0.0665f * (__SBREF(__a_sb, 3, -1)))) + (0.0675f * (__SBREF(__a_sb, 3, 0)))) + (0.0685f * (__SBREF(__a_sb, 3, 1)))) + (0.0695f * (__SBREF(__a_sb, 3, 2)))) + (0.0705f * (__SBREF(__a_sb, 3, 3)))) + (0.0715f * (__SBREF(__a_sb, 3, 4)))) + (0.0725f * (__SBREF(__a_sb, 4, -4)))) + (0.0735f * (__SBREF(__a_sb, 4, -3)))) + (0.0745f * (__SBREF(__a_sb, 4, -2)))) + (0.0755f * (__SBREF(__a_sb, 4, -1)))) + (0.0765f * (__SBREF(__a_sb, 4, 0)))) + (0.0775f * (__SBREF(__a_sb, 4, 1)))) + (0.0785f * (__SBREF(__a_sb, 4, 2)))) + (0.0795f * (__SBREF(__a_sb, 4, 3)))) + (0.0805f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.288f) * (__REGREF(__a, 0, 0)))) + (0.0016f * (__SBREF(__a_sb, -4, -4)))) + (0.0026f * (__SBREF(__a_sb, -4, -3)))) + (0.0036f * (__SBREF(__a_sb, -4, -2)))) + (0.0046f * (__SBREF(__a_sb, -4, -1)))) + (0.0056f * (__SBREF(__a_sb, -4, 0)))) + (0.0066f * (__SBREF(__a_sb, -4, 1)))) + (0.0076f * (__SBREF(__a_sb, -4, 2)))) + (0.0086f * (__SBREF(__a_sb, -4, 3)))) + (0.0096f * (__SBREF(__a_sb, -4, 4)))) + (0.0106f * (__SBREF(__a_sb, -3, -4)))) + (0.0116f * (__SBREF(__a_sb, -3, -3)))) + (0.0126f * (__SBREF(__a_sb, -3, -2)))) + (0.0136f * (__SBREF(__a_sb, -3, -1)))) + (0.0146f * (__SBREF(__a_sb, -3, 0)))) + (0.0156f * (__SBREF(__a_sb, -3, 1)))) + (0.0166f * (__SBREF(__a_sb, -3, 2)))) + (0.0176f * (__SBREF(__a_sb, -3, 3)))) + (0.0186f * (__SBREF(__a_sb, -3, 4)))) + (0.0196f * (__SBREF(__a_sb, -2, -4)))) + (0.0206f * (__SBREF(__a_sb, -2, -3)))) + (0.0216f * (__SBREF(__a_sb, -2, -2)))) + (0.0226f * (__SBREF(__a_sb, -2, -1)))) + (0.0236f * (__SBREF(__a_sb, -2, 0)))) + (0.0246f * (__SBREF(__a_sb, -2, 1)))) + (0.0256f * (__SBREF(__a_sb, -2, 2)))) + (0.0266f * (__SBREF(__a_sb, -2, 3)))) + (0.0276f * (__SBREF(__a_sb, -2, 4)))) + (0.0286f * (__SBREF(__a_sb, -1, -4)))) + (0.0296f * (__SBREF(__a_sb, -1, -3)))) + (0.0306f * (__SBREF(__a_sb, -1, -2)))) + (0.0316f * (__SBREF(__a_sb, -1, -1)))) + (0.0326f * (__SBREF(__a_sb, -1, 0)))) + (0.0336f * (__SBREF(__a_sb, -1, 1)))) + (0.0346f * (__SBREF(__a_sb, -1, 2)))) + (0.0356f * (__SBREF(__a_sb, -1, 3)))) + (0.0366f * (__SBREF(__a_sb, -1, 4)))) + (0.0376f * (__SBREF(__a_sb, 0, -4)))) + (0.0386f * (__SBREF(__a_sb, 0, -3)))) + (0.0396f * (__SBREF(__a_sb, 0, -2)))) + (0.0406f * (__SBREF(__a_sb, 0, -1)))) + (0.0416f * (__SBREF(__a_sb, 0, 1)))) + (0.0426f * (__SBREF(__a_sb, 0, 2)))) + (0.0436f * (__SBREF(__a_sb, 0, 3)))) + (0.0446f * (__SBREF(__a_sb, 0, 4)))) + (0.0456f * (__SBREF(__a_sb, 1, -4)))) + (0.0466f * (__SBREF(__a_sb, 1, -3)))) + (0.0476f * (__SBREF(__a_sb, 1, -2)))) + (0.0486f * (__SBREF(__a_sb, 1, -1)))) + (0.0496f * (__SBREF(__a_sb, 1, 0)))) + (0.0506f * (__SBREF(__a_sb, 1, 1)))) + (0.0516f * (__SBREF(__a_sb, 1, 2)))) + (0.0526f * (__SBREF(__a_sb, 1, 3)))) + (0.0536f * (__SBREF(__a_sb, 1, 4)))) + (0.0546f * (__SBREF(__a_sb, 2, -4)))) + (0.0556f * (__SBREF(__a_sb, 2, -3)))) + (0.0566f * (__SBREF(__a_sb, 2, -2)))) + (0.0576f * (__SBREF(__a_sb, 2, -1)))) + (0.0586f * (__SBREF(__a_sb, 2, 0)))) + (0.0596f * (__SBREF(__a_sb, 2, 1)))) + (0.0606f * (__SBREF(__a_sb, 2, 2)))) + (0.0616f * (__SBREF(__a_sb, 2, 3)))) + (0.0626f * (__SBREF(__a_sb, 2, 4)))) + (0.0636f * (__SBREF(__a_sb, 3, -4)))) + (0.0646f * (__SBREF(__a_sb, 3, -3)))) + (0.0656f * (__SBREF(__a_sb, 3, -2)))) + (0.0666f * (__SBREF(__a_sb, 3, -1)))) + (0.0676f * (__SBREF(__a_sb, 3, 0)))) + (0.0686f * (__SBREF(__a_sb, 3, 1)))) + (0.0696f * (__SBREF(__a_sb, 3, 2)))) + (0.0706f * (__SBREF(__a_sb, 3, 3)))) + (0.0716f * (__SBREF(__a_sb, 3, 4)))) + (0.0726f * (__SBREF(__a_sb, 4, -4)))) + (0.0736f * (__SBREF(__a_sb, 4, -3)))) + (0.0746f * (__SBREF(__a_sb, 4, -2)))) + (0.0756f * (__SBREF(__a_sb, 4, -1)))) + (0.0766f * (__SBREF(__a_sb, 4, 0)))) + (0.0776f * (__SBREF(__a_sb, 4, 1)))) + (0.0786f * (__SBREF(__a_sb, 4, 2)))) + (0.0796f * (__SBREF(__a_sb, 4, 3)))) + (0.0806f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.296f) * (__REGREF(__a, 0, 0)))) + (0.0017f * (__SBREF(__a_sb, -4, -4)))) + (0.0027f * (__SBREF(__a_sb, -4, -3)))) + (0.0037f * (__SBREF(__a_sb, -4, -2)))) + (0.0047f * (__SBREF(__a_sb, -4, -1)))) + (0.0057f * (__SBREF(__a_sb, -4, 0)))) + (0.0067f * (__SBREF(__a_sb, -4, 1)))) + (0.0077f * (__SBREF(__a_sb, -4, 2)))) + (0.0087f * (__SBREF(__a_sb, -4, 3)))) + (0.0097f * (__SBREF(__a_sb, -4, 4)))) + (0.0107f * (__SBREF(__a_sb, -3, -4)))) + (0.0117f * (__SBREF(__a_sb, -3, -3)))) + (0.0127f * (__SBREF(__a_sb, -3, -2)))) + (0.0137f * (__SBREF(__a_sb, -3, -1)))) + (0.0147f * (__SBREF(__a_sb, -3, 0)))) + (0.0157f * (__SBREF(__a_sb, -3, 1)))) + (0.0167f * (__SBREF(__a_sb, -3, 2)))) + (0.0177f * (__SBREF(__a_sb, -3, 3)))) + (0.0187f * (__SBREF(__a_sb, -3, 4)))) + (0.0197f * (__SBREF(__a_sb, -2, -4)))) + (0.0207f * (__SBREF(__a_sb, -2, -3)))) + (0.0217f * (__SBREF(__a_sb, -2, -2)))) + (0.0227f * (__SBREF(__a_sb, -2, -1)))) + (0.0237f * (__SBREF(__a_sb, -2, 0)))) + (0.0247f * (__SBREF(__a_sb, -2, 1)))) + (0.0257f * (__SBREF(__a_sb, -2, 2)))) + (0.0267f * (__SBREF(__a_sb, -2, 3)))) + (0.0277f * (__SBREF(__a_sb, -2, 4)))) + (0.0287f * (__SBREF(__a_sb, -1, -4)))) + (0.0297f * (__SBREF(__a_sb, -1, -3)))) + (0.0307f * (__SBREF(__a_sb, -1, -2)))) + (0.0317f * (__SBREF(__a_sb, -1, -1)))) + (0.0327f * (__SBREF(__a_sb, -1, 0)))) + (0.0337f * (__SBREF(__a_sb, -1, 1)))) + (0.0347f * (__SBREF(__a_sb, -1, 2)))) + (0.0357f * (__SBREF(__a_sb, -1, 3)))) + (0.0367f * (__SBREF(__a_sb, -1, 4)))) + (0.0377f * (__SBREF(__a_sb, 0, -4)))) + (0.0387f * (__SBREF(__a_sb, 0, -3)))) + (0.0397f * (__SBREF(__a_sb, 0, -2)))) + (0.0407f * (__SBREF(__a_sb, 0, -1)))) + (0.0417f * (__SBREF(__a_sb, 0, 1)))) + (0.0427f * (__SBREF(__a_sb, 0, 2)))) + (0.0437f * (__SBREF(__a_sb, 0, 3)))) + (0.0447f * (__SBREF(__a_sb, 0, 4)))) + (0.0457f * (__SBREF(__a_sb, 1, -4)))) + (0.0467f * (__SBREF(__a_sb, 1, -3)))) + (0.0477f * (__SBREF(__a_sb, 1, -2)))) + (0.0487f * (__SBREF(__a_sb, 1, -1)))) + (0.0497f * (__SBREF(__a_sb, 1, 0)))) + (0.0507f * (__SBREF(__a_sb, 1, 1)))) + (0.0517f * (__SBREF(__a_sb, 1, 2)))) + (0.0527f * (__SBREF(__a_sb, 1, 3)))) + (0.0537f * (__SBREF(__a_sb, 1, 4)))) + (0.0547f * (__SBREF(__a_sb, 2, -4)))) + (0.0557f * (__SBREF(__a_sb, 2, -3)))) + (0.0567f * (__SBREF(__a_sb, 2, -2)))) + (0.0577f * (__SBREF(__a_sb, 2, -1)))) + (0.0587f * (__SBREF(__a_sb, 2, 0)))) + (0.0597f * (__SBREF(__a_sb, 2, 1)))) + (0.0607f * (__SBREF(__a_sb, 2, 2)))) + (0.0617f * (__SBREF(__a_sb, 2, 3)))) + (0.0627f * (__SBREF(__a_sb, 2, 4)))) + (0.0637f * (__SBREF(__a_sb, 3, -4)))) + (0.0647f * (__SBREF(__a_sb, 3, -3)))) + (0.0657f * (__SBREF(__a_sb, 3, -2)))) + (0.0667f * (__SBREF(__a_sb, 3, -1)))) + (0.0677f * (__SBREF(__a_sb, 3, 0)))) + (0.0687f * (__SBREF(__a_sb, 3, 1)))) + (0.0697f * (__SBREF(__a_sb, 3, 2)))) + (0.0707f * (__SBREF(__a_sb, 3, 3)))) + (0.0717f * (__SBREF(__a_sb, 3, 4)))) + (0.0727f * (__SBREF(__a_sb, 4, -4)))) + (0.0737f * (__SBREF(__a_sb, 4, -3)))) + (0.0747f * (__SBREF(__a_sb, 4, -2)))) + (0.0757f * (__SBREF(__a_sb, 4, -1)))) + (0.0767f * (__SBREF(__a_sb, 4, 0)))) + (0.0777f * (__SBREF(__a_sb, 4, 1)))) + (0.0787f * (__SBREF(__a_sb, 4, 2)))) + (0.0797f * (__SBREF(__a_sb, 4, 3)))) + (0.0807f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.304f) * (__REGREF(__a, 0, 0)))) + (0.0018f * (__SBREF(__a_sb, -4, -4)))) + (0.0028f * (__SBREF(__a_sb, -4, -3)))) + (0.0038f * (__SBREF(__a_sb, -4, -2)))) + (0.0048f * (__SBREF(__a_sb, -4, -1)))) + (0.0058f * (__SBREF(__a_sb, -4, 0)))) + (0.0068f * (__SBREF(__a_sb, -4, 1)))) + (0.0078f * (__SBREF(__a_sb, -4, 2)))) + (0.0088f * (__SBREF(__a_sb, -4, 3)))) + (0.0098f * (__SBREF(__a_sb, -4, 4)))) + (0.0108f * (__SBREF(__a_sb, -3, -4)))) + (0.0118f * (__SBREF(__a_sb, -3, -3)))) + (0.0128f * (__SBREF(__a_sb, -3, -2)))) + (0.0138f * (__SBREF(__a_sb, -3, -1)))) + (0.0148f * (__SBREF(__a_sb, -3, 0)))) + (0.0158f * (__SBREF(__a_sb, -3, 1)))) + (0.0168f * (__SBREF(__a_sb, -3, 2)))) + (0.0178f * (__SBREF(__a_sb, -3, 3)))) + (0.0188f * (__SBREF(__a_sb, -3, 4)))) + (0.0198f * (__SBREF(__a_sb, -2, -4)))) + (0.0208f * (__SBREF(__a_sb, -2, -3)))) + (0.0218f * (__SBREF(__a_sb, -2, -2)))) + (0.0228f * (__SBREF(__a_sb, -2, -1)))) + (0.0238f * (__SBREF(__a_sb, -2, 0)))) + (0.0248f * (__SBREF(__a_sb, -2, 1)))) + (0.0258f * (__SBREF(__a_sb, -2, 2)))) + (0.0268f * (__SBREF(__a_sb, -2, 3)))) + (0.0278f * (__SBREF(__a_sb, -2, 4)))) + (0.0288f * (__SBREF(__a_sb, -1, -4)))) + (0.0298f * (__SBREF(__a_sb, -1, -3)))) + (0.0308f * (__SBREF(__a_sb, -1, -2)))) + (0.0318f * (__SBREF(__a_sb, -1, -1)))) + (0.0328f * (__SBREF(__a_sb, -1, 0)))) + (0.0338f * (__SBREF(__a_sb, -1, 1)))) + (0.0348f * (__SBREF(__a_sb, -1, 2)))) + (0.0358f * (__SBREF(__a_sb, -1, 3)))) + (0.0368f * (__SBREF(__a_sb, -1, 4)))) + (0.0378f * (__SBREF(__a_sb, 0, -4)))) + (0.0388f * (__SBREF(__a_sb, 0, -3)))) + (0.0398f * (__SBREF(__a_sb, 0, -2)))) + (0.0408f * (__SBREF(__a_sb, 0, -1)))) + (0.0418f * (__SBREF(__a_sb, 0, 1)))) + (0.0428f * (__SBREF(__a_sb, 0, 2)))) + (0.0438f * (__SBREF(__a_sb, 0, 3)))) + (0.0448f * (__SBREF(__a_sb, 0, 4)))) + (0.0458f * (__SBREF(__a_sb, 1, -4)))) + (0.0468f * (__SBREF(__a_sb, 1, -3)))) + (0.0478f * (__SBREF(__a_sb, 1, -2)))) + (0.0488f * (__SBREF(__a_sb, 1, -1)))) + (0.0498f * (__SBREF(__a_sb, 1, 0)))) + (0.0508f * (__SBREF(__a_sb, 1, 1)))) + (0.0518f * (__SBREF(__a_sb, 1, 2)))) + (0.0528f * (__SBREF(__a_sb, 1, 3)))) + (0.0538f * (__SBREF(__a_sb, 1, 4)))) + (0.0548f * (__SBREF(__a_sb, 2, -4)))) + (0.0558f * (__SBREF(__a_sb, 2, -3)))) + (0.0568f * (__SBREF(__a_sb, 2, -2)))) + (0.0578f * (__SBREF(__a_sb, 2, -1)))) + (0.0588f * (__SBREF(__a_sb, 2, 0)))) + (0.0598f * (__SBREF(__a_sb, 2, 1)))) + (0.0608f * (__SBREF(__a_sb, 2, 2)))) + (0.0618f * (__SBREF(__a_sb, 2, 3)))) + (0.0628f * (__SBREF(__a_sb, 2, 4)))) + (0.0638f * (__SBREF(__a_sb, 3, -4)))) + (0.0648f * (__SBREF(__a_sb, 3, -3)))) + (0.0658f * (__SBREF(__a_sb, 3, -2)))) + (0.0668f * (__SBREF(__a_sb, 3, -1)))) + (0.0678f * (__SBREF(__a_sb, 3, 0)))) + (0.0688f * (__SBREF(__a_sb, 3, 1)))) + (0.0698f * (__SBREF(__a_sb, 3, 2)))) + (0.0708f * (__SBREF(__a_sb, 3, 3)))) + (0.0718f * (__SBREF(__a_sb, 3, 4)))) + (0.0728f * (__SBREF(__a_sb, 4, -4)))) + (0.0738f * (__SBREF(__a_sb, 4, -3)))) + (0.0748f * (__SBREF(__a_sb, 4, -2)))) + (0.0758f * (__SBREF(__a_sb, 4, -1)))) + (0.0768f * (__SBREF(__a_sb, 4, 0)))) + (0.0778f * (__SBREF(__a_sb, 4, 1)))) + (0.0788f * (__SBREF(__a_sb, 4, 2)))) + (0.0798f * (__SBREF(__a_sb, 4, 3)))) + (0.0808f * (__SBREF(__a_sb, 4, 4)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
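// Note on the generated code below: each __LOAD pulls one c1-plane into
// __reg_0, and __CALC1 scatters its contribution into nine rotating register
// slots (__reg_1_0..__reg_1_8), one per output plane within the radius-4
// halo. A plane's sum is complete only after planes h-4..h+4 have all
// contributed, so __STORE always trails the most recent __LOAD by four.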
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(4, __reg_1_4);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
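// Pipeline drain: exactly one of the branches below fires, depending on how
// many planes (between 4 and 12 past __h) remain in this block's slice, and
// flushes the partial sums still held in the rotating registers.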
if (0) {}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
}
else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
}
else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
}
else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 2, __reg_1_7);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 1, __reg_1_8);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 9);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
__LOAD(__reg_0, __h + 10);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h + 6, __reg_1_6);
__LOAD(__reg_0, __h + 11);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0);
__STORE(__h + 7, __reg_1_7);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 4, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0);
__STORE(__h - 4, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0);
__STORE(__h - 4, __reg_1_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0);
__STORE(__h - 4, __reg_1_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 4, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 4, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 4, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 4, __reg_1_4);
__h++;
}
}
|
faab3a1dcb252876af9e5ec394848435c89a8852.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <atomic>
#include <limits>
#include <mutex>
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h"
#include "CUDADataFormats/SiPixelCluster/interface/SiPixelClustersCUDA.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudastdAlgorithm.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.h"
#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforGPU.h"
#include "ClusterSLOnGPU.h"
using ClusterSLGPU = trackerHitAssociationHeterogeneousProduct::ClusterSLGPU;
__global__
void simLink(const SiPixelDigisCUDA::DeviceConstView *dd, uint32_t ndigis, const SiPixelClustersCUDA::DeviceConstView *cc, clusterSLOnGPU::HitsOnGPU const * hhp, ClusterSLGPU const * slp, uint32_t n)
{
assert(slp == slp->me_d);
constexpr int32_t invTK = 0; // std::numeric_limits<int32_t>::max();
constexpr uint16_t InvId = 9999; // must be > MaxNumModules
auto const & hh = *hhp;
auto const & sl = *slp;
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= ndigis)
return;
auto id = dd->moduleInd(i);
if (InvId == id)
return;
assert(id < 2000);
auto ch = pixelgpudetails::pixelToChannel(dd->xx(i), dd->yy(i));
auto first = hh.hitsModuleStart_d[id];
auto cl = first + cc->clus(i);
assert(cl < 2000 * blockDim.x);
const std::array<uint32_t, 4> me{{id, ch, 0, 0}};
auto less = [] __host__ __device__ (std::array<uint32_t, 4> const & a, std::array<uint32_t, 4> const & b)->bool {
// in this context we do not care about [2]
return a[0] < b[0] or (not b[0] < a[0] and a[1] < b[1]);
};
auto equal = [] __host__ __device__ (std::array<uint32_t, 4> const & a, std::array<uint32_t, 4> const & b)->bool {
// in this context we do not care about [2]
return a[0] == b[0] and a[1] == b[1];
};
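// The link table is sorted by (module id, channel), so the sim link matching
// this digi can be located with a device-side binary search over links_d.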
auto const * b = sl.links_d;
auto const * e = b + n;
auto p = cuda_std::lower_bound(b, e, me, less);
int32_t j = p-sl.links_d;
assert(j >= 0);
auto getTK = [&](int i) { auto const & l = sl.links_d[i]; return l[2];};
j = ::min(int(j), int(n-1));
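// Record up to two distinct tracks per cluster: tkId_d/n1_d hold the first
// track that claims the cluster (via atomicCAS), tkId2_d/n2_d a second,
// different one; n1/n2 count the digis attributed to each.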
if (equal(me, sl.links_d[j])) {
auto const itk = j;
auto const tk = getTK(j);
auto old = atomicCAS(&sl.tkId_d[cl], invTK, itk);
if (invTK == old or tk == getTK(old)) {
atomicAdd(&sl.n1_d[cl], 1);
} else {
auto old = atomicCAS(&sl.tkId2_d[cl], invTK, itk);
if (invTK == old or tk == getTK(old)) atomicAdd(&sl.n2_d[cl], 1);
}
}
}
__global__
void verifyZero(uint32_t nhits, ClusterSLGPU const * slp) {
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= nhits)
return;
auto const & sl = *slp;
assert(sl.tkId_d[i]==0);
auto const & tk = sl.links_d[0];
assert(tk[0]==0);
assert(tk[1]==0);
assert(tk[2]==0);
assert(tk[3]==0);
}
__global__
void dumpLink(int first, int ev, clusterSLOnGPU::HitsOnGPU const * hhp, uint32_t nhits, ClusterSLGPU const * slp) {
auto i = first + blockIdx.x*blockDim.x + threadIdx.x;
if (i>=nhits) return;
auto const & hh = *hhp;
auto const & sl = *slp;
/* just an example of using the global errors....
assert(hh.cpeParams);
float ge[6];
hh.cpeParams->detParams(hh.detInd_d[i]).frame.toGlobal(hh.xerr_d[i], 0, hh.yerr_d[i],ge);
printf("Error: %d: %f,%f,%f,%f,%f,%f\n",hh.detInd_d[i],ge[0],ge[1],ge[2],ge[3],ge[4],ge[5]);
*/
auto const & tk1 = sl.links_d[sl.tkId_d[i]];
auto const & tk2 = sl.links_d[sl.tkId2_d[i]];
printf("HIT: %d %d %d %d %f %f %f %f %d %d %d %d %d %d %d\n", ev, i,
hh.detInd_d[i], hh.charge_d[i],
hh.xg_d[i], hh.yg_d[i], hh.zg_d[i], hh.rg_d[i], hh.iphi_d[i],
tk1[2], tk1[3], sl.n1_d[i],
tk2[2], tk2[3], sl.n2_d[i]
);
}
namespace clusterSLOnGPU {
constexpr uint32_t invTK = 0; // std::numeric_limits<int32_t>::max();
void printCSVHeader() {
printf("HIT: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n", "ev", "ind",
"det", "charge",
"xg","yg","zg","rg","iphi",
"tkId","pt","n1","tkId2","pt2","n2"
);
}
std::atomic<int> evId(0);
std::once_flag doneCSVHeader;
Kernel::Kernel(cuda::stream_t<>& stream, bool dump) : doDump(dump) {
if (doDump) std::call_once(doneCSVHeader, printCSVHeader);
alloc(stream);
}
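// Allocation scheme: links_d holds one 4-word entry per digi (up to
// MAX_DIGIS); the per-cluster counters are sized MaxNumModules*256, i.e. one
// slot per possible cluster. me_d keeps a device-resident copy of this
// struct so kernels can assert they were handed the right pointer (see the
// assert at the top of simLink).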
void Kernel::alloc(cuda::stream_t<>& stream) {
cudaCheck(hipMalloc((void**) & slgpu.links_d, (ClusterSLGPU::MAX_DIGIS)*sizeof(std::array<uint32_t, 4>)));
cudaCheck(hipMalloc((void**) & slgpu.tkId_d, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t)));
cudaCheck(hipMalloc((void**) & slgpu.tkId2_d, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t)));
cudaCheck(hipMalloc((void**) & slgpu.n1_d, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t)));
cudaCheck(hipMalloc((void**) & slgpu.n2_d, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t)));
cudaCheck(hipMalloc((void**) & slgpu.me_d, sizeof(ClusterSLGPU)));
cudaCheck(hipMemcpyAsync(slgpu.me_d, &slgpu, sizeof(ClusterSLGPU), hipMemcpyDefault, stream.id()));
}
void Kernel::deAlloc() {
cudaCheck(hipFree(slgpu.links_d));
cudaCheck(hipFree(slgpu.tkId_d));
cudaCheck(hipFree(slgpu.tkId2_d));
cudaCheck(hipFree(slgpu.n1_d));
cudaCheck(hipFree(slgpu.n2_d));
cudaCheck(hipFree(slgpu.me_d));
}
void Kernel::zero(hipStream_t stream) {
cudaCheck(hipMemsetAsync(slgpu.tkId_d, invTK, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t), stream));
cudaCheck(hipMemsetAsync(slgpu.tkId2_d, invTK, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t), stream));
cudaCheck(hipMemsetAsync(slgpu.n1_d, 0, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t), stream));
cudaCheck(hipMemsetAsync(slgpu.n2_d, 0, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t), stream));
}
void Kernel::algo(DigisOnGPU const & dd, uint32_t ndigis, HitsOnCPU const & hh, uint32_t nhits, uint32_t n, cuda::stream_t<>& stream) {
zero(stream.id());
ClusterSLGPU const & sl = slgpu;
int ev = ++evId;
int threadsPerBlock = 256;
int blocks = (nhits + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( verifyZero), dim3(blocks), dim3(threadsPerBlock), 0, stream.id(), nhits, sl.me_d);
cudaCheck(hipGetLastError());
blocks = (ndigis + threadsPerBlock - 1) / threadsPerBlock;
assert(sl.me_d);
hipLaunchKernelGGL(( simLink), dim3(blocks), dim3(threadsPerBlock), 0, stream.id(), dd.digis_d.view(), ndigis, dd.clusters_d.view(), hh.gpu_d, sl.me_d, n);
cudaCheck(hipGetLastError());
if (doDump) {
hipStreamSynchronize(stream.id()); // flush previous printf
// one line == 200B so each kernel can print only 5K lines....
blocks = 16; // (nhits + threadsPerBlock - 1) / threadsPerBlock;
for (int first=0; first<int(nhits); first+=blocks*threadsPerBlock) {
hipLaunchKernelGGL(( dumpLink), dim3(blocks), dim3(threadsPerBlock), 0, stream.id(), first, ev, hh.gpu_d, nhits, sl.me_d);
cudaCheck(hipGetLastError());
hipStreamSynchronize(stream.id());
}
}
cudaCheck(hipGetLastError());
}
}
| faab3a1dcb252876af9e5ec394848435c89a8852.cu | #include <atomic>
#include <limits>
#include <mutex>
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h"
#include "CUDADataFormats/SiPixelCluster/interface/SiPixelClustersCUDA.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudastdAlgorithm.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/SiPixelRawToClusterGPUKernel.h"
#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforGPU.h"
#include "ClusterSLOnGPU.h"
using ClusterSLGPU = trackerHitAssociationHeterogeneousProduct::ClusterSLGPU;
__global__
void simLink(const SiPixelDigisCUDA::DeviceConstView *dd, uint32_t ndigis, const SiPixelClustersCUDA::DeviceConstView *cc, clusterSLOnGPU::HitsOnGPU const * hhp, ClusterSLGPU const * slp, uint32_t n)
{
assert(slp == slp->me_d);
constexpr int32_t invTK = 0; // std::numeric_limits<int32_t>::max();
constexpr uint16_t InvId = 9999; // must be > MaxNumModules
auto const & hh = *hhp;
auto const & sl = *slp;
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= ndigis)
return;
auto id = dd->moduleInd(i);
if (InvId == id)
return;
assert(id < 2000);
auto ch = pixelgpudetails::pixelToChannel(dd->xx(i), dd->yy(i));
auto first = hh.hitsModuleStart_d[id];
auto cl = first + cc->clus(i);
assert(cl < 2000 * blockDim.x);
const std::array<uint32_t, 4> me{{id, ch, 0, 0}};
auto less = [] __host__ __device__ (std::array<uint32_t, 4> const & a, std::array<uint32_t, 4> const & b)->bool {
// in this context we do not care about [2]
return a[0] < b[0] or (not b[0] < a[0] and a[1] < b[1]);
};
auto equal = [] __host__ __device__ (std::array<uint32_t, 4> const & a, std::array<uint32_t, 4> const & b)->bool {
// in this context we do not care about [2]
return a[0] == b[0] and a[1] == b[1];
};
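// The link table is sorted by (module id, channel), so the sim link matching
// this digi can be located with a device-side binary search over links_d.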
auto const * b = sl.links_d;
auto const * e = b + n;
auto p = cuda_std::lower_bound(b, e, me, less);
int32_t j = p-sl.links_d;
assert(j >= 0);
auto getTK = [&](int i) { auto const & l = sl.links_d[i]; return l[2];};
j = std::min(int(j), int(n-1));
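// Record up to two distinct tracks per cluster: tkId_d/n1_d hold the first
// track that claims the cluster (via atomicCAS), tkId2_d/n2_d a second,
// different one; n1/n2 count the digis attributed to each.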
if (equal(me, sl.links_d[j])) {
auto const itk = j;
auto const tk = getTK(j);
auto old = atomicCAS(&sl.tkId_d[cl], invTK, itk);
if (invTK == old or tk == getTK(old)) {
atomicAdd(&sl.n1_d[cl], 1);
} else {
auto old = atomicCAS(&sl.tkId2_d[cl], invTK, itk);
if (invTK == old or tk == getTK(old)) atomicAdd(&sl.n2_d[cl], 1);
}
}
}
__global__
void verifyZero(uint32_t nhits, ClusterSLGPU const * slp) {
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= nhits)
return;
auto const & sl = *slp;
assert(sl.tkId_d[i]==0);
auto const & tk = sl.links_d[0];
assert(tk[0]==0);
assert(tk[1]==0);
assert(tk[2]==0);
assert(tk[3]==0);
}
__global__
void dumpLink(int first, int ev, clusterSLOnGPU::HitsOnGPU const * hhp, uint32_t nhits, ClusterSLGPU const * slp) {
auto i = first + blockIdx.x*blockDim.x + threadIdx.x;
if (i>=nhits) return;
auto const & hh = *hhp;
auto const & sl = *slp;
/* just an example of using the global errors....
assert(hh.cpeParams);
float ge[6];
hh.cpeParams->detParams(hh.detInd_d[i]).frame.toGlobal(hh.xerr_d[i], 0, hh.yerr_d[i],ge);
printf("Error: %d: %f,%f,%f,%f,%f,%f\n",hh.detInd_d[i],ge[0],ge[1],ge[2],ge[3],ge[4],ge[5]);
*/
auto const & tk1 = sl.links_d[sl.tkId_d[i]];
auto const & tk2 = sl.links_d[sl.tkId2_d[i]];
printf("HIT: %d %d %d %d %f %f %f %f %d %d %d %d %d %d %d\n", ev, i,
hh.detInd_d[i], hh.charge_d[i],
hh.xg_d[i], hh.yg_d[i], hh.zg_d[i], hh.rg_d[i], hh.iphi_d[i],
tk1[2], tk1[3], sl.n1_d[i],
tk2[2], tk2[3], sl.n2_d[i]
);
}
namespace clusterSLOnGPU {
constexpr uint32_t invTK = 0; // std::numeric_limits<int32_t>::max();
void printCSVHeader() {
printf("HIT: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n", "ev", "ind",
"det", "charge",
"xg","yg","zg","rg","iphi",
"tkId","pt","n1","tkId2","pt2","n2"
);
}
std::atomic<int> evId(0);
std::once_flag doneCSVHeader;
Kernel::Kernel(cuda::stream_t<>& stream, bool dump) : doDump(dump) {
if (doDump) std::call_once(doneCSVHeader, printCSVHeader);
alloc(stream);
}
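// Allocation scheme: links_d holds one 4-word entry per digi (up to
// MAX_DIGIS); the per-cluster counters are sized MaxNumModules*256, i.e. one
// slot per possible cluster. me_d keeps a device-resident copy of this
// struct so kernels can assert they were handed the right pointer (see the
// assert at the top of simLink).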
void Kernel::alloc(cuda::stream_t<>& stream) {
cudaCheck(cudaMalloc((void**) & slgpu.links_d, (ClusterSLGPU::MAX_DIGIS)*sizeof(std::array<uint32_t, 4>)));
cudaCheck(cudaMalloc((void**) & slgpu.tkId_d, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t)));
cudaCheck(cudaMalloc((void**) & slgpu.tkId2_d, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t)));
cudaCheck(cudaMalloc((void**) & slgpu.n1_d, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t)));
cudaCheck(cudaMalloc((void**) & slgpu.n2_d, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t)));
cudaCheck(cudaMalloc((void**) & slgpu.me_d, sizeof(ClusterSLGPU)));
cudaCheck(cudaMemcpyAsync(slgpu.me_d, &slgpu, sizeof(ClusterSLGPU), cudaMemcpyDefault, stream.id()));
}
void Kernel::deAlloc() {
cudaCheck(cudaFree(slgpu.links_d));
cudaCheck(cudaFree(slgpu.tkId_d));
cudaCheck(cudaFree(slgpu.tkId2_d));
cudaCheck(cudaFree(slgpu.n1_d));
cudaCheck(cudaFree(slgpu.n2_d));
cudaCheck(cudaFree(slgpu.me_d));
}
void Kernel::zero(cudaStream_t stream) {
cudaCheck(cudaMemsetAsync(slgpu.tkId_d, invTK, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t), stream));
cudaCheck(cudaMemsetAsync(slgpu.tkId2_d, invTK, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t), stream));
cudaCheck(cudaMemsetAsync(slgpu.n1_d, 0, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t), stream));
cudaCheck(cudaMemsetAsync(slgpu.n2_d, 0, (ClusterSLGPU::MaxNumModules*256)*sizeof(uint32_t), stream));
}
void Kernel::algo(DigisOnGPU const & dd, uint32_t ndigis, HitsOnCPU const & hh, uint32_t nhits, uint32_t n, cuda::stream_t<>& stream) {
zero(stream.id());
ClusterSLGPU const & sl = slgpu;
int ev = ++evId;
int threadsPerBlock = 256;
int blocks = (nhits + threadsPerBlock - 1) / threadsPerBlock;
verifyZero<<<blocks, threadsPerBlock, 0, stream.id()>>>(nhits, sl.me_d);
cudaCheck(cudaGetLastError());
blocks = (ndigis + threadsPerBlock - 1) / threadsPerBlock;
assert(sl.me_d);
simLink<<<blocks, threadsPerBlock, 0, stream.id()>>>(dd.digis_d.view(), ndigis, dd.clusters_d.view(), hh.gpu_d, sl.me_d, n);
cudaCheck(cudaGetLastError());
if (doDump) {
cudaStreamSynchronize(stream.id()); // flush previous printf
// one line == 200B so each kernel can print only 5K lines....
blocks = 16; // (nhits + threadsPerBlock - 1) / threadsPerBlock;
for (int first=0; first<int(nhits); first+=blocks*threadsPerBlock) {
dumpLink<<<blocks, threadsPerBlock, 0, stream.id()>>>(first, ev, hh.gpu_d, nhits, sl.me_d);
cudaCheck(cudaGetLastError());
cudaStreamSynchronize(stream.id());
}
}
cudaCheck(cudaGetLastError());
}
}
|
232c60cfc9871607ffcb3b66799a7aa0ca028bc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/cumsum_kernel.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#ifdef __NVCC__
#include <hipcub/hipcub.hpp>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
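// Under the HIP build the alias above lets the cub:: names used later in
// this file (e.g. cub::BLOCK_LOAD_TRANSPOSE) resolve to their hipcub
// equivalents.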
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, int BLOCK_SIZE>
__device__ void BlockReverse(
const T* idata, T* odata, int src_base, int dst_base, int valid_item) {
__shared__ T sh_mem[BLOCK_SIZE];
int tx = threadIdx.x;
int offset = tx;
int in_index = src_base + offset;
if (offset >= valid_item) {
sh_mem[offset] = 0;
} else {
int sh_mem_index = BLOCK_SIZE - offset - 1;
T data = idata[in_index];
sh_mem[sh_mem_index] = data;
}
__syncthreads();
int out_index = dst_base - offset;
if (offset < valid_item) {
int sh_mem_index = BLOCK_SIZE - offset - 1;
odata[out_index] = sh_mem[sh_mem_index];
}
}
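// Sketch of the index mirroring above (comment only): with BLOCK_SIZE = 4 and
// valid_item = 4, thread tx reads idata[src_base + tx] into sh_mem[3 - tx] and
// then writes it back to odata[dst_base - tx], so a segment [a b c d] comes
// out reversed as [d c b a].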
template <typename T>
__global__ void MatrixRowReverse(const T* matrix_data,
T* reverse_data,
int reverse_size,
int outer_size,
int inner_size) {
int bx = blockIdx.x;
int by = blockIdx.y;
int item_per_block = 1024;
for (int block_offset = 0; block_offset < reverse_size;
block_offset += item_per_block) {
int valid_item = (reverse_size - block_offset > item_per_block)
? item_per_block
: reverse_size - block_offset;
int src_offset =
bx * reverse_size + block_offset + by * (inner_size * reverse_size);
int dst_offset = bx * reverse_size + by * (inner_size * reverse_size) +
reverse_size - 1 - block_offset;
if (reverse_size < item_per_block) {
valid_item = reverse_size;
}
BlockReverse<T, 1024>(
matrix_data, reverse_data, src_offset, dst_offset, valid_item);
}
}
template <typename T>
struct BlockPrefixCallbackOp {
// Running prefix
T running_total;
// Constructor
__device__ BlockPrefixCallbackOp(T running_total)
: running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ T operator()(T block_aggregate) {
T old_prefix = running_total;
running_total = old_prefix + block_aggregate;
return old_prefix;
}
};
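// Worked example (comment only): if successive tiles reduce to block
// aggregates 5, 7, 2, the callback returns 0, then 5, then 12 -- each tile's
// scan is seeded with the running total of everything scanned before it.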
// No bank-conflict transpose
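// The +1 column of padding in the tile below staggers rows across the (usual
// 32) shared-memory banks, so a column read tile[threadIdx.x][j] touches 32
// different banks instead of hitting one bank 32 times.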
template <typename T, int TILE_DIM, int BLOCK_ROWS>
__global__ void MatrixTranspose(T* odata,
const T* idata,
size_t height,
size_t width) {
__shared__ T tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < width && (y + j) < height) {
tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
} else {
tile[threadIdx.y + j][threadIdx.x] = 0;
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < height && (y + j) < width) {
odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
}
template <typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void BlockScanKernel(T* d_out,
const T* d_in,
int inner_size,
int outer_size,
int scan_size,
bool exclusive) {
// Specialize BlockLoad, BlockStore, and BlockRadixSort collective types
typedef hipcub::
BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_LOAD_TRANSPOSE>
BlockLoadT;
typedef hipcub::
BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_STORE_TRANSPOSE>
BlockStoreT;
typedef hipcub::BlockScan<T, BLOCK_THREADS> BlockScanT;
// Allocate type-safe, repurposable shared memory for collectives
__shared__ union {
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
int bx = blockIdx.x;
int by = blockIdx.y;
BlockPrefixCallbackOp<T> prefix_op(0);
T block_aggregate = static_cast<T>(0);
// Obtain this block's segment of consecutive keys (blocked across threads)
int item_per_block = BLOCK_THREADS * ITEMS_PER_THREAD;
for (int block_offset = 0; block_offset < scan_size;
block_offset += BLOCK_THREADS * ITEMS_PER_THREAD) {
int valid_item = (scan_size - block_offset > item_per_block)
? item_per_block
: (scan_size - block_offset);
if (scan_size < item_per_block) {
valid_item = scan_size;
}
int offset = bx * scan_size + block_offset + by * (inner_size * scan_size);
T thread_keys[ITEMS_PER_THREAD];
BlockLoadT(temp_storage.load)
.Load(d_in + offset, thread_keys, valid_item, 0);
__syncthreads();
if (exclusive) {
BlockScanT(temp_storage.scan)
.ExclusiveScan(thread_keys, thread_keys, hipcub::Sum(), prefix_op);
} else {
BlockScanT(temp_storage.scan)
.InclusiveScan(thread_keys, thread_keys, hipcub::Sum(), prefix_op);
}
__syncthreads();
BlockStoreT(temp_storage.store)
.Store(d_out + offset, thread_keys, valid_item);
}
}
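// Scan semantics (comment only): for a segment [1 2 3], the inclusive scan
// above yields [1 3 6] while the exclusive variant yields [0 1 3]; prefix_op
// carries the running total across successive tiles of the same segment.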
template <typename T, typename Context>
void CumsumKernel(const Context& dev_ctx,
const DenseTensor& x,
int axis,
bool flatten,
bool exclusive,
bool reverse,
DenseTensor* out) {
auto out_dims = out->dims();
auto size = x.numel();
PADDLE_ENFORCE_EQ(
axis < out_dims.size() && axis >= (0 - out_dims.size()),
true,
phi::errors::OutOfRange(
"Attr(axis) is out of range, It's expected "
"to be in range of [-%d, %d]. But received Attr(axis) = %d.",
out_dims.size(),
out_dims.size() - 1,
axis));
if (axis < 0) {
axis += out_dims.size();
}
T* out_data = dev_ctx.template Alloc<T>(out);
const T* in_data = x.data<T>();
// Use thrust for parallel acceleration when the input size is equal to the
// length of the axis dimension.
if (size == out_dims[axis]) {
if (reverse) {
thrust::device_ptr<const T> dev_ptr =
thrust::device_pointer_cast(in_data);
thrust::device_vector<T> vec(dev_ptr, dev_ptr + size);
if (exclusive) {
thrust::exclusive_scan(
thrust::device, vec.rbegin(), vec.rend(), out_data);
} else {
thrust::inclusive_scan(
thrust::device, vec.rbegin(), vec.rend(), out_data);
}
thrust::reverse(thrust::device, out_data, out_data + size);
} else {
if (exclusive) {
thrust::exclusive_scan(
thrust::device, in_data, in_data + size, out_data);
} else {
thrust::inclusive_scan(
thrust::device, in_data, in_data + size, out_data);
}
}
return;
}
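// Shape factorization (illustrative): for x of shape (2, 3, 4) with axis = 1,
// height = 2*3 = 6, width = 4 and scan_size = 3, giving outer_size =
// height/scan_size = 2 and inner_size = 4; each of the 2*4 rows along the
// axis is scanned independently.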
size_t height = 1;
size_t width = 1;
for (size_t i = 0; i <= axis; i++) {
height *= out_dims[i];
}
for (size_t i = axis + 1; i < out_dims.size(); i++) {
width *= out_dims[i];
}
int scan_size = out_dims[axis];
bool transpose = (axis != out_dims.size() - 1);
int tile_size = 32;
dim3 blocks(32, 8);
dim3 transpose_grids((width + tile_size - 1) / tile_size,
(height + tile_size - 1) / tile_size);
out->Resize(out_dims);
auto* tmp_data = out->data<T>();
T* next_in_data = out_data;
T* next_out_data = tmp_data;
if (transpose) {
hipLaunchKernelGGL(( MatrixTranspose<T, 32, 8>), dim3(transpose_grids), dim3(blocks), 0, dev_ctx.stream(),
out_data, in_data, height, width);
next_in_data = out_data;
next_out_data = tmp_data;
}
auto swap_ptr = [](T*& ptr1, T*& ptr2) {
T* tmp = ptr2;
ptr2 = ptr1;
ptr1 = tmp;
};
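// Note: tmp_data is taken from the same tensor as out_data, so next_in_data
// and next_out_data point at the same storage here; swap_ptr keeps the stage
// bookkeeping (reverse, scan, reverse, transpose) symmetric rather than
// alternating two distinct buffers.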
int outer_size = height / scan_size;
int inner_size = width;
// Consider the size of shared memory, here block size is 128
dim3 scan_grid(outer_size, inner_size);
dim3 reverse_grid = scan_grid;
if (reverse) {
if (transpose) {
reverse_grid.x = scan_grid.y;
reverse_grid.y = scan_grid.x;
hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid), dim3(1024), 0, dev_ctx.stream(),
next_in_data, next_out_data, scan_size, outer_size, inner_size);
swap_ptr(next_in_data, next_out_data);
} else {
hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid), dim3(1024), 0, dev_ctx.stream(),
in_data, out_data, scan_size, outer_size, inner_size);
}
}
if (!transpose && !reverse) {
hipLaunchKernelGGL(( BlockScanKernel<T, 128, 4>), dim3(scan_grid), dim3(128), 0, dev_ctx.stream(),
out_data, in_data, outer_size, inner_size, scan_size, exclusive);
} else {
hipLaunchKernelGGL(( BlockScanKernel<T, 128, 4>), dim3(scan_grid), dim3(128), 0, dev_ctx.stream(),
next_out_data,
next_in_data,
outer_size,
inner_size,
scan_size,
exclusive);
}
swap_ptr(next_in_data, next_out_data);
if (reverse) {
hipLaunchKernelGGL(( MatrixRowReverse<T>), dim3(reverse_grid), dim3(1024), 0, dev_ctx.stream(),
next_in_data, next_out_data, scan_size, outer_size, inner_size);
swap_ptr(next_in_data, next_out_data);
}
if (transpose) {
transpose_grids.x = (height + tile_size - 1) / tile_size;
transpose_grids.y = (width + tile_size - 1) / tile_size;
hipLaunchKernelGGL(( MatrixTranspose<T, 32, 8>), dim3(transpose_grids), dim3(blocks), 0, dev_ctx.stream(),
next_out_data, next_in_data, width, height);
}
}
} // namespace phi
PD_REGISTER_KERNEL(cumsum,
GPU,
ALL_LAYOUT,
phi::CumsumKernel,
float,
double,
int16_t,
int,
int64_t) {}
| 232c60cfc9871607ffcb3b66799a7aa0ca028bc5.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/cumsum_kernel.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#ifdef __NVCC__
#include <cub/cub.cuh>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
template <typename T, int BLOCK_SIZE>
__device__ void BlockReverse(
const T* idata, T* odata, int src_base, int dst_base, int valid_item) {
__shared__ T sh_mem[BLOCK_SIZE];
int tx = threadIdx.x;
int offset = tx;
int in_index = src_base + offset;
if (offset >= valid_item) {
sh_mem[offset] = 0;
} else {
int sh_mem_index = BLOCK_SIZE - offset - 1;
T data = idata[in_index];
sh_mem[sh_mem_index] = data;
}
__syncthreads();
int out_index = dst_base - offset;
if (offset < valid_item) {
int sh_mem_index = BLOCK_SIZE - offset - 1;
odata[out_index] = sh_mem[sh_mem_index];
}
}
template <typename T>
__global__ void MatrixRowReverse(const T* matrix_data,
T* reverse_data,
int reverse_size,
int outer_size,
int inner_size) {
int bx = blockIdx.x;
int by = blockIdx.y;
int item_per_block = 1024;
for (int block_offset = 0; block_offset < reverse_size;
block_offset += item_per_block) {
int valid_item = (reverse_size - block_offset > item_per_block)
? item_per_block
: reverse_size - block_offset;
int src_offset =
bx * reverse_size + block_offset + by * (inner_size * reverse_size);
int dst_offset = bx * reverse_size + by * (inner_size * reverse_size) +
reverse_size - 1 - block_offset;
if (reverse_size < item_per_block) {
valid_item = reverse_size;
}
BlockReverse<T, 1024>(
matrix_data, reverse_data, src_offset, dst_offset, valid_item);
}
}
template <typename T>
struct BlockPrefixCallbackOp {
// Running prefix
T running_total;
// Constructor
__device__ BlockPrefixCallbackOp(T running_total)
: running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide
// scan.
__device__ T operator()(T block_aggregate) {
T old_prefix = running_total;
running_total = old_prefix + block_aggregate;
return old_prefix;
}
};
// No bank-conflict transpose
template <typename T, int TILE_DIM, int BLOCK_ROWS>
__global__ void MatrixTranspose(T* odata,
const T* idata,
size_t height,
size_t width) {
__shared__ T tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < width && (y + j) < height) {
tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
} else {
tile[threadIdx.y + j][threadIdx.x] = 0;
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
if (x < height && (y + j) < width) {
odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
}
template <typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void BlockScanKernel(T* d_out,
const T* d_in,
int inner_size,
int outer_size,
int scan_size,
bool exclusive) {
// Specialize BlockLoad, BlockStore, and BlockRadixSort collective types
typedef cub::
BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_LOAD_TRANSPOSE>
BlockLoadT;
typedef cub::
BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD, cub::BLOCK_STORE_TRANSPOSE>
BlockStoreT;
typedef cub::BlockScan<T, BLOCK_THREADS> BlockScanT;
// Allocate type-safe, repurposable shared memory for collectives
__shared__ union {
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
int bx = blockIdx.x;
int by = blockIdx.y;
BlockPrefixCallbackOp<T> prefix_op(0);
T block_aggregate = static_cast<T>(0);
// Obtain this block's segment of consecutive keys (blocked across threads)
int item_per_block = BLOCK_THREADS * ITEMS_PER_THREAD;
for (int block_offset = 0; block_offset < scan_size;
block_offset += BLOCK_THREADS * ITEMS_PER_THREAD) {
int valid_item = (scan_size - block_offset > item_per_block)
? item_per_block
: (scan_size - block_offset);
if (scan_size < item_per_block) {
valid_item = scan_size;
}
int offset = bx * scan_size + block_offset + by * (inner_size * scan_size);
T thread_keys[ITEMS_PER_THREAD];
BlockLoadT(temp_storage.load)
.Load(d_in + offset, thread_keys, valid_item, 0);
__syncthreads();
if (exclusive) {
BlockScanT(temp_storage.scan)
.ExclusiveScan(thread_keys, thread_keys, cub::Sum(), prefix_op);
} else {
BlockScanT(temp_storage.scan)
.InclusiveScan(thread_keys, thread_keys, cub::Sum(), prefix_op);
}
__syncthreads();
BlockStoreT(temp_storage.store)
.Store(d_out + offset, thread_keys, valid_item);
}
}
template <typename T, typename Context>
void CumsumKernel(const Context& dev_ctx,
const DenseTensor& x,
int axis,
bool flatten,
bool exclusive,
bool reverse,
DenseTensor* out) {
auto out_dims = out->dims();
auto size = x.numel();
PADDLE_ENFORCE_EQ(
axis < out_dims.size() && axis >= (0 - out_dims.size()),
true,
phi::errors::OutOfRange(
"Attr(axis) is out of range, It's expected "
"to be in range of [-%d, %d]. But received Attr(axis) = %d.",
out_dims.size(),
out_dims.size() - 1,
axis));
if (axis < 0) {
axis += out_dims.size();
}
T* out_data = dev_ctx.template Alloc<T>(out);
const T* in_data = x.data<T>();
// Use thrust for parallel acceleration when the input size is equal to the
// length of the ‘axis’ dimension.
if (size == out_dims[axis]) {
if (reverse) {
thrust::device_ptr<const T> dev_ptr =
thrust::device_pointer_cast(in_data);
thrust::device_vector<T> vec(dev_ptr, dev_ptr + size);
if (exclusive) {
thrust::exclusive_scan(
thrust::device, vec.rbegin(), vec.rend(), out_data);
} else {
thrust::inclusive_scan(
thrust::device, vec.rbegin(), vec.rend(), out_data);
}
thrust::reverse(thrust::device, out_data, out_data + size);
} else {
if (exclusive) {
thrust::exclusive_scan(
thrust::device, in_data, in_data + size, out_data);
} else {
thrust::inclusive_scan(
thrust::device, in_data, in_data + size, out_data);
}
}
return;
}
size_t height = 1;
size_t width = 1;
for (size_t i = 0; i <= axis; i++) {
height *= out_dims[i];
}
for (size_t i = axis + 1; i < out_dims.size(); i++) {
width *= out_dims[i];
}
int scan_size = out_dims[axis];
bool transpose = (axis != out_dims.size() - 1);
int tile_size = 32;
dim3 blocks(32, 8);
dim3 transpose_grids((width + tile_size - 1) / tile_size,
(height + tile_size - 1) / tile_size);
out->Resize(out_dims);
auto* tmp_data = out->data<T>();
T* next_in_data = out_data;
T* next_out_data = tmp_data;
if (transpose) {
MatrixTranspose<T, 32, 8><<<transpose_grids, blocks, 0, dev_ctx.stream()>>>(
out_data, in_data, height, width);
next_in_data = out_data;
next_out_data = tmp_data;
}
auto swap_ptr = [](T*& ptr1, T*& ptr2) {
T* tmp = ptr2;
ptr2 = ptr1;
ptr1 = tmp;
};
int outer_size = height / scan_size;
int inner_size = width;
// Consider the size of shared memory, here block size is 128
dim3 scan_grid(outer_size, inner_size);
dim3 reverse_grid = scan_grid;
if (reverse) {
if (transpose) {
reverse_grid.x = scan_grid.y;
reverse_grid.y = scan_grid.x;
MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
next_in_data, next_out_data, scan_size, outer_size, inner_size);
swap_ptr(next_in_data, next_out_data);
} else {
MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
in_data, out_data, scan_size, outer_size, inner_size);
}
}
if (!transpose && !reverse) {
BlockScanKernel<T, 128, 4><<<scan_grid, 128, 0, dev_ctx.stream()>>>(
out_data, in_data, outer_size, inner_size, scan_size, exclusive);
} else {
BlockScanKernel<T, 128, 4><<<scan_grid, 128, 0, dev_ctx.stream()>>>(
next_out_data,
next_in_data,
outer_size,
inner_size,
scan_size,
exclusive);
}
swap_ptr(next_in_data, next_out_data);
if (reverse) {
MatrixRowReverse<T><<<reverse_grid, 1024, 0, dev_ctx.stream()>>>(
next_in_data, next_out_data, scan_size, outer_size, inner_size);
swap_ptr(next_in_data, next_out_data);
}
if (transpose) {
transpose_grids.x = (height + tile_size - 1) / tile_size;
transpose_grids.y = (width + tile_size - 1) / tile_size;
MatrixTranspose<T, 32, 8><<<transpose_grids, blocks, 0, dev_ctx.stream()>>>(
next_out_data, next_in_data, width, height);
}
}
} // namespace phi
PD_REGISTER_KERNEL(cumsum,
GPU,
ALL_LAYOUT,
phi::CumsumKernel,
float,
double,
int16_t,
int,
int64_t) {}
|
f736bfe9d6702335260eb46e6abfabc0fc4a3eb9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/rng.hpp"
#include "caffe/util/io.hpp"
namespace caffe {
template <typename Dtype>
__device__ void resample(const Dtype* bottom_data, Dtype* top_data, Dtype W, Dtype H, const int index, int width, int height) {
const int size = width * height;
const Dtype w_ = floorf(W);
const Dtype h_ = floorf(H);
const Dtype _w = fminf(w_+1,width-1);
const Dtype _h = fminf(h_+1,height-1);
// Bilinear weights: each corner is weighted by the area of the opposite
// sub-rectangle, e.g. the floor corner (w_,h_) gets (_w-W)*(_h-H).
const Dtype wTL = (_w-W)*(_h-H);
const Dtype wTR = (_w-W)*(H-h_);
const Dtype wBL = (W-w_)*(_h-H);
const Dtype wBR = (W-w_)*(H-h_);
const int pTL = w_ + h_ * width;
const int pTR = w_ + _h * width;
const int pBL = _w + h_ * width;
const int pBR = _w + _h * width;
top_data[index] =
wTL * bottom_data[pTL] + wTR * bottom_data[pTR] + wBR * bottom_data[pBR] + wBL * bottom_data[pBL];
top_data[index+size] =
wTL * bottom_data[pTL+size] + wTR * bottom_data[pTR+size] + wBR * bottom_data[pBR+size] + wBL * bottom_data[pBL+size];
top_data[index+size+size] =
wTL * bottom_data[pTL+size+size] + wTR * bottom_data[pTR+size+size] + wBR * bottom_data[pBR+size+size] + wBL * bottom_data[pBL+size+size];
}
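// Worked example (comment only): for W = 1.25, H = 2.5 we get w_ = 1, _w = 2,
// h_ = 2, _h = 3, so the four neighbours (1,2), (1,3), (2,2), (2,3) receive
// weights 0.375, 0.375, 0.125, 0.125 respectively, which sum to 1.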
/***********************************COLOR SHIFT***********************************/
template <typename Dtype>
__global__ void color_shift_kernel(const Dtype* bottom_data, Dtype* top_data,
const Dtype* eig_value, const Dtype* eig_matrix, Dtype* random_vector, int total) {
CUDA_KERNEL_LOOP(index, total) {
int channel = index * 3 / total;
int pos = 3 * channel;
top_data[index] = bottom_data[index] + eig_value[0] * random_vector[0] * eig_matrix[pos]
+ eig_value[1] * random_vector[1] * eig_matrix[pos+1]
+ eig_value[2] * random_vector[2] * eig_matrix[pos+2];
}
}
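// This is PCA-style colour jitter (as popularized by AlexNet): each channel c
// is shifted by sum_i eig_matrix[c][i] * eig_value[i] * random_vector[i],
// with random_vector drawn per image from N(color_shift_mu_, color_shift_sigma_).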
template <typename Dtype>
void AugmentationLayer<Dtype>::do_color_shift_gpu(const Dtype* bottom_data, Dtype* top_data) {
const Dtype* eig_value = color_shift_eig_value_.gpu_data();
const Dtype* eig_matrix = color_shift_eig_matrix_.gpu_data();
Dtype* random_vector_ = color_shift_random_vector_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
caffe_gpu_rng_gaussian(4, color_shift_mu_, color_shift_sigma_, random_vector_);
hipLaunchKernelGGL(( color_shift_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(total_)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, eig_value, eig_matrix, random_vector_, total_);
}
}
/***********************************GAUSSIAN NOISE***********************************/
template <typename Dtype>
__global__ void gaussian_noise_kernel(const Dtype* bottom_data, Dtype* top_data,
const unsigned int* mask, const Dtype* random_vector, const int threshold, const int count) {
CUDA_KERNEL_LOOP(index, count) {
top_data[index] = (mask[index] < threshold)? bottom_data[index] + random_vector[index] : bottom_data[index];
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_gaussian_noise_gpu(const Dtype* bottom_data, Dtype* top_data) {
unsigned int* mask = static_cast<unsigned int*>(gaussian_noise_mask_.mutable_gpu_data());
Dtype* random_vector = gaussian_noise_buffer_.mutable_gpu_data();
caffe_gpu_rng_uniform(count_, mask);
caffe_gpu_rng_uniform(count_, gaussian_noise_mu_, gaussian_noise_sigma_, random_vector);
hipLaunchKernelGGL(( gaussian_noise_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count_)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data, top_data, mask, random_vector, uint_thres_, count_);
}
/***********************************CONTRAST***********************************/
template <typename Dtype>
__global__ void contrast_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype factor, const int count) {
CUDA_KERNEL_LOOP(index, count) {
top_data[index] = factor * (bottom_data[index] - 128) + 128;
if (top_data[index] > 255) top_data[index] = 255.0;
if (top_data[index] < 0) top_data[index] = 0.0;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_contrast_gpu(const Dtype* bottom_data, Dtype* top_data) {
Dtype C = Dtype(Rand(contrast_max_thershold_*2) - contrast_max_thershold_);
Dtype F = 1.0156868 * (C + 255) / ( 259 - C);
hipLaunchKernelGGL(( contrast_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count_)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data, top_data, F, count_);
}
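// The factor follows the common contrast-correction formula
// F = 259*(C+255) / (255*(259-C)) with C drawn from [-max, max);
// 1.0156868 is approximately 259/255.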
/***********************************FLARE***********************************/
template <typename Dtype>
__global__ void flare_kernel(const Dtype* bottom_data, Dtype* top_data, const int radius, const int lumi, const int centerH, const int centerW,
const int height, const int width, const int total) {
CUDA_KERNEL_LOOP(index, total) {
const int size = width * height;
const int CR = index % size;
const int H = CR / width;
const int W = CR % width;
const int dH = H - centerH;
const int dW = W - centerW;
const Dtype dis = sqrtf(dH * dH + dW * dW) + 0.00001;
top_data[index] = bottom_data[index] + lumi * fmaxf(1 - dis / radius, 0);
if (top_data[index] > 255) top_data[index] = 255.0;
if (top_data[index] < 0) top_data[index] = 0.0;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_flare_gpu(const Dtype* bottom_data, Dtype* top_data) {
if (flare_num_ == 1) {
for (int n = 0; n < num_; ++n) {
int centerH = Rand(height_);
int centerW = Rand(width_);
int radius = flare_min_radius_ + Rand(flare_max_radius_ - flare_min_radius_);
int lumi = flare_min_lumi_ + Rand(flare_max_lumi_ - flare_min_lumi_);
hipLaunchKernelGGL(( flare_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count_)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, radius, lumi, centerH, centerW, height_, width_ ,total_);
}
} else {
Dtype* buffer_ = flare_buffer_.mutable_gpu_data();
caffe_copy(count_, bottom_data, buffer_);
for (int i = 0; i < flare_num_; ++i) {
for (int n = 0; n < num_; ++n) {
int centerH = Rand(height_);
int centerW = Rand(width_);
int radius = flare_min_radius_ + Rand(flare_max_radius_ - flare_min_radius_);
int lumi = flare_min_lumi_ + Rand(flare_max_lumi_ - flare_min_lumi_);
hipLaunchKernelGGL(( flare_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count_)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
buffer_ + n * total_, top_data + n * total_, radius, lumi, centerH, centerW, height_, width_ ,total_);
}
if (i != flare_num_ - 1) caffe_copy(count_, top_data, buffer_);
}
}
}
/***********************************BLUR***********************************/
template <typename Dtype>
__global__ void blur_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype t, const Dtype sum,
const int height, const int width, const int total) {
CUDA_KERNEL_LOOP(index, total) {
const int size = width * height;
const int CR = index % size;
const int H = CR / width;
const int W = CR % width;
if (H == 0 || H == (height -1) || W == 0 || W == (width - 1))
top_data[index] = bottom_data[index];
else {
top_data[index] = (bottom_data[index] + t * (bottom_data[index+1] + bottom_data[index-1] + bottom_data[index+width] + bottom_data[index-width])
+ t * t * (bottom_data[index+width+1] + bottom_data[index-width-1] + bottom_data[index+width-1] + bottom_data[index-width+1])) / sum;
}
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_blur_gpu(const Dtype* bottom_data, Dtype* top_data) {
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, blur_min_sigma_, blur_max_sigma_, &blur_sigma_);
const Dtype t = std::exp(- (1/blur_sigma_) * (1/blur_sigma_) / 2);
const Dtype sum = 4 * t * t + 4 * t + 1;
hipLaunchKernelGGL(( blur_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count_)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, t, sum, height_, width_ ,total_);
}
}
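// The 3x3 blur above is the separable kernel [t 1 t] x [t 1 t] with
// t = exp(-1/(2*sigma^2)): corners weigh t^2, edges t, centre 1, and
// sum = 4*t^2 + 4*t + 1 renormalizes the result.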
/***********************************COLOR BIAS***********************************/
template <typename Dtype>
__global__ void color_bias_kernel(const Dtype* bottom_data, Dtype* top_data, const int size, const int bias, const int bias_ch) {
CUDA_KERNEL_LOOP(index, size) {
const int pos = index + bias_ch * size;
top_data[pos] = bottom_data[pos] + bias;
if (top_data[pos] > 255) top_data[pos] = 255.0;
if (top_data[pos] < 0) top_data[pos] = 0.0;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_color_bias_gpu(const Dtype* bottom_data, Dtype* top_data) {
for (int n = 0; n < num_; ++n) {
const int size = height_ * width_;
const int bias = Rand(color_max_bias_);
const int bias_ch = Rand(3);
caffe_copy(count_, bottom_data, top_data);
hipLaunchKernelGGL(( color_bias_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, size, bias, bias_ch);
}
}
/***********************************MIRROR***********************************/
template <typename Dtype>
__global__ void mirror_kernel(const Dtype* bottom_data, Dtype* top_data, const int size, const int width) {
CUDA_KERNEL_LOOP(index, size) {
const int W = index % width;
top_data[index] = bottom_data[index - W + width - 1 - W];
top_data[index+size] = bottom_data[index+size - W + width - 1 - W];
top_data[index+size+size] = bottom_data[index+size+size - W + width - 1 - W];
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_mirror_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
hipLaunchKernelGGL(( mirror_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, size, width_);
}
}
/***********************************SATURATION***********************************/
template <typename Dtype>
__global__ void saturation_kernel(const Dtype* bottom_data, Dtype* top_data, const int size, const int width, Dtype bias) {
CUDA_KERNEL_LOOP(index, size) {
// Grayscale weights are ordered B, G, R (0.114, 0.587, 0.299).
const Dtype param = sqrtf(bottom_data[index] * bottom_data[index] * 0.114 +
bottom_data[index+size] * bottom_data[index+size] * 0.587 +
bottom_data[index+size+size] * bottom_data[index+size+size] * 0.299);
top_data[index] = param + (bottom_data[index] - param) * bias;
top_data[index+size] = param + (bottom_data[index+size] - param) * bias;
top_data[index+size+size] = param + (bottom_data[index+size+size] - param) * bias;
if (top_data[index] > 255) top_data[index] = 255.0;
if (top_data[index] < 0) top_data[index] = 0.0;
if (top_data[index+size] > 255) top_data[index+size] = 255.0;
if (top_data[index+size] < 0) top_data[index+size] = 0.0;
if (top_data[index+size+size] > 255) top_data[index+size+size] = 255.0;
if (top_data[index+size+size] < 0) top_data[index+size+size] = 0.0;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_saturation_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, 1-saturation_max_bias_, 1+saturation_max_bias_, &saturation_bias_);
hipLaunchKernelGGL(( saturation_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, size, width_, saturation_bias_);
}
}
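// 'param' is an RMS-style grey value using Rec.601 luma weights; bias > 1
// pushes channels away from grey (more saturated), bias < 1 pulls towards it.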
/***********************************MEAN***********************************/
template <typename Dtype>
__global__ void mean_file_kernel(const Dtype* bottom_data, Dtype* top_data, const int total, const Dtype* mean) {
CUDA_KERNEL_LOOP(index, total) {
top_data[index] = bottom_data[index] - mean[index];
}
}
template <typename Dtype>
__global__ void mean_value_kernel(const Dtype* bottom_data, Dtype* top_data, const int size,
const Dtype value_B, const Dtype value_G, const Dtype value_R) {
CUDA_KERNEL_LOOP(index, size) {
top_data[index] = bottom_data[index] - value_B;
top_data[index+size] = bottom_data[index+size] - value_G;
top_data[index+size+size] = bottom_data[index+size+size] - value_R;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_mean_gpu(const Dtype* bottom_data, Dtype* top_data) {
if (has_mean_file_) {
const string& mean_file = this->layer_param_.augmentation_param().mean().mean_file();
CHECK_EQ(channels_, mean_data_.channels());
CHECK_EQ(height_, mean_data_.height());
CHECK_EQ(width_, mean_data_.width());
// const Dtype* mean = mean_data_.gpu_data();
const Dtype* mean_cpu = mean_data_.cpu_data();
Dtype* mean;
hipMalloc((void**) &mean, total_*sizeof(Dtype));
hipMemcpy(mean, mean_cpu, total_*sizeof(Dtype), hipMemcpyHostToDevice);
for (int n = 0; n < num_; ++n) {
hipLaunchKernelGGL(( mean_file_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(total_)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, total_, mean);
}
hipFree(mean);
} else {
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
hipLaunchKernelGGL(( mean_value_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, size, mean_values_[0], mean_values_[1], mean_values_[2]);
}
}
}
/***********************************SIN WARP***********************************/
template <typename Dtype>
__global__ void sin_warp_kernel(const Dtype* bottom_data, Dtype* top_data,
const Dtype am, const Dtype hz, const Dtype ph, const int size, const int width, const int height) {
CUDA_KERNEL_LOOP(index, size) {
const int H = index / width;
const int W = index % width;
const Dtype xo = (am * sinf(3.1416 * H / hz + ph));
const Dtype yo = (am * sinf(3.1416 * W / hz + ph));
Dtype nH = (Dtype)H + yo;
Dtype nW = (Dtype)W + xo;
if (nW > width - 1 || nW < 0 || nH > height - 1 || nH < 0) {
top_data[index] = 0;
top_data[index+size] = 0;
top_data[index+size+size] = 0;
} else {
resample(bottom_data, top_data, nW, nH, index, width, height);
}
// const Dtype p = fminf(height-1, fmaxf((H + yo),0));
// const Dtype q = fminf(width-1, fmaxf((W + xo),0));
// resample(bottom_data, top_data, q, p, index, width, height);
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_sin_warp_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, sin_warp_min_am_, sin_warp_max_am_, &random_am_);
caffe_rng_uniform(1, sin_warp_min_hz_, sin_warp_max_hz_, &random_hz_);
caffe_rng_uniform(1, (Dtype)0., (Dtype)6.2832, &random_ph_);
hipLaunchKernelGGL(( sin_warp_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, random_am_, random_hz_, random_ph_, size, width_, height_);
}
}
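// Each output pixel (W,H) samples the input at (W + am*sin(pi*H/hz + ph),
// H + am*sin(pi*W/hz + ph)) -- a sinusoidal displacement field with random
// amplitude, wavelength and phase per image; out-of-range samples become 0.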
/***********************************ROTATION***********************************/
template <typename Dtype>
__global__ void rotation_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype cH, const Dtype cW,
const Dtype degree, const int size, const int width, const int height) {
CUDA_KERNEL_LOOP(index, size) {
const int H = index / width;
const int W = index % width;
const Dtype dW = (Dtype)W - cW;
const Dtype dH = (Dtype)H - cH;
const Dtype ca = cosf(degree);
const Dtype sa = sinf(degree);
// const Dtype ncW = ca * cW + sa * cH;
// const Dtype ncH = -sa * cH + ca * cW;
// const Dtype dW = ncW - cW;
// const Dtype dH = ncH - cH;
// Dtype nW = ca * (W+dW) + sa * (H+dH);
// Dtype nH = -sa * (W+dW) + ca * (H+dH);
Dtype nW = ca * dW + sa * dH + cW;
Dtype nH = -sa * dW + ca * dH + cH;
// const Dtype dis = sqrtf(dW*dW+dH*dH) + 0.000001;
// const Dtype theta = (cH > H)? 6.2831853 - acosf(dW/dis) : acosf(dW/dis);
// Dtype nW = cW + dis * cosf(theta - degree);
// Dtype nH = cH + dis * sinf(theta - degree);
if (nW > width - 1 || nW < 0 || nH > height - 1 || nH < 0) {
top_data[index] = 0;
top_data[index+size] = 0;
top_data[index+size+size] = 0;
} else {
resample(bottom_data, top_data, nW, nH, index, width, height);
}
// const Dtype nW = fminf(width-1, fmaxf((cW + dis * cosf(theta - degree)),0));
// const Dtype nH = fminf(height-1, fmaxf((cH + dis * sinf(theta - degree)),0));
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_rotation_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
const Dtype cH = (height_ - 1) / Dtype(2);
const Dtype cW = (width_ - 1) / Dtype(2);
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, -rotation_max_degree_, rotation_max_degree_, &rotation_degree_);
hipLaunchKernelGGL(( rotation_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_data + n * total_, top_data + n * total_, cH, cW, rotation_degree_, size, width_, height_);
}
}
/***********************************LENS DISTORTION***********************************/
template <typename Dtype>
__global__ void lens_distortion_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype cH, const Dtype cW, const Dtype paramA,
const Dtype paramB, const Dtype paramC, const Dtype paramD, const int size, const int width, const int height) {
CUDA_KERNEL_LOOP(index, size) {
const int H = index / width;
const int W = index % width;
const Dtype dW = ((Dtype)W - cW)/width;
const Dtype dH = ((Dtype)H - cH)/height;
const Dtype dstR = sqrtf(dW*dW+dH*dH) + 0.000001;
const Dtype srcR = (paramA * dstR * dstR * dstR + paramB * dstR * dstR + paramC * dstR + paramD) * dstR;
const Dtype factor = fabsf(dstR / srcR);
Dtype nW = cW + dW * factor * width;
Dtype nH = cH + dH * factor * height;
if (nW > width - 1 || nW < 0 || nH > height - 1 || nH < 0) {
top_data[index] = 0;
top_data[index+size] = 0;
top_data[index+size+size] = 0;
} else {
resample(bottom_data, top_data, nW, nH, index, width, height);
}
// const Dtype nW = fminf(width-1, fmaxf((cW + dW * factor * width),0));
// const Dtype nH = fminf(height-1, fmaxf((cH + dH * factor * height),0));
// resample(bottom_data, top_data, nW, nH, index, width, height);
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_lens_distortion_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
const Dtype cH = (height_ - 1) / Dtype(2);
const Dtype cW = (width_ - 1) / Dtype(2);
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, -lens_max_parama_, lens_max_parama_, &lens_parama_);
caffe_rng_uniform(1, -lens_max_paramb_, lens_max_paramb_, &lens_paramb_);
hipLaunchKernelGGL(( lens_distortion_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom_data + n * total_, top_data + n * total_,
cH, cW, lens_parama_, lens_paramb_, lens_paramc_, lens_paramd_, size, width_, height_);
}
}
/***********************************RESCALE + CROP***********************************/
template <typename Dtype>
__global__ void rescale_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype width_scale, const Dtype height_scale,
const int size, const int width, const int height) {
CUDA_KERNEL_LOOP(index, size) {
const int H = index / width;
const int W = index % width;
Dtype nW = W / width_scale;
Dtype nH = H / height_scale;
if (nW > width - 1 || nW < 0 || nH > height - 1 || nH < 0) {
top_data[index] = 255;
top_data[index+size] = 255;
top_data[index+size+size] = 255;
} else {
resample(bottom_data, top_data, nW, nH, index, width, height);
}
}
}
template <typename Dtype>
__global__ void crop_kernel(const Dtype* bottom_data, Dtype* top_data, const int wskip, const int hskip,
const int size, const int width, const int out_size, const int out_width) {
CUDA_KERNEL_LOOP(index, out_size) {
const int H = index / out_width;
const int W = index % out_width;
const int nH = H + hskip;
const int nW = W + wskip;
const int pos = nH * width + nW;
top_data[index] = bottom_data[pos];
top_data[index+out_size] = bottom_data[pos+size];
top_data[index+out_size+out_size] = bottom_data[pos+size+size];
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_crop_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int out_size = out_height_ * out_width_;
const int size = height_ * width_;
const int out_total = out_height_ * out_width_ * channels_;
for (int n = 0; n < num_; ++n) {
const int wskip = Rand(width_ - crop_size_);
const int hskip = Rand(height_ - crop_size_);
hipLaunchKernelGGL(( crop_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom_data + n * total_, top_data + n * out_total,
wskip, hskip, size, width_ , out_size, out_width_);
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_rescale_crop_gpu(const Dtype* bottom_data, Dtype* buffer, Dtype* top_data) {
const int out_size = out_height_ * out_width_;
const int out_total = out_height_ * out_width_ * channels_;
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, width_min_scale_, width_max_scale_, &width_scale_);
if (fixed_ratio_)
height_scale_ = width_scale_;
else
caffe_rng_uniform(1, height_min_scale_, height_max_scale_, &height_scale_);
// LOG(INFO)<<height_scale_<<","<<width_scale_;
hipLaunchKernelGGL(( rescale_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom_data + n * total_, buffer + n * total_,
width_scale_, height_scale_, size, width_, height_);
const int nwidth = (int)(width_ * width_scale_);
const int nheight = (int)(height_ * height_scale_);
const int wskip = Rand(max(1,nwidth - crop_size_));
const int hskip = Rand(max(1,nheight - crop_size_));
hipLaunchKernelGGL(( crop_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, buffer + n * total_, top_data + n * out_total,
wskip, hskip, size, width_ , out_size, out_width_);
// LOG(INFO)<<"=================";
// LOG(INFO)<<nheight<<","<<nwidth;
// LOG(INFO)<<crop_size_;
// LOG(INFO)<<nheight - crop_size_<<","<<nwidth - crop_size_;
// LOG(INFO)<<hskip<<","<<wskip;
// LOG(INFO)<<hskip+crop_size_<<","<<wskip+crop_size_;
// LOG(INFO)<<"=================";
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_variable_crop_gpu(const Dtype* bottom_data, Dtype* buffer, Dtype* top_data, Dtype* crop_data,
const vector<Blob<Dtype>*>& top) {//DEVICE DEVICE DEVICE HOST
const int size = height_ * width_;
// LOG(INFO) << ":::::::::::::::::::::::::::Starting:::::::::::::::::::::::::::";
//do some rescale staffs
if (size_protection_ == false) {
caffe_rng_uniform(1, width_min_scale_, width_max_scale_, &width_scale_);
if (fixed_ratio_)
height_scale_ = width_scale_;
else
caffe_rng_uniform(1, height_min_scale_, height_max_scale_, &height_scale_);
} else {
Dtype tmpratio = std::min(static_cast<Dtype>(max_size_)/static_cast<Dtype>(height_),
static_cast<Dtype>(max_size_)/static_cast<Dtype>(width_));
Dtype ratio = std::min(tmpratio, (Dtype)1.0);
caffe_rng_uniform(1, width_min_scale_, width_max_scale_, &width_scale_);
if (fixed_ratio_)
height_scale_ = width_scale_;
else
caffe_rng_uniform(1, height_min_scale_, height_max_scale_, &height_scale_);
width_scale_ *= ratio;
height_scale_ *= ratio;
// LOG(INFO) << "size_protection: " <<size_protection_;
// LOG(INFO) << "tmpRatio: " <<tmpratio;
// LOG(INFO) << "Ratio: " <<ratio;
}
// LOG(INFO) << ":::::::::::::::::::::::::::Rescale:::::::::::::::::::::::::::";
// LOG(INFO) << "Original size: " <<height_ <<" "<< width_;
// LOG(INFO) << "Rescale scale: " <<height_scale_ <<" "<< width_scale_;
const int nwidth = (int)(width_ * width_scale_);
const int nheight = (int)(height_ * height_scale_);
// LOG(INFO) << "Rescale size: " <<nheight <<" "<< nwidth;
hipLaunchKernelGGL(( rescale_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom_data, buffer,
width_scale_, height_scale_, size, width_, height_);
// LOG(INFO) << ":::::::::::::::::::::::::::Crop:::::::::::::::::::::::::::";
// LOG(INFO) << "Crop scale: " <<crop_scale_;
const int crop_size_width = (int)nwidth * crop_scale_;
const int crop_size_height = (int)nheight * crop_scale_;
const int wskip = Rand(max(1,nwidth - crop_size_width));
const int hskip = Rand(max(1,nheight - crop_size_height));
const int out_size = crop_size_height * crop_size_width;
// LOG(INFO) << "Crop size: " <<crop_size_height <<" "<< crop_size_width;
top[0]->Reshape(num_,channels_,crop_size_height,crop_size_width);
hipLaunchKernelGGL(( crop_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(size)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, buffer, top_data,
wskip, hskip, size, width_ , out_size, crop_size_width);
// LOG(INFO) << ":::::::::::::::::::::::::::Ending:::::::::::::::::::::::::::";
const int wrest = nwidth - wskip - crop_size_width;
const int hrest = nheight - hskip - crop_size_height;
crop_data[4] = height_scale_; crop_data[5] = width_scale_;
crop_data[2] = wskip; crop_data[3] = wrest;
crop_data[0] = hskip; crop_data[1] = hrest;
}
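// crop_data layout as written above: [0]=hskip, [1]=hrest, [2]=wskip,
// [3]=wrest, [4]=height_scale, [5]=width_scale; slot [6] is only touched on
// the top blob when the mirror step records a flip.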
/***********************************FORWARD FUNCTION***********************************/
template <typename Dtype>
void AugmentationLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* top_buffer;
Dtype* bottom_buffer;
// LOG(INFO)<<top[t]->channels()<<","<<top[t]->height()<<","<<top[t]->width();
hipMalloc((void**) &bottom_buffer, count_*sizeof(Dtype));
hipMalloc((void**) &top_buffer, count_*sizeof(Dtype));
hipMemcpy(bottom_buffer, bottom_data, count_*sizeof(Dtype), hipMemcpyHostToDevice);
//==========LV1==========//
//COLOR SHIFT
if ((this->layer_param_.augmentation_param().color_shift().execute() ==
AugmentationColorShiftParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().color_shift().execute() ==
AugmentationColorShiftParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_color_shift_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//GAUSSIAN NOISE
if ((this->layer_param_.augmentation_param().gaussian_noise().execute() ==
AugmentationGaussianNoiseParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().gaussian_noise().execute() ==
AugmentationGaussianNoiseParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_gaussian_noise_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//CONTRAST
if ((this->layer_param_.augmentation_param().contrast().execute() ==
AugmentationContrastParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().contrast().execute() ==
AugmentationContrastParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_contrast_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//FLARE
if ((this->layer_param_.augmentation_param().flare().execute() ==
AugmentationFlareParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().flare().execute() ==
AugmentationFlareParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_flare_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//BLUR
if ((this->layer_param_.augmentation_param().blur().execute() ==
AugmentationBlurParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().blur().execute() ==
AugmentationBlurParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_blur_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//COLOR_BIAS
if ((this->layer_param_.augmentation_param().color_bias().execute() ==
AugmentationColorBiasParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().color_bias().execute() ==
AugmentationColorBiasParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_color_bias_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//MIRROR
if ((this->layer_param_.augmentation_param().mirror().execute() ==
AugmentationMirrorParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().mirror().execute() ==
AugmentationMirrorParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_mirror_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);
if (output_crop_) { (top[1]->mutable_cpu_data())[6] = 1;} }
//SATURATION
if ((this->layer_param_.augmentation_param().saturation().execute() ==
AugmentationSaturationParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().saturation().execute() ==
AugmentationSaturationParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_saturation_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//MEAN
if (this->layer_param_.augmentation_param().mean().execute() ==
AugmentationMeanParameter_Execute_ALWAYS) {
do_mean_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//==========LV2==========//
//LV2 LASSO
vector<AUGFUNC> lv2_lasso;
if (this->layer_param_.augmentation_param().sin_warp().execute() ==
AugmentationSinWarpParameter_Execute_LASSO)
lv2_lasso.push_back(&AugmentationLayer<Dtype>::do_sin_warp_gpu);
if (this->layer_param_.augmentation_param().rotation().execute() ==
AugmentationRotationParameter_Execute_LASSO)
lv2_lasso.push_back(&AugmentationLayer<Dtype>::do_rotation_gpu);
if (this->layer_param_.augmentation_param().lens_distortion().execute() ==
AugmentationLensDistortionParameter_Execute_LASSO)
lv2_lasso.push_back(&AugmentationLayer<Dtype>::do_lens_distortion_gpu);
if (lv2_lasso.size()!=0 && Rand(2)) {
int run_lasso = Rand(lv2_lasso.size());
(this->*lv2_lasso[run_lasso])(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);
}
//SIN WARP
if ((this->layer_param_.augmentation_param().sin_warp().execute() ==
AugmentationSinWarpParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().sin_warp().execute() ==
AugmentationSinWarpParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_sin_warp_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//ROTATION
if ((this->layer_param_.augmentation_param().rotation().execute() ==
AugmentationRotationParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().rotation().execute() ==
AugmentationRotationParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_rotation_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//LENSDISTORTION
if ((this->layer_param_.augmentation_param().lens_distortion().execute() ==
AugmentationLensDistortionParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().lens_distortion().execute() ==
AugmentationLensDistortionParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_lens_distortion_gpu(bottom_buffer, top_buffer);
hipMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);}
//==========LV3==========//
//LV3
if ( (this->layer_param_.augmentation_param().rescale().execute() == AugmentationRescaleParameter_Execute_NEVER) &&
(this->layer_param_.augmentation_param().crop().execute() == AugmentationCropParameter_Execute_NEVER) ) {
if (this->layer_param_.augmentation_param().variable_crop().execute() == AugmentationVariableCropParameter_Execute_NEVER)
hipMemcpy(top_data, bottom_buffer, count_*sizeof(Dtype), hipMemcpyDeviceToDevice);
else {
if (output_crop_) {
Dtype* crop_data = top[1]->mutable_cpu_data();
do_variable_crop_gpu(bottom_buffer, top_buffer, top_data, crop_data, top);
} else {
Dtype* crop_data;
crop_data = new Dtype[6]; // do_variable_crop_gpu fills indices 0-5
do_variable_crop_gpu(bottom_buffer, top_buffer, top_data, crop_data, top);
delete[] crop_data;
}
}
}
else {
if (this->layer_param_.augmentation_param().rescale().execute() == AugmentationRescaleParameter_Execute_NEVER)
do_crop_gpu(bottom_buffer, top_data);
else
do_rescale_crop_gpu(bottom_buffer, top_buffer, top_data);
}
hipFree(bottom_buffer);
hipFree(top_buffer);
}
template <typename Dtype>
void AugmentationLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
}
INSTANTIATE_LAYER_GPU_FUNCS(AugmentationLayer);
} // namespace caffe
| f736bfe9d6702335260eb46e6abfabc0fc4a3eb9.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/rng.hpp"
#include "caffe/util/io.hpp"
namespace caffe {
template <typename Dtype>
__device__ void resample(const Dtype* bottom_data, Dtype* top_data, Dtype W, Dtype H, const int index, int width, int height) {
const int size = width * height;
const Dtype w_ = floorf(W);
const Dtype h_ = floorf(H);
const Dtype _w = fminf(w_+1,width-1);
const Dtype _h = fminf(h_+1,height-1);
// Bilinear weights: each corner is weighted by the area of the opposite
// sub-rectangle, e.g. the floor corner (w_,h_) gets (_w-W)*(_h-H).
const Dtype wTL = (_w-W)*(_h-H);
const Dtype wTR = (_w-W)*(H-h_);
const Dtype wBL = (W-w_)*(_h-H);
const Dtype wBR = (W-w_)*(H-h_);
const int pTL = w_ + h_ * width;
const int pTR = w_ + _h * width;
const int pBL = _w + h_ * width;
const int pBR = _w + _h * width;
top_data[index] =
wTL * bottom_data[pTL] + wTR * bottom_data[pTR] + wBR * bottom_data[pBR] + wBL * bottom_data[pBL];
top_data[index+size] =
wTL * bottom_data[pTL+size] + wTR * bottom_data[pTR+size] + wBR * bottom_data[pBR+size] + wBL * bottom_data[pBL+size];
top_data[index+size+size] =
wTL * bottom_data[pTL+size+size] + wTR * bottom_data[pTR+size+size] + wBR * bottom_data[pBR+size+size] + wBL * bottom_data[pBL+size+size];
}
/***********************************COLOR SHIFT***********************************/
template <typename Dtype>
__global__ void color_shift_kernel(const Dtype* bottom_data, Dtype* top_data,
const Dtype* eig_value, const Dtype* eig_matrix, Dtype* random_vector, int total) {
CUDA_KERNEL_LOOP(index, total) {
int channel = index * 3 / total;
int pos = 3 * channel;
top_data[index] = bottom_data[index] + eig_value[0] * random_vector[0] * eig_matrix[pos]
+ eig_value[1] * random_vector[1] * eig_matrix[pos+1]
+ eig_value[2] * random_vector[2] * eig_matrix[pos+2];
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_color_shift_gpu(const Dtype* bottom_data, Dtype* top_data) {
const Dtype* eig_value = color_shift_eig_value_.gpu_data();
const Dtype* eig_matrix = color_shift_eig_matrix_.gpu_data();
Dtype* random_vector_ = color_shift_random_vector_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
caffe_gpu_rng_gaussian(4, color_shift_mu_, color_shift_sigma_, random_vector_);
color_shift_kernel<Dtype><<<CAFFE_GET_BLOCKS(total_),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data + n * total_, top_data + n * total_, eig_value, eig_matrix, random_vector_, total_);
}
}
/***********************************GAUSSIAN NOISE***********************************/
template <typename Dtype>
__global__ void gaussian_noise_kernel(const Dtype* bottom_data, Dtype* top_data,
const unsigned int* mask, const Dtype* random_vector, const int threshold, const int count) {
CUDA_KERNEL_LOOP(index, count) {
top_data[index] = (mask[index] < threshold)? bottom_data[index] + random_vector[index] : bottom_data[index];
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_gaussian_noise_gpu(const Dtype* bottom_data, Dtype* top_data) {
unsigned int* mask = static_cast<unsigned int*>(gaussian_noise_mask_.mutable_gpu_data());
Dtype* random_vector = gaussian_noise_buffer_.mutable_gpu_data();
caffe_gpu_rng_uniform(count_, mask);
caffe_gpu_rng_uniform(count_, gaussian_noise_mu_, gaussian_noise_sigma_, random_vector);
gaussian_noise_kernel<Dtype><<<CAFFE_GET_BLOCKS(count_),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data, top_data, mask, random_vector, uint_thres_, count_);
}
/***********************************CONTRAST***********************************/
template <typename Dtype>
__global__ void contrast_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype factor, const int count) {
CUDA_KERNEL_LOOP(index, count) {
top_data[index] = factor * (bottom_data[index] - 128) + 128;
if (top_data[index] > 255) top_data[index] = 255.0;
if (top_data[index] < 0) top_data[index] = 0.0;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_contrast_gpu(const Dtype* bottom_data, Dtype* top_data) {
Dtype C = Dtype(Rand(contrast_max_thershold_*2) - contrast_max_thershold_);
Dtype F = 1.0156868 * (C + 255) / ( 259 - C);
contrast_kernel<Dtype><<<CAFFE_GET_BLOCKS(count_),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data, top_data, F, count_);
}
/***********************************FLARE***********************************/
template <typename Dtype>
__global__ void flare_kernel(const Dtype* bottom_data, Dtype* top_data, const int radius, const int lumi, const int centerH, const int centerW,
const int height, const int width, const int total) {
CUDA_KERNEL_LOOP(index, total) {
const int size = width * height;
const int CR = index % size;
const int H = CR / width;
const int W = CR % width;
const int dH = H - centerH;
const int dW = W - centerW;
const Dtype dis = sqrtf(dH * dH + dW * dW) + 0.00001;
top_data[index] = bottom_data[index] + lumi * fmaxf(1 - dis / radius, 0);
if (top_data[index] > 255) top_data[index] = 255.0;
if (top_data[index] < 0) top_data[index] = 0.0;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_flare_gpu(const Dtype* bottom_data, Dtype* top_data) {
if (flare_num_ == 1) {
for (int n = 0; n < num_; ++n) {
int centerH = Rand(height_);
int centerW = Rand(width_);
int radius = flare_min_radius_ + Rand(flare_max_radius_ - flare_min_radius_);
int lumi = flare_min_lumi_ + Rand(flare_max_lumi_ - flare_min_lumi_);
flare_kernel<Dtype><<<CAFFE_GET_BLOCKS(count_),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data + n * total_, top_data + n * total_, radius, lumi, centerH, centerW, height_, width_ ,total_);
}
} else {
Dtype* buffer_ = flare_buffer_.mutable_gpu_data();
caffe_copy(count_, bottom_data, buffer_);
for (int i = 0; i < flare_num_; ++i) {
for (int n = 0; n < num_; ++n) {
int centerH = Rand(height_);
int centerW = Rand(width_);
int radius = flare_min_radius_ + Rand(flare_max_radius_ - flare_min_radius_);
int lumi = flare_min_lumi_ + Rand(flare_max_lumi_ - flare_min_lumi_);
flare_kernel<Dtype><<<CAFFE_GET_BLOCKS(count_),CAFFE_CUDA_NUM_THREADS>>>
(buffer_ + n * total_, top_data + n * total_, radius, lumi, centerH, centerW, height_, width_ ,total_);
}
if (i != flare_num_ - 1) caffe_copy(count_, top_data, buffer_);
}
}
}
/***********************************BLUR***********************************/
template <typename Dtype>
__global__ void blur_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype t, const Dtype sum,
const int height, const int width, const int total) {
CUDA_KERNEL_LOOP(index, total) {
const int size = width * height;
const int CR = index % size;
const int H = CR / width;
const int W = CR % width;
if (H == 0 || H == (height -1) || W == 0 || W == (width - 1))
top_data[index] = bottom_data[index];
else {
top_data[index] = (bottom_data[index] + t * (bottom_data[index+1] + bottom_data[index-1] + bottom_data[index+width] + bottom_data[index-width])
+ t * t * (bottom_data[index+width+1] + bottom_data[index-width-1] + bottom_data[index+width-1] + bottom_data[index-width+1])) / sum;
}
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_blur_gpu(const Dtype* bottom_data, Dtype* top_data) {
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, blur_min_sigma_, blur_max_sigma_, &blur_sigma_);
const Dtype t = std::exp(- (1/blur_sigma_) * (1/blur_sigma_) / 2);
const Dtype sum = 4 * t * t + 4 * t + 1;
blur_kernel<Dtype><<<CAFFE_GET_BLOCKS(count_),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data + n * total_, top_data + n * total_, t, sum, height_, width_ ,total_);
}
}
/***********************************COLOR BIAS***********************************/
template <typename Dtype>
__global__ void color_bias_kernel(const Dtype* bottom_data, Dtype* top_data, const int size, const int bias, const int bias_ch) {
CUDA_KERNEL_LOOP(index, size) {
const int pos = index + bias_ch * size;
top_data[pos] = bottom_data[pos] + bias;
if (top_data[pos] > 255) top_data[pos] = 255.0;
if (top_data[pos] < 0) top_data[pos] = 0.0;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_color_bias_gpu(const Dtype* bottom_data, Dtype* top_data) {
  const int size = height_ * width_;
  // Copy the whole batch once up front; copying inside the loop would
  // overwrite the bias already applied to earlier images in the batch.
  caffe_copy(count_, bottom_data, top_data);
  for (int n = 0; n < num_; ++n) {
    const int bias = Rand(color_max_bias_);
    const int bias_ch = Rand(3);
    color_bias_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>
    (bottom_data + n * total_, top_data + n * total_, size, bias, bias_ch);
  }
}
/***********************************MIRROR***********************************/
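// Horizontal flip: each thread handles one spatial position and writes all
// three channel planes (offsets 0, size, 2*size) from the mirrored column.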
template <typename Dtype>
__global__ void mirror_kernel(const Dtype* bottom_data, Dtype* top_data, const int size, const int width) {
CUDA_KERNEL_LOOP(index, size) {
const int W = index % width;
top_data[index] = bottom_data[index - W + width - 1 - W];
top_data[index+size] = bottom_data[index+size - W + width - 1 - W];
top_data[index+size+size] = bottom_data[index+size+size - W + width - 1 - W];
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_mirror_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
mirror_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data + n * total_, top_data + n * total_, size, width_);
}
}
/***********************************SATURATION***********************************/
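// Per-pixel saturation: `param` is a luminance estimate from the B/G/R
// weights, and each channel is blended between that gray value and the
// original; bias < 1 desaturates, bias > 1 oversaturates, clamped to [0, 255].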
template <typename Dtype>
__global__ void saturation_kernel(const Dtype* bottom_data, Dtype* top_data, const int size, const int width, Dtype bias) {
CUDA_KERNEL_LOOP(index, size) {
    //Channels are ordered B, G, R (grayscale weights 0.114, 0.587, 0.299).
const Dtype param = sqrtf(bottom_data[index] * bottom_data[index] * 0.114 +
bottom_data[index+size] * bottom_data[index+size] * 0.587 +
bottom_data[index+size+size] * bottom_data[index+size+size] * 0.299);
top_data[index] = param + (bottom_data[index] - param) * bias;
top_data[index+size] = param + (bottom_data[index+size] - param) * bias;
top_data[index+size+size] = param + (bottom_data[index+size+size] - param) * bias;
if (top_data[index] > 255) top_data[index] = 255.0;
if (top_data[index] < 0) top_data[index] = 0.0;
if (top_data[index+size] > 255) top_data[index+size] = 255.0;
if (top_data[index+size] < 0) top_data[index+size] = 0.0;
if (top_data[index+size+size] > 255) top_data[index+size+size] = 255.0;
if (top_data[index+size+size] < 0) top_data[index+size+size] = 0.0;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_saturation_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, 1-saturation_max_bias_, 1+saturation_max_bias_, &saturation_bias_);
saturation_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data + n * total_, top_data + n * total_, size, width_, saturation_bias_);
}
}
/***********************************MEAN***********************************/
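// Mean normalization: subtracts either a full per-pixel mean image loaded
// from the mean file, or one scalar mean value per channel (B, G, R order).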
template <typename Dtype>
__global__ void mean_file_kernel(const Dtype* bottom_data, Dtype* top_data, const int total, const Dtype* mean) {
CUDA_KERNEL_LOOP(index, total) {
top_data[index] = bottom_data[index] - mean[index];
}
}
template <typename Dtype>
__global__ void mean_value_kernel(const Dtype* bottom_data, Dtype* top_data, const int size,
const Dtype value_B, const Dtype value_G, const Dtype value_R) {
CUDA_KERNEL_LOOP(index, size) {
top_data[index] = bottom_data[index] - value_B;
top_data[index+size] = bottom_data[index+size] - value_G;
top_data[index+size+size] = bottom_data[index+size+size] - value_R;
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_mean_gpu(const Dtype* bottom_data, Dtype* top_data) {
if (has_mean_file_) {
CHECK_EQ(channels_, mean_data_.channels());
CHECK_EQ(height_, mean_data_.height());
CHECK_EQ(width_, mean_data_.width());
// const Dtype* mean = mean_data_.gpu_data();
const Dtype* mean_cpu = mean_data_.cpu_data();
Dtype* mean;
cudaMalloc((void**) &mean, total_*sizeof(Dtype));
cudaMemcpy(mean, mean_cpu, total_*sizeof(Dtype), cudaMemcpyHostToDevice);
for (int n = 0; n < num_; ++n) {
mean_file_kernel<Dtype><<<CAFFE_GET_BLOCKS(total_),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data + n * total_, top_data + n * total_, total_, mean);
}
cudaFree(mean);
} else {
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
mean_value_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data + n * total_, top_data + n * total_, size, mean_values_[0], mean_values_[1], mean_values_[2]);
}
}
}
/***********************************SIN WARP***********************************/
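// Sinusoidal warp: each output pixel samples the input at a position offset
// along each axis by am*sin(pi*<other coordinate>/hz + ph); samples falling
// outside the image produce black pixels, otherwise resample() (defined
// elsewhere, presumably a bilinear interpolator) fills all three channels.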
template <typename Dtype>
__global__ void sin_warp_kernel(const Dtype* bottom_data, Dtype* top_data,
const Dtype am, const Dtype hz, const Dtype ph, const int size, const int width, const int height) {
CUDA_KERNEL_LOOP(index, size) {
const int H = index / width;
const int W = index % width;
const Dtype xo = (am * sinf(3.1416 * H / hz + ph));
const Dtype yo = (am * sinf(3.1416 * W / hz + ph));
Dtype nH = (Dtype)H + yo;
Dtype nW = (Dtype)W + xo;
if (nW > width - 1 || nW < 0 || nH > height - 1 || nH < 0) {
top_data[index] = 0;
top_data[index+size] = 0;
top_data[index+size+size] = 0;
} else {
resample(bottom_data, top_data, nW, nH, index, width, height);
}
// const Dtype p = fminf(height-1, fmaxf((H + yo),0));
// const Dtype q = fminf(width-1, fmaxf((W + xo),0));
// resample(bottom_data, top_data, q, p, index, width, height);
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_sin_warp_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, sin_warp_min_am_, sin_warp_max_am_, &random_am_);
caffe_rng_uniform(1, sin_warp_min_hz_, sin_warp_max_hz_, &random_hz_);
caffe_rng_uniform(1, (Dtype)0., (Dtype)6.2832, &random_ph_);
sin_warp_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data + n * total_, top_data + n * total_, random_am_, random_hz_, random_ph_, size, width_, height_);
}
}
/***********************************ROTATION***********************************/
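// Rotation about the image center: each output pixel is inverse-mapped by a
// random angle in [-rotation_max_degree_, rotation_max_degree_] (the value is
// fed directly to cosf/sinf, i.e. interpreted as radians); out-of-range
// source coordinates become black pixels.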
template <typename Dtype>
__global__ void rotation_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype cH, const Dtype cW,
const Dtype degree, const int size, const int width, const int height) {
CUDA_KERNEL_LOOP(index, size) {
const int H = index / width;
const int W = index % width;
const Dtype dW = (Dtype)W - cW;
const Dtype dH = (Dtype)H - cH;
const Dtype ca = cosf(degree);
const Dtype sa = sinf(degree);
// const Dtype ncW = ca * cW + sa * cH;
// const Dtype ncH = -sa * cH + ca * cW;
// const Dtype dW = ncW - cW;
// const Dtype dH = ncH - cH;
// Dtype nW = ca * (W+dW) + sa * (H+dH);
// Dtype nH = -sa * (W+dW) + ca * (H+dH);
Dtype nW = ca * dW + sa * dH + cW;
Dtype nH = -sa * dW + ca * dH + cH;
// const Dtype dis = sqrtf(dW*dW+dH*dH) + 0.000001;
// const Dtype theta = (cH > H)? 6.2831853 - acosf(dW/dis) : acosf(dW/dis);
// Dtype nW = cW + dis * cosf(theta - degree);
// Dtype nH = cH + dis * sinf(theta - degree);
if (nW > width - 1 || nW < 0 || nH > height - 1 || nH < 0) {
top_data[index] = 0;
top_data[index+size] = 0;
top_data[index+size+size] = 0;
} else {
resample(bottom_data, top_data, nW, nH, index, width, height);
}
// const Dtype nW = fminf(width-1, fmaxf((cW + dis * cosf(theta - degree)),0));
// const Dtype nH = fminf(height-1, fmaxf((cH + dis * sinf(theta - degree)),0));
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_rotation_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
  const Dtype cH = (height_ - 1) / (Dtype)2;
  const Dtype cW = (width_ - 1) / (Dtype)2;
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, -rotation_max_degree_, rotation_max_degree_, &rotation_degree_);
rotation_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>
(bottom_data + n * total_, top_data + n * total_, cH, cW, rotation_degree_, size, width_, height_);
}
}
/***********************************LENS DISTORTION***********************************/
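// Radial lens distortion: the normalized destination radius dstR is mapped
// through the cubic (A*r^3 + B*r^2 + C*r + D)*r to get srcR, and the sampling
// offset around the center is scaled by |dstR/srcR|; out-of-range source
// coordinates become black pixels.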
template <typename Dtype>
__global__ void lens_distortion_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype cH, const Dtype cW, const Dtype paramA,
const Dtype paramB, const Dtype paramC, const Dtype paramD, const int size, const int width, const int height) {
CUDA_KERNEL_LOOP(index, size) {
const int H = index / width;
const int W = index % width;
const Dtype dW = ((Dtype)W - cW)/width;
const Dtype dH = ((Dtype)H - cH)/height;
const Dtype dstR = sqrtf(dW*dW+dH*dH) + 0.000001;
const Dtype srcR = (paramA * dstR * dstR * dstR + paramB * dstR * dstR + paramC * dstR + paramD) * dstR;
const Dtype factor = fabsf(dstR / srcR);
Dtype nW = cW + dW * factor * width;
Dtype nH = cH + dH * factor * height;
if (nW > width - 1 || nW < 0 || nH > height - 1 || nH < 0) {
top_data[index] = 0;
top_data[index+size] = 0;
top_data[index+size+size] = 0;
} else {
resample(bottom_data, top_data, nW, nH, index, width, height);
}
// const Dtype nW = fminf(width-1, fmaxf((cW + dW * factor * width),0));
// const Dtype nH = fminf(height-1, fmaxf((cH + dH * factor * height),0));
// resample(bottom_data, top_data, nW, nH, index, width, height);
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_lens_distortion_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int size = height_ * width_;
  const Dtype cH = (height_ - 1) / (Dtype)2;
  const Dtype cW = (width_ - 1) / (Dtype)2;
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, -lens_max_parama_, lens_max_parama_, &lens_parama_);
caffe_rng_uniform(1, -lens_max_paramb_, lens_max_paramb_, &lens_paramb_);
lens_distortion_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>(bottom_data + n * total_, top_data + n * total_,
cH, cW, lens_parama_, lens_paramb_, lens_paramc_, lens_paramd_, size, width_, height_);
}
}
/***********************************RESCALE + CROP***********************************/
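// rescale_kernel stretches the image by independent width/height scales via
// inverse mapping (out-of-range samples become white, 255), and crop_kernel
// copies a fixed-size window starting at a random (hskip, wskip) offset.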
template <typename Dtype>
__global__ void rescale_kernel(const Dtype* bottom_data, Dtype* top_data, const Dtype width_scale, const Dtype height_scale,
const int size, const int width, const int height) {
CUDA_KERNEL_LOOP(index, size) {
const int H = index / width;
const int W = index % width;
Dtype nW = W / width_scale;
Dtype nH = H / height_scale;
if (nW > width - 1 || nW < 0 || nH > height - 1 || nH < 0) {
top_data[index] = 255;
top_data[index+size] = 255;
top_data[index+size+size] = 255;
} else {
resample(bottom_data, top_data, nW, nH, index, width, height);
}
}
}
template <typename Dtype>
__global__ void crop_kernel(const Dtype* bottom_data, Dtype* top_data, const int wskip, const int hskip,
const int size, const int width, const int out_size, const int out_width) {
CUDA_KERNEL_LOOP(index, out_size) {
const int H = index / out_width;
const int W = index % out_width;
const int nH = H + hskip;
const int nW = W + wskip;
const int pos = nH * width + nW;
top_data[index] = bottom_data[pos];
top_data[index+out_size] = bottom_data[pos+size];
top_data[index+out_size+out_size] = bottom_data[pos+size+size];
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_crop_gpu(const Dtype* bottom_data, Dtype* top_data) {
const int out_size = out_height_ * out_width_;
const int size = height_ * width_;
const int out_total = out_height_ * out_width_ * channels_;
for (int n = 0; n < num_; ++n) {
const int wskip = Rand(width_ - crop_size_);
const int hskip = Rand(height_ - crop_size_);
crop_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>(bottom_data + n * total_, top_data + n * out_total,
wskip, hskip, size, width_ , out_size, out_width_);
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_rescale_crop_gpu(const Dtype* bottom_data, Dtype* buffer, Dtype* top_data) {
const int out_size = out_height_ * out_width_;
const int out_total = out_height_ * out_width_ * channels_;
const int size = height_ * width_;
for (int n = 0; n < num_; ++n) {
caffe_rng_uniform(1, width_min_scale_, width_max_scale_, &width_scale_);
if (fixed_ratio_)
height_scale_ = width_scale_;
else
caffe_rng_uniform(1, height_min_scale_, height_max_scale_, &height_scale_);
// LOG(INFO)<<height_scale_<<","<<width_scale_;
rescale_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>(bottom_data + n * total_, buffer + n * total_,
width_scale_, height_scale_, size, width_, height_);
const int nwidth = (int)(width_ * width_scale_);
const int nheight = (int)(height_ * height_scale_);
const int wskip = Rand(max(1,nwidth - crop_size_));
const int hskip = Rand(max(1,nheight - crop_size_));
crop_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>(buffer + n * total_, top_data + n * out_total,
wskip, hskip, size, width_ , out_size, out_width_);
// LOG(INFO)<<"=================";
// LOG(INFO)<<nheight<<","<<nwidth;
// LOG(INFO)<<crop_size_;
// LOG(INFO)<<nheight - crop_size_<<","<<nwidth - crop_size_;
// LOG(INFO)<<hskip<<","<<wskip;
// LOG(INFO)<<hskip+crop_size_<<","<<wskip+crop_size_;
// LOG(INFO)<<"=================";
}
}
template <typename Dtype>
void AugmentationLayer<Dtype>::do_variable_crop_gpu(const Dtype* bottom_data, Dtype* buffer, Dtype* top_data, Dtype* crop_data,
    const vector<Blob<Dtype>*>& top) { // bottom_data/buffer/top_data are device pointers, crop_data is a host pointer
const int size = height_ * width_;
// LOG(INFO) << ":::::::::::::::::::::::::::Starting:::::::::::::::::::::::::::";
  // do the rescale step first
if (size_protection_ == false) {
caffe_rng_uniform(1, width_min_scale_, width_max_scale_, &width_scale_);
if (fixed_ratio_)
height_scale_ = width_scale_;
else
caffe_rng_uniform(1, height_min_scale_, height_max_scale_, &height_scale_);
} else {
Dtype tmpratio = std::min(static_cast<Dtype>(max_size_)/static_cast<Dtype>(height_),
static_cast<Dtype>(max_size_)/static_cast<Dtype>(width_));
Dtype ratio = std::min(tmpratio, (Dtype)1.0);
caffe_rng_uniform(1, width_min_scale_, width_max_scale_, &width_scale_);
if (fixed_ratio_)
height_scale_ = width_scale_;
else
caffe_rng_uniform(1, height_min_scale_, height_max_scale_, &height_scale_);
width_scale_ *= ratio;
height_scale_ *= ratio;
// LOG(INFO) << "size_protection: " <<size_protection_;
// LOG(INFO) << "tmpRatio: " <<tmpratio;
// LOG(INFO) << "Ratio: " <<ratio;
}
// LOG(INFO) << ":::::::::::::::::::::::::::Rescale:::::::::::::::::::::::::::";
// LOG(INFO) << "Original size: " <<height_ <<" "<< width_;
// LOG(INFO) << "Rescale scale: " <<height_scale_ <<" "<< width_scale_;
const int nwidth = (int)(width_ * width_scale_);
const int nheight = (int)(height_ * height_scale_);
// LOG(INFO) << "Rescale size: " <<nheight <<" "<< nwidth;
rescale_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>(bottom_data, buffer,
width_scale_, height_scale_, size, width_, height_);
// LOG(INFO) << ":::::::::::::::::::::::::::Crop:::::::::::::::::::::::::::";
// LOG(INFO) << "Crop scale: " <<crop_scale_;
  const int crop_size_width = (int)(nwidth * crop_scale_);
  const int crop_size_height = (int)(nheight * crop_scale_);
const int wskip = Rand(max(1,nwidth - crop_size_width));
const int hskip = Rand(max(1,nheight - crop_size_height));
const int out_size = crop_size_height * crop_size_width;
// LOG(INFO) << "Crop size: " <<crop_size_height <<" "<< crop_size_width;
top[0]->Reshape(num_,channels_,crop_size_height,crop_size_width);
crop_kernel<Dtype><<<CAFFE_GET_BLOCKS(size),CAFFE_CUDA_NUM_THREADS>>>(buffer, top_data,
wskip, hskip, size, width_ , out_size, crop_size_width);
// LOG(INFO) << ":::::::::::::::::::::::::::Ending:::::::::::::::::::::::::::";
const int wrest = nwidth - wskip - crop_size_width;
const int hrest = nheight - hskip - crop_size_height;
crop_data[4] = height_scale_; crop_data[5] = width_scale_;
crop_data[2] = wskip; crop_data[3] = wrest;
crop_data[0] = hskip; crop_data[1] = hrest;
}
/***********************************FORWARD FUNCTION***********************************/
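// The forward pass chains the augmentations through a device-side ping-pong
// pair (bottom_buffer/top_buffer): LV1 pixel-wise ops, then LV2 geometric
// warps (optionally one picked at random from the LASSO list), then LV3
// rescale/crop, which writes the final result into top_data.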
template <typename Dtype>
void AugmentationLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* top_buffer;
Dtype* bottom_buffer;
// LOG(INFO)<<top[t]->channels()<<","<<top[t]->height()<<","<<top[t]->width();
cudaMalloc((void**) &bottom_buffer, count_*sizeof(Dtype));
cudaMalloc((void**) &top_buffer, count_*sizeof(Dtype));
cudaMemcpy(bottom_buffer, bottom_data, count_*sizeof(Dtype), cudaMemcpyHostToDevice);
//==========LV1==========//
//COLOR SHIFT
if ((this->layer_param_.augmentation_param().color_shift().execute() ==
AugmentationColorShiftParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().color_shift().execute() ==
AugmentationColorShiftParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_color_shift_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//GAUSSIAN NOISE
if ((this->layer_param_.augmentation_param().gaussian_noise().execute() ==
AugmentationGaussianNoiseParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().gaussian_noise().execute() ==
AugmentationGaussianNoiseParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_gaussian_noise_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//CONTRAST
if ((this->layer_param_.augmentation_param().contrast().execute() ==
AugmentationContrastParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().contrast().execute() ==
AugmentationContrastParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_contrast_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//FLARE
if ((this->layer_param_.augmentation_param().flare().execute() ==
AugmentationFlareParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().flare().execute() ==
AugmentationFlareParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_flare_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//BLUR
if ((this->layer_param_.augmentation_param().blur().execute() ==
AugmentationBlurParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().blur().execute() ==
AugmentationBlurParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_blur_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//COLOR_BIAS
if ((this->layer_param_.augmentation_param().color_bias().execute() ==
AugmentationColorBiasParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().color_bias().execute() ==
AugmentationColorBiasParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_color_bias_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//MIRROR
if ((this->layer_param_.augmentation_param().mirror().execute() ==
AugmentationMirrorParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().mirror().execute() ==
AugmentationMirrorParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_mirror_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);
if (output_crop_) { (top[1]->mutable_cpu_data())[6] = 1;} }
//SATURATION
if ((this->layer_param_.augmentation_param().saturation().execute() ==
AugmentationSaturationParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().saturation().execute() ==
AugmentationSaturationParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_saturation_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//MEAN
if (this->layer_param_.augmentation_param().mean().execute() ==
AugmentationMeanParameter_Execute_ALWAYS) {
do_mean_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//==========LV2==========//
//LV2 LASSO
vector<AUGFUNC> lv2_lasso;
if (this->layer_param_.augmentation_param().sin_warp().execute() ==
AugmentationSinWarpParameter_Execute_LASSO)
lv2_lasso.push_back(&AugmentationLayer<Dtype>::do_sin_warp_gpu);
if (this->layer_param_.augmentation_param().rotation().execute() ==
AugmentationRotationParameter_Execute_LASSO)
lv2_lasso.push_back(&AugmentationLayer<Dtype>::do_rotation_gpu);
if (this->layer_param_.augmentation_param().lens_distortion().execute() ==
AugmentationLensDistortionParameter_Execute_LASSO)
lv2_lasso.push_back(&AugmentationLayer<Dtype>::do_lens_distortion_gpu);
if (lv2_lasso.size()!=0 && Rand(2)) {
int run_lasso = Rand(lv2_lasso.size());
(this->*lv2_lasso[run_lasso])(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);
}
//SIN WARP
if ((this->layer_param_.augmentation_param().sin_warp().execute() ==
AugmentationSinWarpParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().sin_warp().execute() ==
AugmentationSinWarpParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_sin_warp_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//ROTATION
if ((this->layer_param_.augmentation_param().rotation().execute() ==
AugmentationRotationParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().rotation().execute() ==
AugmentationRotationParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_rotation_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//LENSDISTORTION
if ((this->layer_param_.augmentation_param().lens_distortion().execute() ==
AugmentationLensDistortionParameter_Execute_ALWAYS) ||
((this->layer_param_.augmentation_param().lens_distortion().execute() ==
AugmentationLensDistortionParameter_Execute_RANDOM) && (Rand(2)) ) ) {
do_lens_distortion_gpu(bottom_buffer, top_buffer);
cudaMemcpy(bottom_buffer, top_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);}
//==========LV3==========//
//LV3
if ( (this->layer_param_.augmentation_param().rescale().execute() == AugmentationRescaleParameter_Execute_NEVER) &&
(this->layer_param_.augmentation_param().crop().execute() == AugmentationCropParameter_Execute_NEVER) ) {
if (this->layer_param_.augmentation_param().variable_crop().execute() == AugmentationVariableCropParameter_Execute_NEVER)
cudaMemcpy(top_data, bottom_buffer, count_*sizeof(Dtype), cudaMemcpyDeviceToDevice);
else {
if (output_crop_) {
Dtype* crop_data = top[1]->mutable_cpu_data();
do_variable_crop_gpu(bottom_buffer, top_buffer, top_data, crop_data, top);
} else {
        Dtype* crop_data = new Dtype[6];  // do_variable_crop_gpu fills indices 0..5
do_variable_crop_gpu(bottom_buffer, top_buffer, top_data, crop_data, top);
delete[] crop_data;
}
}
}
else {
if (this->layer_param_.augmentation_param().rescale().execute() == AugmentationRescaleParameter_Execute_NEVER)
do_crop_gpu(bottom_buffer, top_data);
else
do_rescale_crop_gpu(bottom_buffer, top_buffer, top_data);
}
cudaFree(bottom_buffer);
cudaFree(top_buffer);
}
template <typename Dtype>
void AugmentationLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
}
INSTANTIATE_LAYER_GPU_FUNCS(AugmentationLayer);
} // namespace caffe
|
3f2172fa32afe5c482bd0a9fd6126428490c45d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define GAPX (22)
#define EXTENT (5)
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
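/* Each kernel applies the 3x3 stencil (weights 7 5 9 / 12 15 12 / 9 5 7,
   normalized by 118) four times per sweep, time-tiling through the two
   shared-memory buffers; the __copy_arr_* arrays carry the tile-boundary
   halo values that kernel 1 needs to patch the gaps left by kernel 0. */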
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX);
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_35__;
}
}
int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_6__+(M)*(__iter_7__)] = __tilevar_1__[__iter_7__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_6__,rowy)];
}
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_0__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)] = __temp_35__;
}
}
int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_12__+(M)*(__iter_13__)] = __tilevar_0__[__iter_13__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_12__,rowy)];
}
}
}
__syncthreads();
int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)] = __temp_35__;
}
}
int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_18__+(M)*(__iter_19__)] = __tilevar_1__[__iter_19__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_18__,rowy)];
}
}
}
__syncthreads();
int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__var_1__[__iter_23__+(M)*(__iter_22__)] = __temp_35__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
return SMemSize;
}
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX) + (int)FORMA_BLOCKDIM_X;
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
__tilevar_0__[__iter_3__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+(M)*(__iter_2__)];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_35__;
}
}
int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
if (__iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) & (__iter_7__ < FORMA_MAX((__iter_0__-1),1) | __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)))) {
__tilevar_1__[__iter_7__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*mod(__iter_6__,rowy)] = __copy_arr_0__[__iter_6__+(M)*(__iter_7__)];
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_11__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_11__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_11__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_11__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_11__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_11__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_11__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_11__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_11__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_0__[__iter_11__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)] = __temp_35__;
}
}
int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
if (__iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) & (__iter_13__ < FORMA_MAX((__iter_0__-2),1) | __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)))) {
__tilevar_0__[__iter_13__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*mod(__iter_12__,rowy)] = __copy_arr_1__[__iter_12__+(M)*(__iter_13__)];
}
}
__syncthreads();
int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_17__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_17__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_17__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_17__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_17__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_17__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_17__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_17__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_17__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_17__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)] = __temp_35__;
}
}
int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
if (__iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) & (__iter_19__ < FORMA_MAX((__iter_0__-3),1) | __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)))) {
__tilevar_1__[__iter_19__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*mod(__iter_18__,rowy)] = __copy_arr_2__[__iter_18__+(M)*(__iter_19__)];
}
}
__syncthreads();
int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_23__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_23__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_23__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_23__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_23__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_23__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_23__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_23__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_23__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__var_1__[__iter_23__+(M)*(__iter_22__)] = __temp_35__;
}
}
}
}
/*Device code End */
/* Host Code Begin */
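/* jacobi() allocates the device buffers, runs kernel 0 on tiles spaced GAPX
   columns apart and kernel 1 on the gap regions in between, then copies the
   result back; the memcpy-kind probes allow h_input/__var_0__ to already
   live on the device. */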
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
hipMalloc(&__copy_arr_0__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
hipMalloc(&__copy_arr_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
hipMalloc(&__copy_arr_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__copy_arr_0__);
hipFree(__copy_arr_1__);
hipFree(__copy_arr_2__);
}
/*Host Free End*/
| 3f2172fa32afe5c482bd0a9fd6126428490c45d9.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define GAPX (22)
#define EXTENT (5)
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX);
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_35__;
}
}
int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_7__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_7__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_6__+(M)*(__iter_7__)] = __tilevar_1__[__iter_7__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_6__,rowy)];
}
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_11__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_0__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)] = __temp_35__;
}
}
int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_13__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_13__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_13__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_12__+(M)*(__iter_13__)] = __tilevar_0__[__iter_13__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_12__,rowy)];
}
}
}
__syncthreads();
int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_17__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)] = __temp_35__;
}
}
int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_19__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_19__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_19__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_18__+(M)*(__iter_19__)] = __tilevar_1__[__iter_19__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_18__,rowy)];
}
}
}
__syncthreads();
int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_23__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__var_1__[__iter_23__+(M)*(__iter_22__)] = __temp_35__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(2*(FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
return SMemSize;
}
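/* Note: this sizing matches the two shared-memory tiles carved out in the
   kernels below -- 2 buffers of (FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X floats.
   The extra 16 rows appear to hold the halo rows carried across the four
   fused stencil sweeps of the time-tiled pipeline. */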
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX) + (int)FORMA_BLOCKDIM_X;
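  /* Kernel 1 offsets each block one tile-width to the right of kernel 0's
     tile and, judging from the GAPX-wide loop bounds below, recomputes the
     seam columns between kernel 0's tiles, reloading the halo values staged
     in the __copy_arr_* buffers. mod(row, rowy) wraps row indices into the
     circular shared-memory buffer of rowy = FORMA_BLOCKDIM_Y+16 rows. */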
for (int __iter_1__ = 0; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__-2,0) + (int)(threadIdx.x) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
__tilevar_0__[__iter_3__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+(M)*(__iter_2__)];
}
__syncthreads();
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__-1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_35__;
}
}
int __iter_6__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
int __iter_7__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
if (__iter_7__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) & (__iter_7__ < FORMA_MAX((__iter_0__-1),1) | __iter_7__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)))) {
__tilevar_1__[__iter_7__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*mod(__iter_6__,rowy)] = __copy_arr_0__[__iter_6__+(M)*(__iter_7__)];
}
}
__syncthreads();
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__-2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_11__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_11__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_11__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_11__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_11__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_11__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_11__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_11__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_11__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_0__[__iter_11__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)] = __temp_35__;
}
}
int __iter_12__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
int __iter_13__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
if (__iter_13__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) & (__iter_13__ < FORMA_MAX((__iter_0__-2),1) | __iter_13__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)))) {
__tilevar_0__[__iter_13__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*mod(__iter_12__,rowy)] = __copy_arr_1__[__iter_12__+(M)*(__iter_13__)];
}
}
__syncthreads();
int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__ = FORMA_MAX((__iter_0__-3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_17__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_17__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_17__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_17__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_17__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_0__[__iter_17__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_0__[__iter_17__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_17__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_17__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__tilevar_1__[__iter_17__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)] = __temp_35__;
}
}
int __iter_18__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
int __iter_19__ = FORMA_MAX((__iter_0__-5),1) + (int)(threadIdx.x) ;
if( __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
if (__iter_19__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) & (__iter_19__ < FORMA_MAX((__iter_0__-3),1) | __iter_19__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)))) {
__tilevar_1__[__iter_19__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*mod(__iter_18__,rowy)] = __copy_arr_2__[__iter_18__+(M)*(__iter_19__)];
}
}
__syncthreads();
int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__ = FORMA_MAX((__iter_0__-4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_2__ = (__tilevar_1__[__iter_23__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_5__ = (__tilevar_1__[__iter_23__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_6__ = (7 * __temp_2__ + 5 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_23__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__-1,rowy)]);
float __temp_10__ = (__temp_6__ + 9 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_23__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_23__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_18__ = (__temp_14__ + 15 * __temp_17__);
float __temp_21__ = (__tilevar_1__[__iter_23__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_22__ = (__temp_18__ + 12 * __temp_21__);
float __temp_25__ = (__tilevar_1__[__iter_23__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_26__ = (__temp_22__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_23__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_30__ = (__temp_26__ + 5 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_23__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__+1,rowy)]);
float __temp_34__ = (__temp_30__ + 7 * __temp_33__);
float __temp_35__ = (__temp_34__ / 118);
__var_1__[__iter_23__+(M)*(__iter_22__)] = __temp_35__;
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
cudaMalloc(&__copy_arr_0__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
cudaMalloc(&__copy_arr_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
cudaMalloc(&__copy_arr_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__copy_arr_0__);
cudaFree(__copy_arr_1__);
cudaFree(__copy_arr_2__);
}
/*Host Free End*/
|
b0e9bfa5bdb8e3ccef1fa77bb9624a0137d6b462.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
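/* A small worked example of steps 1-4 above (illustrative only, not part of
   the assignment code): for an LSB pass over [0 0 1 1 0 0 1],
     1) histogram           -> four 0s, three 1s
     2) exclusive scan      -> 0s start at index 0, 1s start at index 4
     3) relative offsets    -> [0 1 0 1 2 3 2]  (as given above)
     4) output position     =  scan[digit] + offset -> [0 1 4 5 2 3 6]
   so the 0s land in slots 0..3 and the 1s in slots 4..6. */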
__global__ void copy_data(unsigned int * d_histogram, unsigned int * d_scan){
d_scan[1] = d_histogram[0];
// printf("d_scan[1] : %d \n", d_scan[1]);
}
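// With numBits == 1 there are only two bins, so the exclusive scan of the
// histogram computed by the kernel above collapses to d_scan[0] = 0 (left
// over from the memset) and d_scan[1] = d_histogram[0]; a single-thread
// kernel is sufficient.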
__global__ void histogram(
unsigned int * d_histogram,
unsigned int *d_vals_src,
unsigned int mask,
unsigned int numElems,
unsigned int i)
{
int myId = blockDim.x * blockIdx.x + threadIdx.x;
// int tid = threadIdx.x;
if(myId >= numElems){
return;
}
unsigned int bin = (d_vals_src[myId] & mask) >> i;
atomicAdd(&d_histogram[bin],1);
}
__global__ void scan_across_block(unsigned int * d_sums, unsigned int * d_sums_cdf, unsigned int numElems){
extern __shared__ unsigned int sdata[];
int myId = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
sdata[ 2 * tid ] = 0;
sdata[ 2 * tid + 1 ] = 0;
if(2 * myId < numElems){
// unsigned int bin = (d_vals_src[2 * myId] & mask) >> i;
sdata[2 * tid ] = d_sums[2 * myId];
}
if (2 * myId +1 < numElems){
// unsigned int bin = (d_vals_src[2 * myId +1 ] & mask) >> i;
sdata[2 * tid + 1] = d_sums[2 *myId +1];
}
__syncthreads();
//reduce
for(unsigned int s = 1 ; s <= blockDim.x; s<<=1){
if(tid < blockDim.x /s){
sdata[s *(2*tid + 2) -1] += sdata[s*(2*tid +1) -1];
}
__syncthreads();
}
if(tid == 0){
sdata[2 * blockDim.x -1] = 0;
}
//down sweep
for( unsigned int s = blockDim.x ; s > 0 ; s >>=1){
if(tid < blockDim.x /s){
unsigned int tmp = sdata[s *(2*tid + 2) -1];
sdata[s*(2*tid +2) -1] += sdata[s*(2*tid +1) -1];
sdata[s*(2*tid +1) -1] = tmp;
}
__syncthreads();
}
if(2 * myId < numElems){
d_sums_cdf[2 * myId] = sdata[2 * tid];
}
if (2 * myId +1 < numElems){
d_sums_cdf[2 * myId + 1] = sdata[2 * tid + 1];
}
}
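// scan_across_block above is a work-efficient (Blelloch) exclusive scan run
// by a single block: the "reduce" loop is the upsweep that builds a sum tree
// in shared memory, thread 0 zeroes the root, and the "down sweep" loop
// pushes prefix sums back down. Each thread loads two elements, so a block of
// B threads scans 2*B values. For reference, it should agree with this serial
// exclusive scan (illustrative sketch only, not used by the sort):
// void exclusive_scan_ref(const unsigned int *in, unsigned int *out, size_t n) {
//   unsigned int running = 0;
//   for (size_t j = 0; j < n; ++j) { out[j] = running; running += in[j]; }
// }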
__global__ void compute_outputPos(
unsigned int * d_vals_src,
unsigned int *d_pos_dst,
unsigned int * d_cdf,
unsigned int * d_scan,
unsigned int * d_sums_cdf,
unsigned int numElems,
unsigned int mask,
unsigned int i)
{
int myId = blockDim.x * blockIdx.x + threadIdx.x;
if(myId >=numElems ) return;
unsigned int bin = (d_vals_src[myId] &mask)>> i;
unsigned int sum = d_sums_cdf[blockIdx.x] ;
// for(int j=0; j< blockIdx.x; j++){
// sum += d_sums[j];
// }
// printf("sum: %d\n",sum);
if(bin == 0){
d_pos_dst[myId] = d_scan[0] + myId - (sum+d_cdf[myId]);
}else{
d_pos_dst[myId] = d_scan[1] + sum + d_cdf[myId];
}
// printf("d_pos_dst[myid] : %d \n", d_pos_dst[myId]);
}
__global__ void scan_per_block(unsigned int * d_vals_src, unsigned int * d_cdf, unsigned int * d_sums,unsigned int numElems,unsigned int mask, unsigned int i)
{
extern __shared__ unsigned int sdata[];
int myId = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
sdata[ 2 * tid ] = 0;
sdata[ 2 * tid + 1 ] = 0;
if(2 * myId < numElems){
unsigned int bin = (d_vals_src[2 * myId] & mask) >> i;
sdata[2 * tid ] = bin;
}
if (2 * myId +1 < numElems){
unsigned int bin = (d_vals_src[2 * myId +1 ] & mask) >> i;
sdata[2 * tid + 1] = bin;
}
__syncthreads();
//reduce
for(unsigned int s = 1 ; s <= blockDim.x; s<<=1){
if(tid < blockDim.x /s){
sdata[s *(2*tid + 2) -1] += sdata[s*(2*tid +1) -1];
}
__syncthreads();
}
if(tid == 0){
d_sums[blockIdx.x] = sdata[2 * blockDim.x -1];
sdata[2 * blockDim.x -1] = 0;
}
//down sweep
for( unsigned int s = blockDim.x ; s > 0 ; s >>=1){
if(tid < blockDim.x /s){
unsigned int tmp = sdata[s *(2*tid + 2) -1];
sdata[s*(2*tid +2) -1] += sdata[s*(2*tid +1) -1];
sdata[s*(2*tid +1) -1] = tmp;
}
__syncthreads();
}
if(2 * myId < numElems){
d_cdf[2 * myId] = sdata[2 * tid];
}
if (2 * myId +1 < numElems){
d_cdf[2 * myId + 1] = sdata[2 * tid + 1];
}
}
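// scan_per_block uses the same upsweep/downsweep pattern, but scans the
// masked digit bits directly into d_cdf and, before zeroing the root,
// thread 0 saves the block total into d_sums[blockIdx.x] so that
// scan_across_block can turn the per-block totals into global offsets.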
__global__ void gather(
unsigned int * d_vals_src,
unsigned int * d_vals_dst,
unsigned int * d_pos_src,
unsigned int * d_pos_dst,
unsigned int * d_out_pos,
unsigned int numElems)
{
int myId = blockDim.x * blockIdx.x + threadIdx.x;
if(myId >= numElems){
return;
}
// unsigned int bin = (d_vals_src[myId]& mask) >> i;
d_vals_dst[d_out_pos[myId]] = d_vals_src[myId];
d_pos_dst[d_out_pos[myId]] = d_pos_src[myId];
// atomicAdd(&(d_scan[bin]), 1);
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
dim3 blockSize(256, 1 , 1);
dim3 gridSize((numElems + blockSize.x -1)/ blockSize.x, 1, 1 );
// printf(" numElems: %d gridSize.x : %d\n", numElems,gridSize.x);
dim3 blockS(blockSize.x /2, 1, 1);
const int numBits = 1;
const int numBins = 1 << numBits;
unsigned int *d_vals_src ;
unsigned int *d_pos_src ;
unsigned int *d_vals_dst ;
unsigned int *d_pos_dst ;
unsigned int * d_out_pos;
size_t size = sizeof(unsigned int ) * numElems;
checkCudaErrors(hipMalloc(&d_vals_src, size));
checkCudaErrors(hipMalloc(&d_pos_src, size));
checkCudaErrors(hipMalloc(&d_vals_dst, size));
checkCudaErrors(hipMalloc(&d_pos_dst, size));
checkCudaErrors(hipMalloc(&d_out_pos, size));
checkCudaErrors(hipMemcpy(d_vals_src , d_inputVals, size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_pos_src , d_inputPos, size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_vals_dst , d_outputVals, size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_pos_dst , d_outputPos, size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_out_pos , d_outputPos, size, hipMemcpyDeviceToDevice));
unsigned int * d_histogram;
unsigned int * d_scan;
unsigned int * d_cdf;
unsigned int * d_sums;
unsigned int * d_sums_cdf;
checkCudaErrors(hipMalloc(&d_histogram, sizeof(unsigned int)*numBins));
checkCudaErrors(hipMalloc(&d_scan, sizeof(unsigned int)*numBins));
checkCudaErrors(hipMalloc(&d_cdf, sizeof(unsigned int)*numElems));
checkCudaErrors(hipMalloc(&d_sums, sizeof(unsigned int)*(gridSize.x)));
checkCudaErrors(hipMalloc(&d_sums_cdf, sizeof(unsigned int)*(gridSize.x)));
unsigned int nextPow = gridSize.x ;
nextPow--;
nextPow = (nextPow >> 1) | nextPow;
nextPow = (nextPow >> 2) | nextPow;
nextPow = (nextPow >> 4) | nextPow;
nextPow = (nextPow >> 8) | nextPow;
nextPow = (nextPow >> 16) | nextPow;
nextPow++;
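  // The shift-or cascade above rounds gridSize.x up to the next power of two
  // (e.g. 37 -> 64) so the single-block scan_across_block sees a power-of-two
  // element count. Note this appears to assume nextPow/2 stays within the
  // device's maximum threads per block.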
dim3 blockSize1(nextPow/2, 1, 1);
// checkCudaErrors(hipMemset(d_histogram, 0 , sizeof(unsigned int)*numBins));
for(unsigned int i = 0 ; i< 32; i+=numBits){
unsigned int mask = (numBins -1) << i;
checkCudaErrors(hipMemset(d_histogram, 0 , sizeof(unsigned int)*numBins));
checkCudaErrors(hipMemset(d_scan, 0 , sizeof(unsigned int)*numBins));
checkCudaErrors(hipMemset(d_cdf, 0 , sizeof(unsigned int)*numElems));
checkCudaErrors(hipMemset(d_sums, 0 , sizeof(unsigned int)*(gridSize.x)));
checkCudaErrors(hipMemset(d_sums_cdf, 0 , sizeof(unsigned int)*(gridSize.x)));
// memset(h_scan, 0, sizeof(unsigned int ) *numBins);
// memset(h_histogram, 0, sizeof(unsigned int ) *numBins);
hipLaunchKernelGGL(( scan_per_block), dim3(gridSize), dim3(blockS), sizeof(unsigned int)* blockSize.x, 0, d_vals_src, d_cdf, d_sums, numElems,mask ,i);
hipLaunchKernelGGL(( scan_across_block), dim3(1), dim3(blockSize1), sizeof(unsigned int ) * nextPow, 0, d_sums, d_sums_cdf, gridSize.x);
hipLaunchKernelGGL(( histogram), dim3(gridSize), dim3(blockSize), 0, 0, d_histogram, d_vals_src, mask, numElems, i);
// checkCudaErrors(hipMemcpy(d_scan,d_histogram +1 , sizeof(unsigned int), hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( copy_data), dim3(1),dim3(1), 0, 0, d_histogram,d_scan);
// checkCudaErrors(hipMemcpy(h_histogram, d_histogram, numBins* sizeof(unsigned int), hipMemcpyDeviceToHost));
// hipLaunchKernelGGL(( compute_outputPos), dim3(gridSize),dim3(blockSize), 0, 0, d_vals_src, d_out_pos,d_cdf, d_scan,d_sums,numElems, mask ,i);
hipLaunchKernelGGL(( compute_outputPos), dim3(gridSize),dim3(blockSize), 0, 0, d_vals_src, d_out_pos,d_cdf, d_scan,d_sums_cdf,numElems, mask ,i);
hipLaunchKernelGGL(( gather), dim3(gridSize), dim3(blockSize), 0, 0, d_vals_src,d_vals_dst,d_pos_src,d_pos_dst,d_out_pos ,numElems);
std::swap(d_vals_dst, d_vals_src);
std::swap(d_pos_dst, d_pos_src);
}
checkCudaErrors(hipMemcpy(d_vals_dst, d_vals_src,size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_pos_dst, d_pos_src, size , hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy( d_inputVals,d_vals_src , size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy( d_inputPos,d_pos_src , size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy( d_outputVals,d_vals_dst , size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy( d_outputPos,d_pos_dst ,size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipFree(d_vals_src));
checkCudaErrors(hipFree(d_vals_dst));
checkCudaErrors(hipFree(d_pos_src));
checkCudaErrors(hipFree(d_pos_dst));
checkCudaErrors(hipFree(d_cdf));
checkCudaErrors(hipFree(d_sums));
checkCudaErrors(hipFree(d_sums_cdf));
  checkCudaErrors(hipFree(d_out_pos));
  checkCudaErrors(hipFree(d_histogram));
  checkCudaErrors(hipFree(d_scan));
// const int numBits = 1;
// const int numBins = 1 << numBits;
// unsigned int *binHistogram = new unsigned int[numBins];
// unsigned int *binScan = new unsigned int[numBins];
// unsigned int *vals_src = new unsigned int[numElems * sizeof(unsigned int)];
// unsigned int *pos_src = new unsigned int[numElems * sizeof(unsigned int)];
// memset(vals_src, 0 ,numElems * sizeof(unsigned int));
// memset(pos_src, 0 ,numElems * sizeof(unsigned int));
// checkCudaErrors(hipMemcpy(vals_src, d_inputVals, numElems * sizeof(unsigned int), hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(pos_src, d_inputPos, numElems * sizeof(unsigned int), hipMemcpyDeviceToHost));
// unsigned int *vals_dst = new unsigned int[numElems * sizeof(unsigned int)];
// unsigned int *pos_dst = new unsigned int[numElems * sizeof(unsigned int)];
// memset(vals_dst, 0 ,numElems * sizeof(unsigned int));
// memset(pos_dst, 0 ,numElems * sizeof(unsigned int));
// checkCudaErrors(hipMemcpy(vals_dst, d_outputVals, numElems * sizeof(unsigned int), hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(pos_dst, d_outputPos, numElems * sizeof(unsigned int), hipMemcpyDeviceToHost));
// //a simple radix sort - only guaranteed to work for numBits that are multiples of 2
// for (unsigned int i = 0; i < 8 * sizeof(unsigned int); i += numBits) {
// unsigned int mask = (numBins - 1) << i;
// memset(binHistogram, 0, sizeof(unsigned int) * numBins); //zero out the bins
// memset(binScan, 0, sizeof(unsigned int) * numBins); //zero out the bins
// //perform histogram of data & mask into bins
// for (unsigned int j = 0; j < numElems; ++j) {
// unsigned int bin = (vals_src[j] & mask) >> i;
// binHistogram[bin]++;
// }
// //perform exclusive prefix sum (scan) on binHistogram to get starting
// //location for each bin
// // for (unsigned int j = 1; j < numBins; ++j) {
// // binScan[j] = binScan[j - 1] + binHistogram[j - 1];
// // }
// binScan[1] = binScan[0] + binHistogram[0];
// //Gather everything into the correct location
// //need to move vals and positions
// for (unsigned int j = 0; j < numElems; ++j) {
// unsigned int bin = (vals_src[j] & mask) >> i;
// vals_dst[binScan[bin]] = vals_src[j];
// pos_dst[binScan[bin]] = pos_src[j];
// binScan[bin]++;
// }
// //swap the buffers (pointers only)
// std::swap(vals_dst, vals_src);
// std::swap(pos_dst, pos_src);
// }
// //we did an even number of iterations, need to copy from input buffer into output
// std::copy(vals_src, vals_src + numElems, vals_dst);
// std::copy(pos_src, pos_src + numElems, pos_dst);
// checkCudaErrors(hipMemcpy(d_outputVals, vals_dst, numElems*sizeof(unsigned),
// hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(d_outputPos, pos_dst, numElems*sizeof(unsigned),
// hipMemcpyHostToDevice));
// delete[] binHistogram;
// delete[] binScan;
}
| b0e9bfa5bdb8e3ccef1fa77bb9624a0137d6b462.cu | //Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
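/* A small worked example of steps 1-4 above (illustrative only, not part of
   the assignment code): for an LSB pass over [0 0 1 1 0 0 1],
     1) histogram           -> four 0s, three 1s
     2) exclusive scan      -> 0s start at index 0, 1s start at index 4
     3) relative offsets    -> [0 1 0 1 2 3 2]  (as given above)
     4) output position     =  scan[digit] + offset -> [0 1 4 5 2 3 6]
   so the 0s land in slots 0..3 and the 1s in slots 4..6. */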
__global__ void copy_data(unsigned int * d_histogram, unsigned int * d_scan){
d_scan[1] = d_histogram[0];
// printf("d_scan[1] : %d \n", d_scan[1]);
}
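// With numBits == 1 there are only two bins, so the exclusive scan of the
// histogram computed by the kernel above collapses to d_scan[0] = 0 (left
// over from the memset) and d_scan[1] = d_histogram[0]; a single-thread
// kernel is sufficient.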
__global__ void histogram(
unsigned int * d_histogram,
unsigned int *d_vals_src,
unsigned int mask,
unsigned int numElems,
unsigned int i)
{
int myId = blockDim.x * blockIdx.x + threadIdx.x;
// int tid = threadIdx.x;
if(myId >= numElems){
return;
}
unsigned int bin = (d_vals_src[myId] & mask) >> i;
atomicAdd(&d_histogram[bin],1);
}
__global__ void scan_across_block(unsigned int * d_sums, unsigned int * d_sums_cdf, unsigned int numElems){
extern __shared__ unsigned int sdata[];
int myId = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
sdata[ 2 * tid ] = 0;
sdata[ 2 * tid + 1 ] = 0;
if(2 * myId < numElems){
// unsigned int bin = (d_vals_src[2 * myId] & mask) >> i;
sdata[2 * tid ] = d_sums[2 * myId];
}
if (2 * myId +1 < numElems){
// unsigned int bin = (d_vals_src[2 * myId +1 ] & mask) >> i;
sdata[2 * tid + 1] = d_sums[2 *myId +1];
}
__syncthreads();
//reduce
for(unsigned int s = 1 ; s <= blockDim.x; s<<=1){
if(tid < blockDim.x /s){
sdata[s *(2*tid + 2) -1] += sdata[s*(2*tid +1) -1];
}
__syncthreads();
}
if(tid == 0){
sdata[2 * blockDim.x -1] = 0;
}
//down sweep
for( unsigned int s = blockDim.x ; s > 0 ; s >>=1){
if(tid < blockDim.x /s){
unsigned int tmp = sdata[s *(2*tid + 2) -1];
sdata[s*(2*tid +2) -1] += sdata[s*(2*tid +1) -1];
sdata[s*(2*tid +1) -1] = tmp;
}
__syncthreads();
}
if(2 * myId < numElems){
d_sums_cdf[2 * myId] = sdata[2 * tid];
}
if (2 * myId +1 < numElems){
d_sums_cdf[2 * myId + 1] = sdata[2 * tid + 1];
}
}
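// scan_across_block above is a work-efficient (Blelloch) exclusive scan run
// by a single block: the "reduce" loop is the upsweep that builds a sum tree
// in shared memory, thread 0 zeroes the root, and the "down sweep" loop
// pushes prefix sums back down. Each thread loads two elements, so a block of
// B threads scans 2*B values. For reference, it should agree with this serial
// exclusive scan (illustrative sketch only, not used by the sort):
// void exclusive_scan_ref(const unsigned int *in, unsigned int *out, size_t n) {
//   unsigned int running = 0;
//   for (size_t j = 0; j < n; ++j) { out[j] = running; running += in[j]; }
// }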
__global__ void compute_outputPos(
unsigned int * d_vals_src,
unsigned int *d_pos_dst,
unsigned int * d_cdf,
unsigned int * d_scan,
unsigned int * d_sums_cdf,
unsigned int numElems,
unsigned int mask,
unsigned int i)
{
int myId = blockDim.x * blockIdx.x + threadIdx.x;
if(myId >=numElems ) return;
unsigned int bin = (d_vals_src[myId] &mask)>> i;
unsigned int sum = d_sums_cdf[blockIdx.x] ;
// for(int j=0; j< blockIdx.x; j++){
// sum += d_sums[j];
// }
// printf("sum: %d\n",sum);
if(bin == 0){
d_pos_dst[myId] = d_scan[0] + myId - (sum+d_cdf[myId]);
}else{
d_pos_dst[myId] = d_scan[1] + sum + d_cdf[myId];
}
// printf("d_pos_dst[myid] : %d \n", d_pos_dst[myId]);
}
__global__ void scan_per_block(unsigned int * d_vals_src, unsigned int * d_cdf, unsigned int * d_sums,unsigned int numElems,unsigned int mask, unsigned int i)
{
extern __shared__ unsigned int sdata[];
int myId = blockDim.x * blockIdx.x + threadIdx.x;
int tid = threadIdx.x;
sdata[ 2 * tid ] = 0;
sdata[ 2 * tid + 1 ] = 0;
if(2 * myId < numElems){
unsigned int bin = (d_vals_src[2 * myId] & mask) >> i;
sdata[2 * tid ] = bin;
}
if (2 * myId +1 < numElems){
unsigned int bin = (d_vals_src[2 * myId +1 ] & mask) >> i;
sdata[2 * tid + 1] = bin;
}
__syncthreads();
//reduce
for(unsigned int s = 1 ; s <= blockDim.x; s<<=1){
if(tid < blockDim.x /s){
sdata[s *(2*tid + 2) -1] += sdata[s*(2*tid +1) -1];
}
__syncthreads();
}
if(tid == 0){
d_sums[blockIdx.x] = sdata[2 * blockDim.x -1];
sdata[2 * blockDim.x -1] = 0;
}
//down sweep
for( unsigned int s = blockDim.x ; s > 0 ; s >>=1){
if(tid < blockDim.x /s){
unsigned int tmp = sdata[s *(2*tid + 2) -1];
sdata[s*(2*tid +2) -1] += sdata[s*(2*tid +1) -1];
sdata[s*(2*tid +1) -1] = tmp;
}
__syncthreads();
}
if(2 * myId < numElems){
d_cdf[2 * myId] = sdata[2 * tid];
}
if (2 * myId +1 < numElems){
d_cdf[2 * myId + 1] = sdata[2 * tid + 1];
}
}
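// scan_per_block uses the same upsweep/downsweep pattern, but scans the
// masked digit bits directly into d_cdf and, before zeroing the root,
// thread 0 saves the block total into d_sums[blockIdx.x] so that
// scan_across_block can turn the per-block totals into global offsets.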
__global__ void gather(
unsigned int * d_vals_src,
unsigned int * d_vals_dst,
unsigned int * d_pos_src,
unsigned int * d_pos_dst,
unsigned int * d_out_pos,
unsigned int numElems)
{
int myId = blockDim.x * blockIdx.x + threadIdx.x;
if(myId >= numElems){
return;
}
// unsigned int bin = (d_vals_src[myId]& mask) >> i;
d_vals_dst[d_out_pos[myId]] = d_vals_src[myId];
d_pos_dst[d_out_pos[myId]] = d_pos_src[myId];
// atomicAdd(&(d_scan[bin]), 1);
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
dim3 blockSize(256, 1 , 1);
dim3 gridSize((numElems + blockSize.x -1)/ blockSize.x, 1, 1 );
// printf(" numElems: %d gridSize.x : %d\n", numElems,gridSize.x);
dim3 blockS(blockSize.x /2, 1, 1);
const int numBits = 1;
const int numBins = 1 << numBits;
unsigned int *d_vals_src ;
unsigned int *d_pos_src ;
unsigned int *d_vals_dst ;
unsigned int *d_pos_dst ;
unsigned int * d_out_pos;
size_t size = sizeof(unsigned int ) * numElems;
checkCudaErrors(cudaMalloc(&d_vals_src, size));
checkCudaErrors(cudaMalloc(&d_pos_src, size));
checkCudaErrors(cudaMalloc(&d_vals_dst, size));
checkCudaErrors(cudaMalloc(&d_pos_dst, size));
checkCudaErrors(cudaMalloc(&d_out_pos, size));
checkCudaErrors(cudaMemcpy(d_vals_src , d_inputVals, size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_pos_src , d_inputPos, size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_vals_dst , d_outputVals, size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_pos_dst , d_outputPos, size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_out_pos , d_outputPos, size, cudaMemcpyDeviceToDevice));
unsigned int * d_histogram;
unsigned int * d_scan;
unsigned int * d_cdf;
unsigned int * d_sums;
unsigned int * d_sums_cdf;
checkCudaErrors(cudaMalloc(&d_histogram, sizeof(unsigned int)*numBins));
checkCudaErrors(cudaMalloc(&d_scan, sizeof(unsigned int)*numBins));
checkCudaErrors(cudaMalloc(&d_cdf, sizeof(unsigned int)*numElems));
checkCudaErrors(cudaMalloc(&d_sums, sizeof(unsigned int)*(gridSize.x)));
checkCudaErrors(cudaMalloc(&d_sums_cdf, sizeof(unsigned int)*(gridSize.x)));
unsigned int nextPow = gridSize.x ;
nextPow--;
nextPow = (nextPow >> 1) | nextPow;
nextPow = (nextPow >> 2) | nextPow;
nextPow = (nextPow >> 4) | nextPow;
nextPow = (nextPow >> 8) | nextPow;
nextPow = (nextPow >> 16) | nextPow;
nextPow++;
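  // The shift-or cascade above rounds gridSize.x up to the next power of two
  // (e.g. 37 -> 64) so the single-block scan_across_block sees a power-of-two
  // element count. Note this appears to assume nextPow/2 stays within the
  // device's maximum threads per block.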
dim3 blockSize1(nextPow/2, 1, 1);
// checkCudaErrors(cudaMemset(d_histogram, 0 , sizeof(unsigned int)*numBins));
for(unsigned int i = 0 ; i< 32; i+=numBits){
unsigned int mask = (numBins -1) << i;
checkCudaErrors(cudaMemset(d_histogram, 0 , sizeof(unsigned int)*numBins));
checkCudaErrors(cudaMemset(d_scan, 0 , sizeof(unsigned int)*numBins));
checkCudaErrors(cudaMemset(d_cdf, 0 , sizeof(unsigned int)*numElems));
checkCudaErrors(cudaMemset(d_sums, 0 , sizeof(unsigned int)*(gridSize.x)));
checkCudaErrors(cudaMemset(d_sums_cdf, 0 , sizeof(unsigned int)*(gridSize.x)));
// memset(h_scan, 0, sizeof(unsigned int ) *numBins);
// memset(h_histogram, 0, sizeof(unsigned int ) *numBins);
scan_per_block<<<gridSize, blockS, sizeof(unsigned int)* blockSize.x>>>(d_vals_src, d_cdf, d_sums, numElems,mask ,i);
scan_across_block<<<1, blockSize1, sizeof(unsigned int ) * nextPow>>>(d_sums, d_sums_cdf, gridSize.x);
histogram<<<gridSize, blockSize>>>(d_histogram, d_vals_src, mask, numElems, i);
// checkCudaErrors(cudaMemcpy(d_scan,d_histogram +1 , sizeof(unsigned int), cudaMemcpyDeviceToDevice));
copy_data<<<1,1>>>(d_histogram,d_scan);
// checkCudaErrors(cudaMemcpy(h_histogram, d_histogram, numBins* sizeof(unsigned int), cudaMemcpyDeviceToHost));
// compute_outputPos<<<gridSize,blockSize>>>(d_vals_src, d_out_pos,d_cdf, d_scan,d_sums,numElems, mask ,i);
compute_outputPos<<<gridSize,blockSize>>>(d_vals_src, d_out_pos,d_cdf, d_scan,d_sums_cdf,numElems, mask ,i);
gather<<<gridSize, blockSize>>>(d_vals_src,d_vals_dst,d_pos_src,d_pos_dst,d_out_pos ,numElems);
std::swap(d_vals_dst, d_vals_src);
std::swap(d_pos_dst, d_pos_src);
}
checkCudaErrors(cudaMemcpy(d_vals_dst, d_vals_src,size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_pos_dst, d_pos_src, size , cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy( d_inputVals,d_vals_src , size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy( d_inputPos,d_pos_src , size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy( d_outputVals,d_vals_dst , size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy( d_outputPos,d_pos_dst ,size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaFree(d_vals_src));
checkCudaErrors(cudaFree(d_vals_dst));
checkCudaErrors(cudaFree(d_pos_src));
checkCudaErrors(cudaFree(d_pos_dst));
checkCudaErrors(cudaFree(d_cdf));
checkCudaErrors(cudaFree(d_sums));
checkCudaErrors(cudaFree(d_sums_cdf));
  checkCudaErrors(cudaFree(d_out_pos));
  checkCudaErrors(cudaFree(d_histogram));
  checkCudaErrors(cudaFree(d_scan));
// const int numBits = 1;
// const int numBins = 1 << numBits;
// unsigned int *binHistogram = new unsigned int[numBins];
// unsigned int *binScan = new unsigned int[numBins];
// unsigned int *vals_src = new unsigned int[numElems * sizeof(unsigned int)];
// unsigned int *pos_src = new unsigned int[numElems * sizeof(unsigned int)];
// memset(vals_src, 0 ,numElems * sizeof(unsigned int));
// memset(pos_src, 0 ,numElems * sizeof(unsigned int));
// checkCudaErrors(cudaMemcpy(vals_src, d_inputVals, numElems * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(pos_src, d_inputPos, numElems * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// unsigned int *vals_dst = new unsigned int[numElems * sizeof(unsigned int)];
// unsigned int *pos_dst = new unsigned int[numElems * sizeof(unsigned int)];
// memset(vals_dst, 0 ,numElems * sizeof(unsigned int));
// memset(pos_dst, 0 ,numElems * sizeof(unsigned int));
// checkCudaErrors(cudaMemcpy(vals_dst, d_outputVals, numElems * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(pos_dst, d_outputPos, numElems * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// //a simple radix sort - only guaranteed to work for numBits that are multiples of 2
// for (unsigned int i = 0; i < 8 * sizeof(unsigned int); i += numBits) {
// unsigned int mask = (numBins - 1) << i;
// memset(binHistogram, 0, sizeof(unsigned int) * numBins); //zero out the bins
// memset(binScan, 0, sizeof(unsigned int) * numBins); //zero out the bins
// //perform histogram of data & mask into bins
// for (unsigned int j = 0; j < numElems; ++j) {
// unsigned int bin = (vals_src[j] & mask) >> i;
// binHistogram[bin]++;
// }
// //perform exclusive prefix sum (scan) on binHistogram to get starting
// //location for each bin
// // for (unsigned int j = 1; j < numBins; ++j) {
// // binScan[j] = binScan[j - 1] + binHistogram[j - 1];
// // }
// binScan[1] = binScan[0] + binHistogram[0];
// //Gather everything into the correct location
// //need to move vals and positions
// for (unsigned int j = 0; j < numElems; ++j) {
// unsigned int bin = (vals_src[j] & mask) >> i;
// vals_dst[binScan[bin]] = vals_src[j];
// pos_dst[binScan[bin]] = pos_src[j];
// binScan[bin]++;
// }
// //swap the buffers (pointers only)
// std::swap(vals_dst, vals_src);
// std::swap(pos_dst, pos_src);
// }
// //we did an even number of iterations, need to copy from input buffer into output
// std::copy(vals_src, vals_src + numElems, vals_dst);
// std::copy(pos_src, pos_src + numElems, pos_dst);
// checkCudaErrors(cudaMemcpy(d_outputVals, vals_dst, numElems*sizeof(unsigned),
// cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(d_outputPos, pos_dst, numElems*sizeof(unsigned),
// cudaMemcpyHostToDevice));
// delete[] binHistogram;
// delete[] binScan;
}
|
6294e295d730958b5f31efd671ceda1ad44fb6f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "depthflowprojection_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#define min(a,b) (((a)<(b))?(a):(b))
#define max(a,b) (((a)>(b))?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif
using at::Half;
//forward path of our layer
template <typename scalar_t>
__global__ void DepthFlowProjection_gpu_forward_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ];
float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ];
float x2 = (float) (w_i) + fx;
float y2 = (float) (h_i) + fy;
if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){
int ix2_L = (int) (x2);
int iy2_T = (int) (y2);
int ix2_R = min(ix2_L + 1, w - 1);
int iy2_B = min(iy2_T + 1, h - 1);
float temp = input2[batch_i * input2_b_stride + 0 + h_i * input2_h_stride + w_i];
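    // Forward splat: the flow at (w_i, h_i), negated and weighted by temp
    // (presumably the depth/importance value from input2), is scattered with
    // atomicAdd into the four integer pixels around the target (x2, y2);
    // `count` accumulates the same weights so the averaging kernel below can
    // normalize the projected flow.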
    atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L], -temp * fx);
    atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R], -temp * fx);
    atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L], -temp * fx);
    atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R], -temp * fx);
    atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L], -temp * fy);
    atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R], -temp * fy);
    atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L], -temp * fy);
    atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R], -temp * fy);
    atomicAdd(&count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], temp * 1);
    atomicAdd(&count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R], temp * 1);
    atomicAdd(&count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L], temp * 1);
    atomicAdd(&count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R], temp * 1);
}
}
return ;
}
template <typename scalar_t>
__global__ void DepthFlowProjectionAveraging_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp > 0.0f){
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
}
}
return ;
}
template <typename scalar_t>
__global__ void DepthFlowFillhole_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp <= 0.0f){
//search along the four directions,0/90/180/270, until finding at least one
int left_offset = w_i; float left_temp = 0.0f;
while(left_temp == 0.0f && left_offset - 1 >= 0){
left_offset = left_offset - 1;
left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset] ;
}
int right_offset = w_i ; float right_temp = 0.0f;
while(right_temp ==0.0f && right_offset + 1 <= w - 1 ){
right_offset = right_offset + 1 ;
right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset] ;
}
int up_offset = h_i ; float up_temp = 0.0f;
while(up_temp == 0.0f && up_offset - 1 >=0){
up_offset = up_offset - 1;
up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i ] ;
}
int down_offset = h_i; float down_temp = 0.0f;
while(down_temp == 0.0f && down_offset + 1 <= h - 1 ){
down_offset = down_offset + 1;
down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i] ;
}
if(left_temp + right_temp + up_temp + down_temp <=0.0f){
//printf("Can't fill hole, find no neighbor vectors availabel\n");
return;
}
left_temp = (left_temp > 0.0f)?1:0;
right_temp = (right_temp > 0.0f)?1:0;
up_temp = (up_temp > 0.0f)?1:0;
down_temp = (down_temp > 0.0f)?1:0;
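    // The four temps are collapsed to 0/1 indicators, so the hole is filled
    // with the plain average of the nearest valid flow vector found in each
    // of the four search directions.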
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] = (
left_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] =(
left_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
}
}
return ;
}
template <typename scalar_t>
__global__ void DepthFlowProjection_gpu_backward_kernelfunc(
const int nElement, const int w, const int h, const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2,
scalar_t* count,
scalar_t* output,
const scalar_t* __restrict__ gradoutput,
scalar_t* gradinput1,
scalar_t* gradinput2
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
if(withinXbounds && withinYbounds){
float fx = input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float x2 = (float) ( w_i ) + fx;
float y2 = (float) ( h_i ) + fy;
if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){
int ix2_L = (int)(x2);
int iy2_T = (int)(y2);
int ix2_R = min(ix2_L + 1, w-1);
int iy2_B = min(iy2_T + 1, h-1);
float temp = input2[batch_i * input2_b_stride + 0 + h_i * input2_h_stride + w_i];
int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] * temp /
count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ] * temp /
count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] * temp /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] * temp /
count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ;
int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] * temp /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] * temp /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] * temp /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] * temp /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ;
int weight_offset = batch_i * input2_b_stride + 0 + h_i * input2_h_stride + w_i;
gradinput2[weight_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] *
(fx - output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] );
gradinput2[weight_offset] += -gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] *
(fx - output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ] );
gradinput2[weight_offset] += -gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] *
(fx - output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] );
gradinput2[weight_offset] += -gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] *
(fx - output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ] );
gradinput2[weight_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] *
(fy - output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] );
gradinput2[weight_offset] += -gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] *
(fy - output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ] );
gradinput2[weight_offset] += -gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] *
(fy - output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] );
gradinput2[weight_offset] += -gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] *
(fy - output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ] );
}
}
return ;
}
int DepthFlowProjection_gpu_forward_kernel(
hipStream_t stream, const int nElement,
const int w, const int h, const int channel, const int batch, const int fillhole,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1, at::Tensor& input2,
at::Tensor& count,
at::Tensor& output
)
{
int error = -1;
dim3 grid;
dim3 block;
// blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
// printf("I am here\n");
//extract the data of CudaTensor and use kernel to calculate.
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_forward", ([&] {
hipLaunchKernelGGL(( DepthFlowProjection_gpu_forward_kernelfunc), dim3(grid),dim3(block),0, stream ,
nElement, // number of elements to process
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am there\n");
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjectionAveraging", ([&] {
hipLaunchKernelGGL(( DepthFlowProjectionAveraging_kernelfunc), dim3(grid),dim3(block),0,stream,
nElement, // number of elements to process
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
// printf("I am kao\n");
// THCudaCheck(hipGetLastError());
err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am dd\n");
if(fillhole){
// printf("use flow fill hole\n");
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowFillhole", ([&] {
hipLaunchKernelGGL(( DepthFlowFillhole_kernelfunc), dim3(grid),dim3(block),0,stream,
nElement, // number of elements to process
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
return error;
}
}
error = 0;
return error;
}
int DepthFlowProjection_gpu_backward_kernel(
hipStream_t stream,
const int nElement,
const int w,
const int h,
const int channel,
const int batch,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1, at::Tensor& input2,
at::Tensor& count, at::Tensor& output,
at::Tensor& gradoutput,
at::Tensor& gradinput1,
at::Tensor& gradinput2
)
{
int error = -1;
dim3 grid;
dim3 block;
//blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {
hipLaunchKernelGGL(( DepthFlowProjection_gpu_backward_kernelfunc) , dim3(grid),dim3(block),0, stream,
nElement, // number of elements to process
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>(),
gradoutput.data<scalar_t>(), gradinput1.data<scalar_t>(), gradinput2.data<scalar_t>()
);
}));
// printf("gpu I am there\n");
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpu error in BilinearSampler.updateGradInput %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
// printf("gpu I am here\n");
error = 0;
return error;
} | 6294e295d730958b5f31efd671ceda1ad44fb6f6.cu | #include <stdio.h>
#include "depthflowprojection_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif
using at::Half;
//forward path of our layer
template <typename scalar_t>
__global__ void DepthFlowProjection_gpu_forward_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimensioon of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ];
float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ];
float x2 = (float) (w_i) + fx;
float y2 = (float) (h_i) + fy;
if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){
int ix2_L = (int) (x2);
int iy2_T = (int) (y2);
int ix2_R = min(ix2_L + 1, w - 1);
int iy2_B = min(iy2_T + 1, h - 1);
float temp = input2[batch_i * input2_b_stride + 0 + h_i * input2_h_stride + w_i];
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ,- temp * fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ],-temp * fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ,-temp * fx);
atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ],-temp * fx);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] , -temp * fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] , -temp * fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] , -temp * fy);
atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] , -temp * fy);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], temp * 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ,temp * 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] , temp * 1);
atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ,temp * 1);
}
}
return ;
}
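// The projection runs in two passes: the kernel above scatters -depth*flow
// (and the depth itself into 'count') with atomicAdd, since many source
// pixels may land on the same target; the averaging kernel below then
// normalizes each target by its accumulated depth,
//   output[q] = sum_p(-depth_p * flow_p) / sum_p(depth_p),
// i.e. a depth-weighted average of the splatted flows.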
template <typename scalar_t>
__global__ void DepthFlowProjectionAveraging_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimensioon of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp > 0.0f){
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp;
}
}
return ;
}
template <typename scalar_t>
__global__ void DepthFlowFillhole_kernelfunc(
const int nElement,
const int w,
const int h,
const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2,
scalar_t* count,
scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
//only use one dimensioon of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ;
if(temp <= 0.0f){
//search along the four directions, 0/90/180/270 degrees, until at least one valid neighbor is found
int left_offset = w_i; float left_temp = 0.0f;
while(left_temp == 0.0f && left_offset - 1 >= 0){
left_offset = left_offset - 1;
left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset] ;
}
int right_offset = w_i ; float right_temp = 0.0f;
while(right_temp ==0.0f && right_offset + 1 <= w - 1 ){
right_offset = right_offset + 1 ;
right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset] ;
}
int up_offset = h_i ; float up_temp = 0.0f;
while(up_temp == 0.0f && up_offset - 1 >=0){
up_offset = up_offset - 1;
up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i ] ;
}
int down_offset = h_i; float down_temp = 0.0f;
while(down_temp == 0.0f && down_offset + 1 <= h - 1 ){
down_offset = down_offset + 1;
down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i] ;
}
if(left_temp + right_temp + up_temp + down_temp <=0.0f){
//printf("Can't fill hole, find no neighbor vectors availabel\n");
return;
}
left_temp = (left_temp > 0.0f)?1:0;
right_temp = (right_temp > 0.0f)?1:0;
up_temp = (up_temp > 0.0f)?1:0;
down_temp = (down_temp > 0.0f)?1:0;
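// The *_temp values are now 0/1 indicators of which scan directions found a
// valid neighbor, so the expressions below are a plain average over the
// available neighbor vectors.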
output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] = (
left_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] =(
left_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset]+
up_temp * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
down_temp * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
)/(
left_temp + right_temp + up_temp + down_temp
) ;
}
}
return ;
}
template <typename scalar_t>
__global__ void DepthFlowProjection_gpu_backward_kernelfunc(
const int nElement, const int w, const int h, const int channel,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2,
scalar_t* count,
scalar_t* output,
const scalar_t* __restrict__ gradoutput,
scalar_t* gradinput1,
scalar_t* gradinput2
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w;
const bool withinYbounds = h_i < h;
const int batch_i = blockIdx.z;
const int off = batch_i * input1_b_stride;
// __syncthreads();
if(withinXbounds && withinYbounds){
float fx = input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] ;
float x2 = (float) ( w_i ) + fx;
float y2 = (float) ( h_i ) + fy;
if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){
int ix2_L = (int)(x2);
int iy2_T = (int)(y2);
int ix2_R = min(ix2_L + 1, w-1);
int iy2_B = min(iy2_T + 1, h-1);
float temp = input2[batch_i * input2_b_stride + 0 + h_i * input2_h_stride + w_i];
int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] * temp /
count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ;
gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ] * temp /
count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] * temp /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] * temp /
count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ;
int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] * temp /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] * temp /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] * temp /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ;
gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] * temp /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ;
int weight_offset = batch_i * input2_b_stride + 0 + h_i * input2_h_stride + w_i;
gradinput2[weight_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] *
(fx - output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] );
gradinput2[weight_offset] += -gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] *
(fx - output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ] );
gradinput2[weight_offset] += -gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] *
(fx - output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] );
gradinput2[weight_offset] += -gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] *
(fx - output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ] );
gradinput2[weight_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] *
(fy - output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] );
gradinput2[weight_offset] += -gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] /
count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] *
(fy - output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ] );
gradinput2[weight_offset] += -gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] *
(fy - output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] );
gradinput2[weight_offset] += -gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] /
count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] *
(fy - output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ] );
}
}
return ;
}
int DepthFlowProjection_gpu_forward_kernel(
cudaStream_t stream, const int nElement,
const int w, const int h, const int channel, const int batch, const int fillhole,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1, at::Tensor& input2,
at::Tensor& count,
at::Tensor& output
)
{
int error = -1;
dim3 grid;
dim3 block;
// blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
// printf("I am here\n");
//extract the data of CudaTensor and use kernel to calculate.
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_forward", ([&] {
DepthFlowProjection_gpu_forward_kernelfunc<<<grid,block,0, stream >>>(
nElement, // number of elements to process
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am there\n");
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjectionAveraging", ([&] {
DepthFlowProjectionAveraging_kernelfunc<<<grid,block,0,stream>>>(
nElement, // number of elements to process
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
// printf("I am kao\n");
// THCudaCheck(cudaGetLastError());
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("I am dd\n");
if(fillhole){
// printf("use flow fill hole\n");
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowFillhole", ([&] {
DepthFlowFillhole_kernelfunc<<<grid,block,0,stream>>>(
nElement, // number of elements to process
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>()
);
}));
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
return error;
}
}
error = 0;
return error;
}
int DepthFlowProjection_gpu_backward_kernel(
cudaStream_t stream,
const int nElement,
const int w,
const int h,
const int channel,
const int batch,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
at::Tensor& input1, at::Tensor& input2,
at::Tensor& count, at::Tensor& output,
at::Tensor& gradoutput,
at::Tensor& gradinput1,
at::Tensor& gradinput2
)
{
int error = -1;
dim3 grid;
dim3 block;
//blockthread = 128;
//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {
DepthFlowProjection_gpu_backward_kernelfunc <<<grid,block,0, stream>>>(
nElement, // number of elements to process
w,h,channel,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
count_b_stride,count_c_stride,count_h_stride,count_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),count.data<scalar_t>(),output.data<scalar_t>(),
gradoutput.data<scalar_t>(), gradinput1.data<scalar_t>(), gradinput2.data<scalar_t>()
);
}));
// printf("gpu I am there\n");
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpu error in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
// printf("gpu I am here\n");
error = 0;
return error;
} |
57a3d712c2a48337d36203f1eff7088cbbee864d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Brent_Kung_scan_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
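// Benchmark harness: for every matrix size above and each of the 20 block
// shapes, the main loop below warms up with 10 launches, times 1000 more,
// and prints one [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] record.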
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *X = NULL;
hipMalloc(&X, XSIZE*YSIZE);
float *Y = NULL;
hipMalloc(&Y, XSIZE*YSIZE);
int InputSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
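// The two loops above round iXSIZE/iYSIZE up to the next multiple of the
// block dims so the grid division below is exact; this is equivalent to the
// usual ceil-division, e.g.
//   gridBlock = dim3((XSIZE + BLOCKX - 1)/BLOCKX, (YSIZE + BLOCKY - 1)/BLOCKY);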
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(Brent_Kung_scan_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, X, Y, InputSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Brent_Kung_scan_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, X,Y,InputSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Brent_Kung_scan_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, X,Y,InputSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
hipFree(X); // release the buffers before the next configuration to avoid leaking device memory across the sweep
hipFree(Y);
}
}} | 57a3d712c2a48337d36203f1eff7088cbbee864d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Brent_Kung_scan_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *X = NULL;
cudaMalloc(&X, XSIZE*YSIZE);
float *Y = NULL;
cudaMalloc(&Y, XSIZE*YSIZE);
int InputSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Brent_Kung_scan_kernel<<<gridBlock,threadBlock>>>(X,Y,InputSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Brent_Kung_scan_kernel<<<gridBlock,threadBlock>>>(X,Y,InputSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Brent_Kung_scan_kernel<<<gridBlock,threadBlock>>>(X,Y,InputSize);
}
cudaDeviceSynchronize(); // block until all launches finish so the timer measures kernel time, not launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
cudaFree(X); // release the buffers before the next configuration to avoid leaking device memory across the sweep
cudaFree(Y);
}
}} |
cb6e598edda826ee22433d94ca9a6f1f121f122a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include "gpu_hmma.h"
#include <stdio.h>
#if __CUDA_ARCH__ >= 700
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
// 32x64x16 warp tile
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(256) hgemm_bst_64x64x64_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 64+16;
const uint stdB = 64+16;
const uint stdC = 512+4;
__shared__ float fShare[stdC*16];
ehalf* hShare = (ehalf*)fShare;
uint2* Lut2s = (uint2*)&fShare[stdC*16];
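// Layout note: the padded strides (stdA/stdB = 64+16 halfs, stdC = 512+4
// floats) stagger rows across shared-memory banks to limit bank conflicts.
// Lut2s points just past the static fShare allocation; the launcher is
// presumably expected to reserve extra shared memory there for the LUT cache.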
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
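// div64 is a magic-number division by the runtime constant grid_N, roughly
//   idx_MN / grid_N == (uint)(((uint64_t)idx_MN * magic_N) >> shift_N)
// with magic_N/shift_N assumed precomputed on the host, avoiding a hardware
// integer divide in every thread.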
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
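// With a lower-triangular layout, larger row indices carry more lut entries,
// so flipping idx_M launches the longest reductions first and shortens the
// tail of the wave.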
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint tx = tid % 8;
uint ty = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 256)
{
uint2 entry = Lut[i];
entry.x *= 64*64; // 4096 entries of A per block
entry.y *= szHeadState*64; // 64 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint storAB = ty*stdA + tx*8; // assume stdA == stdB
uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA, (tid & 192)*(OP_A == OP_N ? 1 : stdA)*16/64 + (tid & 32)*(OP_A == OP_N ? stdA : 1));
uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, (tid & 192)*stdB*16/64 + stdA*64);
uint b = idx_N*64 + tx*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*8;
uint offsetB = idx_B*szCtxHeadStateB + ty*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
fragmentC<OP_A,OP_N,M16N16K16> fragC[2][4];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b00 = {0};
uint4 b32 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint4 a00 = *(uint4*)&A[entry.x + 0*64];
uint4 a32 = *(uint4*)&A[entry.x + 32*64];
if (inB)
{
b00 = *(uint4*)&B[entry.y + 0*szHeadState];
b32 = *(uint4*)&B[entry.y + 32*szHeadState];
}
__syncthreads();
*(uint4*)&hShare[storAB + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storAB + 32*stdA + 0*stdA] = a32;
*(uint4*)&hShare[storAB + 0*stdB + 64*stdA] = b00;
*(uint4*)&hShare[storAB + 32*stdB + 64*stdA] = b32;
__syncthreads();
fragmentA<OP_A,M16N16K16> fragA[2];
fragmentB<OP_N,M16N16K16> fragB[4];
for (int i = 0; i < 2; i++)
fragA[i].load(hShare, loadA + (OP_A == OP_N ? stdA : 1)*i*16, stdA);
for (int i = 0; i < 4; i++)
fragB[i].load(hShare, loadB + i*16, stdB);
for (int i = 0; i < 2; i++)
for (int j = 0; j < 4; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++idx_lut < lut_size);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, (tid & 224)*2);
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*64 + tyc)*szHeadState + idx_H*szState + c;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 4; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int j = 0; j < 2; j++)
*(uint2*)&C[offsetC + szHeadState*(j*32 + i*16)] = to_half4(
ew_add(
ew_add(
*(float4*)&fShare[loadC + j*64 + 0*128],
*(float4*)&fShare[loadC + j*64 + 1*128]),
ew_add(
*(float4*)&fShare[loadC + j*64 + 2*128],
*(float4*)&fShare[loadC + j*64 + 3*128])
)
);
}
}
}
else
{
uint c = idx_N*64 + tx*8;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*64 + ty)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState* 0] = zero;
*(uint4*)&C[offsetC + szHeadState*32] = zero;
}
}
}
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128) hgemm_bst_32x64x32_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 48;
const uint stdB = 80;
const uint stdC = 132;
__shared__ ehalf hShare[(stdA + stdB)*32];
float* fShare = (float*)hShare;
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*32];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 8;
uint tyb = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 128)
{
uint2 entry = Lut[i];
entry.x *= 32*32; // 1024 entries of A per block
entry.y *= szHeadState*32; // 32 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 4;
uint tya = tid / 4;
uint storA = tya*stdA + txa*8;
uint storB = tyb*stdB + txb*8 + stdA*32;
uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA, (tid & 64)*(OP_A == OP_N ? 1 : stdA)*16/64);
uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, (tid & 64)*stdB*16/64 + (tid & 32) + stdA*32);
uint b = idx_N*64 + txb*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*8;
uint offsetB = idx_B*szCtxHeadStateB + tyb*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
fragmentC<OP_A,OP_N,M16N16K16> fragC[2][2];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b00 = {0};
uint4 b16 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint4 a00 = *(uint4*)&A[entry.x];
if (inB)
{
b00 = *(uint4*)&B[entry.y + 0*szHeadState];
b16 = *(uint4*)&B[entry.y + 16*szHeadState];
}
__syncthreads();
*(uint4*)&hShare[storA] = a00;
*(uint4*)&hShare[storB + 0*stdB] = b00;
*(uint4*)&hShare[storB + 16*stdB] = b16;
__syncthreads();
fragmentA<OP_A,M16N16K16> fragA[2];
fragmentB<OP_N,M16N16K16> fragB[2];
for (int i = 0; i < 2; i++)
{
fragA[i].load(hShare, loadA + (OP_A == OP_N ? stdA : 1)*i*16, stdA);
fragB[i].load(hShare, loadB + i*16, stdB);
}
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++idx_lut < lut_size);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, tid & 96);
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tyc)*szHeadState + idx_H*szState + c;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int j = 0; j < 2; j++)
*(uint2*)&C[offsetC + szHeadState*(j*8 + i*16)] = to_half4(ew_add(
*(float4*)&fShare[loadC + stdC*j*8 + 0],
*(float4*)&fShare[loadC + stdC*j*8 + 64]));
}
}
}
else
{
uint c = idx_N*64 + txb*8;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState* 0] = zero;
*(uint4*)&C[offsetC + szHeadState*16] = zero;
}
}
}
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_bst_16x64x16_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 16;
const uint stdB = 80;
const uint stdC = 68;
__shared__ ehalf hShare[(stdA + stdB)*16];
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 8;
uint tyb = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 64)
{
uint2 entry = Lut[i];
entry.x *= 16*16; // 256 entries of A per block
entry.y *= szHeadState*16; // 16 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 4;
uint tya = tid / 4;
uint storA = tya*stdA + txa*4;
uint storB = tyb*stdB + txb*8 + 16*stdA;
uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA);
uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, 16*stdA + (tid & 32));
uint b = idx_N*64 + txb*8;
bool inB = N64 || b < szState;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*4;
uint offsetB = idx_B*szCtxHeadStateB + tyb*szHeadState + idx_H*szState + b;
fragmentC<OP_A,OP_N,M16N16K16> fragC[2];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b0 = {0};
uint4 b8 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint2 a0 = *(uint2*)&A[entry.x];
if (inB)
{
b0 = *(uint4*)&B[entry.y + 0*szHeadState];
b8 = *(uint4*)&B[entry.y + 8*szHeadState];
}
__syncthreads();
*(uint2*)&hShare[storA] = a0;
*(uint4*)&hShare[storB + 0*stdB] = b0;
*(uint4*)&hShare[storB + 8*stdB] = b8;
__syncthreads();
fragmentA<OP_A,M16N16K16> fragA;
fragmentB<OP_N,M16N16K16> fragB;
fragA.load(hShare, loadA, stdA);
#pragma unroll
for (int j = 0; j < 2; j++)
{
fragB.load(hShare, loadB + j*16, stdB);
fragC[j].mma_sync(fragA, fragB);
}
} while (++idx_lut < lut_size);
// allow assembler to forget these registers in the main loop
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// use thread stride of 4 to allow use of shared stride of 68
// which minimizes shared bank conflicts on write.
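// (68 halfs = 34 4-byte words per row, so successive rows start two banks
// apart in the 32-bank shared memory instead of stacking on one bank column.)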
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, tid & 32);
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*16 + tyc)*szHeadState + idx_H*szState + c;
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[j].store(hShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int i = 0; i < 4; i++)
*(uint2*)&C[offsetC + szHeadState*i*4] = *(uint2*)&hShare[loadC + stdC*i*4];
}
}
else
{
uint c = idx_N*64 + txb*8;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*16 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState*0] = zero;
*(uint4*)&C[offsetC + szHeadState*8] = zero;
}
}
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_bst_8x64x8_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 8;
const uint stdB = 80;
const uint stdC = 68;
__shared__ ehalf hShare[(stdA + stdB)*16];
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 64)
{
uint2 entry = Lut[i];
entry.x *= 8*8; // 64 entries of A per block
entry.y *= szHeadState*8; // 8 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint t32 = tid & 32;
uint t31 = tid & 31;
uint txb = tid % 8;
uint tyb = t31 / 8;
uint storA = tid*2;
uint storB = tyb*stdB + txb*8 + t32*20 + 16*stdA;
uint loadA = fragmentA<OP_A,M8N32K16>::get_idx(tid, stdA);
uint loadB = fragmentB<OP_N,M8N32K16>::get_idx(tid, stdB, t32 + 16*stdA);
uint b = idx_N*64 + txb*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + t31*2;
uint offsetB = idx_B*szCtxHeadStateB + tyb*szHeadState + idx_H*szState + b;
fragmentC<OP_A,OP_N,M8N32K16> fragC;
uint idx_lut = t32 / 32;
uint idx_lut2 = 0;
uint lut_size2 = (lut_size + 1)/2;
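// The two warps of this 64-thread block (selected via t32) interleave over
// the lut with stride 2, each covering every other 8x8 block; lut_size2 is
// the rounded-up number of such paired steps.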
#pragma unroll 1
do
{
uint a0 = 0;
uint4 b0 = {0};
uint4 b4 = {0};
if (idx_lut < lut_size)
{
uint2 entry = Lut2s[idx_lut];
entry.x += offsetA;
entry.y += offsetB;
a0 = *(uint*)&A[entry.x];
if (b < szState)
{
b0 = *(uint4*)&B[entry.y + 0*szHeadState];
b4 = *(uint4*)&B[entry.y + 4*szHeadState];
}
}
__syncthreads();
*(uint* )&hShare[storA ] = a0;
*(uint4*)&hShare[storB + 0*stdB] = b0;
*(uint4*)&hShare[storB + 4*stdB] = b4;
__syncthreads();
fragmentA<OP_A,M8N32K16> fragA;
fragmentB<OP_N,M8N32K16> fragB;
fragA.load(hShare, loadA, stdA);
fragB.load(hShare, loadB, stdB);
fragC.mma_sync(fragA, fragB);
idx_lut += 2;
} while (++idx_lut2 < lut_size2);
// allow assembler to forget these registers in the main loop
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// use thread stride of 4 to allow use of shared stride of 68
// which minimizes shared bank conflicts on write.
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,M8N32K16>::get_idx(tid, stdC, tid & 32);
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*8 + tyc)*szHeadState + idx_H*szState + c;
__syncthreads();
fragC.store(hShare, storC, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int i = 0; i < 2; i++)
*(uint2*)&C[offsetC + szHeadState*i*4] = *(uint2*)&hShare[loadC + stdC*i*4];
}
}
else
{
uint txc = tid % 8;
uint tyc = tid / 8;
uint c = idx_N*64 + txc*8;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*8 + tyc)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState*0] = zero;
}
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// A is dense, B is dense, C is sparse
// 32x32x32 warp tile
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(256) hgemm_bst_64x64x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 64 + 8;
const uint stdB = 64 + 8;
const uint stdC = 64*4 + 4;
__shared__ ehalf hShare[(stdA + stdB)*64];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadStateA + (idx_M*64 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadStateB + (idx_N*64 + ty)*szHeadState + idx_H*szState + k;
uint offsetA32 = offsetA00 + szHeadState*32;
uint offsetB32 = offsetB00 + szHeadState*32;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdA, (tid & 64)*stdA*32/64 + (tid & 128)*32/128 + 0*stdA);
uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdB, (tid & 32)*stdB*32/32 + (tid & 128)*32/128 + 64*stdA);
fragmentC<OP_N,OP_T,M16N16K16> fragC[2][2]; // m,n
uint loop = 0;
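// Main reduction loop: each iteration consumes a 64-wide slice of the K
// (state) dimension; when K64 is set (szState a multiple of 64) the bounds
// check below compiles away.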
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a00 = {0}, a32 = {0};
uint4 b00 = {0}, b32 = {0};
if (K64 || k < szState)
{
a00 = *(uint4*)&A[offsetA00];
a32 = *(uint4*)&A[offsetA32];
b00 = *(uint4*)&B[offsetB00];
b32 = *(uint4*)&B[offsetB32];
}
offsetA00 += 64;
offsetA32 += 64;
offsetB00 += 64;
offsetB32 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storA + 32*stdA + 0*stdA] = a32;
*(uint4*)&hShare[storB + 0*stdB + 64*stdA] = b00;
*(uint4*)&hShare[storB + 32*stdB + 64*stdA] = b32;
__syncthreads();
fragmentA<OP_N,M16N16K16> fragA[2][2]; // m,k
fragmentB<OP_T,M16N16K16> fragB[2][2]; // n,k
for (int m = 0; m < 2; m++)
for (int k = 0; k < 2; k++)
fragA[m][k].load(hShare, loadA + m*16*stdA + k*16, stdA);
for (int n = 0; n < 2; n++)
for (int k = 0; k < 2; k++)
fragB[n][k].load(hShare, loadB + n*16*stdB + k*16, stdB);
for (int m = 0; m < 2; m++)
for (int n = 0; n < 2; n++)
for (int k = 0; k < 2; k++)
fragC[m][n].mma_sync(fragA[m][k], fragB[n][k]);
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
uint txc = tid % 16;
uint tyc = tid / 16;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 224));
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*64*64 + tid*4;
for (int m = 0; m < 2; m++)
{
__syncthreads();
for (int n = 0; n < 2; n++)
fragC[m][n].store(fShare, storC + n*16, stdC);
__syncthreads();
for (int i = 0; i < 2; i++)
{
float4 sum4 = ew_add(
*(float4*)&fShare[loadC + i*64 + 0*128],
*(float4*)&fShare[loadC + i*64 + 1*128]
);
store((CV*)(C + 64*(i*32 + m*16)), sum4);
}
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// A is dense, B is dense, C is sparse
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(128) hgemm_bst_32x32x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 72;
const uint stdB = 72;
const uint stdC = 132;
__shared__ ehalf hShare[(stdA + stdB)*32];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadStateA + (idx_M*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadStateB + (idx_N*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetA16 = offsetA00 + szHeadState*16;
uint offsetB16 = offsetB00 + szHeadState*16;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdA, (tid & 96)/2);
uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdB, (tid & 96)/2 + stdA*32);
fragmentC<OP_N,OP_T,M16N16K16> fragC[2][2];
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a00 = {0}, a16 = {0};
uint4 b00 = {0}, b16 = {0};
if (K64 || k < szState)
{
a00 = *(uint4*)&A[offsetA00];
a16 = *(uint4*)&A[offsetA16];
b00 = *(uint4*)&B[offsetB00];
b16 = *(uint4*)&B[offsetB16];
}
offsetA00 += 64;
offsetA16 += 64;
offsetB00 += 64;
offsetB16 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storA + 16*stdA + 0*stdA] = a16;
*(uint4*)&hShare[storB + 0*stdB + 32*stdA] = b00;
*(uint4*)&hShare[storB + 16*stdB + 32*stdA] = b16;
__syncthreads();
fragmentA<OP_N,M16N16K16> fragA[2];
fragmentB<OP_T,M16N16K16> fragB[2];
for (int i = 0; i < 2; i++)
{
fragA[i].load(hShare, loadA + stdA*i*16, stdA);
fragB[i].load(hShare, loadB + stdB*i*16, stdB);
}
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
tx = tid % 8;
ty = tid / 8;
uint loadC = ty*stdC + tx*4;
uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 96));
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*32*32 + tid*4;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
float4 sum4 = ew_add(
ew_add(
*(float4*)&fShare[loadC + 0],
*(float4*)&fShare[loadC + 32]),
ew_add(
*(float4*)&fShare[loadC + 64],
*(float4*)&fShare[loadC + 96]));
store((CV*)(C + i*4*128), sum4);
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// dds: dense x dense -> sparse (A is dense, B is dense, C is sparse)
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(64) hgemm_bst_16x16x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 72;
const uint stdB = 72;
const uint stdC = 48;
__shared__ ehalf hShare[(stdA + stdB)*16];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA0 = idx_B*szCtxHeadStateA + (idx_M*16 + ty)*szHeadState + idx_H*szState + k;
uint offsetB0 = idx_B*szCtxHeadStateB + (idx_N*16 + ty)*szHeadState + idx_H*szState + k;
uint offsetA8 = offsetA0 + szHeadState*8;
uint offsetB8 = offsetB0 + szHeadState*8;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdA, (tid & 32));
uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdB, (tid & 32) + 16*stdA);
fragmentC<OP_N,OP_T,M16N16K16> fragC;
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a0 = {0}, a8 = {0};
uint4 b0 = {0}, b8 = {0};
if (K64 || k < szState)
{
a0 = *(uint4*)&A[offsetA0];
a8 = *(uint4*)&A[offsetA8];
b0 = *(uint4*)&B[offsetB0];
b8 = *(uint4*)&B[offsetB8];
}
offsetA0 += 64;
offsetA8 += 64;
offsetB0 += 64;
offsetB8 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a0;
*(uint4*)&hShare[storA + 8*stdA + 0*stdA] = a8;
*(uint4*)&hShare[storB + 0*stdB + 16*stdA] = b0;
*(uint4*)&hShare[storB + 8*stdB + 16*stdA] = b8;
__syncthreads();
fragmentA<OP_N,M16N16K16> fragA;
fragmentB<OP_T,M16N16K16> fragB;
#pragma unroll
for (uint j = 0; j < 2; j++)
{
fragA.load(hShare, loadA + j*16, stdA);
fragB.load(hShare, loadB + j*16, stdB);
fragC.mma_sync(fragA, fragB);
}
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
tx = tid % 4;
ty = tid / 4;
uint loadC = ty*stdC + tx*4;
uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 32)/2);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*16*16 + tid*4;
__syncthreads();
fragC.store(fShare, storC, stdC);
__syncthreads();
float4 sum4 = ew_add(
*(float4*)&fShare[loadC + 0],
*(float4*)&fShare[loadC + 16]);
store((CV*)C, sum4);
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(32) hgemm_bst_8x8x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdAB = 72;
const uint stdC = 8;
__shared__ ehalf hShare[stdAB*8*2];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA0 = idx_B*szCtxHeadStateA + (idx_M*8 + ty)*szHeadState + idx_H*szState + k;
uint offsetB0 = idx_B*szCtxHeadStateB + (idx_N*8 + ty)*szHeadState + idx_H*szState + k;
uint offsetA4 = offsetA0 + szHeadState*4;
uint offsetB4 = offsetB0 + szHeadState*4;
uint storAB = ty*stdAB + k;
uint loadA = fragmentA<OP_N,M8N8K16>::get_idx(tid, stdAB, 0*stdAB);
uint loadB = fragmentB<OP_T,M8N8K16>::get_idx(tid, stdAB, 8*stdAB);
fragmentC<OP_N,OP_T,M8N8K16> fragC;
uint loop = 0;
#pragma unroll 1
do
{
uint4 a0 = {0}, a4 = {0};
uint4 b0 = {0}, b4 = {0};
if (K64 || k < szState)
{
a0 = *(uint4*)&A[offsetA0];
a4 = *(uint4*)&A[offsetA4];
b0 = *(uint4*)&B[offsetB0];
b4 = *(uint4*)&B[offsetB4];
}
offsetA0 += 64;
offsetA4 += 64;
offsetB0 += 64;
offsetB4 += 64;
if (!K64)
k += 64;
*(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a0;
*(uint4*)&hShare[storAB + 4*stdAB + 0*stdAB] = a4;
*(uint4*)&hShare[storAB + 0*stdAB + 8*stdAB] = b0;
*(uint4*)&hShare[storAB + 4*stdAB + 8*stdAB] = b4;
fragmentA<OP_N,M8N8K16> fragA;
fragmentB<OP_T,M8N8K16> fragB;
#pragma unroll
for (uint j = 0; j < 4; j++)
{
fragA.load(hShare, loadA + j*16, stdAB);
fragB.load(hShare, loadB + j*16, stdAB);
fragC.mma_sync(fragA, fragB);
}
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
uint storC = fragmentC<OP_N,OP_T,M8N8K16>::get_idx(tid, stdC);
fragC.store(fShare, storC, stdC);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*8*8 + tid*2;
store((CV*)C, *(float2*)&fShare[tid*2]);
}
#else // __CUDA_ARCH__ >= 700
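// The tensor-core (hmma) kernels above require sm_70+; the bodies below are
// placeholder stubs so this module still compiles for older architectures.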
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(256) hgemm_bst_64x64x64_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128) hgemm_bst_32x64x32_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_bst_16x64x16_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_bst_8x64x8_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(256) hgemm_bst_64x64x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(128) hgemm_bst_32x32x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(64) hgemm_bst_16x16x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(32) hgemm_bst_8x8x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
#endif // __CUDA_ARCH__ >= 700
bool blocksparse_transformer_xn(hipStream_t stream,
const uint2* lut,
const ehalf* a,
const ehalf* b,
ehalf* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks_b, uint ctx_blks_c, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim, uint op, uint magic, uint shift, uint max_lut)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState;
uint szCtxHeadStateC = ctx_blks_c * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
// compound gridDim.x with m and n coords
uint gridN = CEIL_DIV(state_dim, 64);
uint gridM = ctx_blks_c - 1;
uint gridX = ctx_blks_c * gridN;
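// magic/shift are precomputed by the caller so the kernel can recover
// idx_M = idx_MN / gridN with a cheap multiply-shift (div64) instead of an integer divide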
uint shared = ((max_lut+1)/2)*2*8; // round up to nearest even, 8 bytes per entry
dim3 grid(gridX, batch_dim, head_dim);
if (op == 1) // NN
{
if (block_size == 8)
hipLaunchKernelGGL(( hgemm_bst_8x64x8_xn<OP_N,false>), dim3(grid), dim3(64),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 16)
hipLaunchKernelGGL(( hgemm_bst_16x64x16_xn<OP_N,false>), dim3(grid), dim3(64),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 32)
hipLaunchKernelGGL(( hgemm_bst_32x64x32_xn<OP_N,false>), dim3(grid),dim3(128),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hipLaunchKernelGGL(( hgemm_bst_64x64x64_xn<OP_N,false>), dim3(grid),dim3(256),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
else // TN
{
if (block_size == 8)
hipLaunchKernelGGL(( hgemm_bst_8x64x8_xn<OP_T,false>), dim3(grid), dim3(64),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 16)
hipLaunchKernelGGL(( hgemm_bst_16x64x16_xn<OP_T,false>), dim3(grid), dim3(64),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 32)
hipLaunchKernelGGL(( hgemm_bst_32x64x32_xn<OP_T,false>), dim3(grid),dim3(128),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hipLaunchKernelGGL(( hgemm_bst_64x64x64_xn<OP_T,false>), dim3(grid),dim3(256),shared,stream, lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
return true;
}
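// Usage sketch (illustrative only; the device pointers and the lut/magic/shift/max_lut
// values are hypothetical and assumed to be prepared by the caller):
// blocksparse_transformer_xn(stream, d_lut, d_a, d_b, d_c,
// /*block_size=*/32, blocks, batch, ctx_blks_b, ctx_blks_c,
// heads, state, lut_heads, lut_dim, /*op=*/1, magic, shift, max_lut);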
template <typename CT, typename CV2, typename CV4>
bool blocksparse_transformer_nt(hipStream_t stream,
const uint2* lut,
const ehalf* a,
const ehalf* b,
CT* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks_a, uint ctx_blks_b, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadStateA = ctx_blks_a * block_size * szHeadState;
uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint loops = CEIL_DIV(state_dim, 64);
bool k64 = (state_dim & 63) == 0;
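// the K64=true specialization elides the per-loop k < szState bounds checks
// when state_dim is an even multiple of 64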
dim3 grid(blocks, batch_dim, head_dim);
if (block_size == 8)
{
if (k64)
hipLaunchKernelGGL(( hgemm_bst_8x8x64_nt<CT,CV2, true>), dim3(grid), dim3(32),0,stream, lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hipLaunchKernelGGL(( hgemm_bst_8x8x64_nt<CT,CV2,false>), dim3(grid), dim3(32),0,stream, lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
else if (block_size == 16)
{
if (k64)
hipLaunchKernelGGL(( hgemm_bst_16x16x64_nt<CT,CV4, true>), dim3(grid), dim3(64),0,stream, lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hipLaunchKernelGGL(( hgemm_bst_16x16x64_nt<CT,CV4,false>), dim3(grid), dim3(64),0,stream, lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
else if (block_size == 32)
hipLaunchKernelGGL(( hgemm_bst_32x32x64_nt<CT,CV4,false>), dim3(grid),dim3(128),0,stream, lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hipLaunchKernelGGL(( hgemm_bst_64x64x64_nt<CT,CV4,false>), dim3(grid),dim3(256),0,stream, lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
// hipError_t error = hipGetLastError();
// printf("%s\n%s\n", hipGetErrorName(error), hipGetErrorString(error));
return true;
}
template bool blocksparse_transformer_nt<ehalf,ehalf2,ehalf4>(hipStream_t stream, const uint2* lut, const ehalf* a, const ehalf* b, ehalf* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks_a, uint ctx_blks_b, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim);
template bool blocksparse_transformer_nt<bhalf,bhalf2,bhalf4>(hipStream_t stream, const uint2* lut, const ehalf* a, const ehalf* b, bhalf* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks_a, uint ctx_blks_b, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim);
template <uint U, uint BSIZE, typename MASKT>
__global__ void blocksparse_masked_softmax(
const uint2* __restrict__ Lut,
const MASKT* __restrict__ Mask,
const bhalf* __restrict__ X,
ehalf* Y,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint use_mask)
{
__shared__ float Max[32];
__shared__ float Sum[32];
uint2* Lut2s = (uint2*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / BSIZE; // index of the Q block row
uint idx_q = blockIdx.x % BSIZE; // row within the Q block
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
{
uint2 entry = Lut[i];
entry.y = use_mask ? (uint)__ldg(Mask + entry.x) : 0xffffffff;
entry.x *= BSIZE*BSIZE;
Lut2s[i] = entry;
//printf("%3d %3d %3d %08x\n", idx_Q, idx_q, i, entry.y);
}
__syncthreads();
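// each contiguous group of BSIZE threads owns U consecutive lut entries;
// (tid & ~(BSIZE-1))*U/BSIZE is that group's first entry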
uint lut_idx = (tid & (1024-BSIZE))*U/BSIZE;
uint tidx = tid % BSIZE;
uint mask_bit = 1 << tidx;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx;
float xval[U];
#pragma unroll
for (int i = 0; i < U; i++)
{
uint2 entry = Lut2s[lut_idx + i];
uint offsetX = offset + entry.x;
bool in = lut_idx + i < lut_size;
float val = load(X + offsetX, 0, in);
xval[i] = in && (entry.y & mask_bit) != 0 ? val : -FLT_MAX;
}
// reduce within thread
float Xmax[U];
for (int i = 0; i < U; i++)
Xmax[i] = xval[i];
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xmax[i] = fmaxf(Xmax[i], Xmax[i+j]);
float xmax = Xmax[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// subtract xmax (numerical stability) and compute the softmax exponent
float Xsum[U];
for (int i = 0; i < U; i++)
{
// use fast approx math: e**x == 2**(x * log2(e)); log2(e) is pre-folded into scale on the host
float exp = (xval[i] - xmax) * scale;
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(exp) :);
Xsum[i] = xval[i] = exp;
}
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xsum[i] = Xsum[i] + Xsum[i+j];
float exp_sum = Xsum[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
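// y = exp((x - xmax)*scale) / exp_sum, computed as a multiply by the approximate reciprocal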
float rcp_exp_sum = exp_sum;
asm("rcp.approx.ftz.f32 %0, %0;" : "+f"(rcp_exp_sum) :);
#pragma unroll
for (int i = 0; i < U; i++)
{
ehalf out;
asm("cvt.rn.f16.f32 %0, %1;" : "=h"(out.x) : "f"(xval[i] * rcp_exp_sum));
uint offsetY = offset + Lut2s[lut_idx + i].x;
if (lut_idx + i < lut_size)
__stg(Y + offsetY, out);
}
}
template <uint U, uint BSIZE, typename MASKT>
__global__ void blocksparse_masked_softmax_grad(
const uint2* __restrict__ Lut,
const MASKT* __restrict__ Mask,
const ehalf* __restrict__ DY,
const ehalf* __restrict__ Y,
ehalf* DX,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint use_mask)
{
__shared__ float Sum[32];
uint2* Lut2s = (uint2*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / BSIZE; // index of the Q block row
uint idx_q = blockIdx.x % BSIZE; // row within the Q block
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut and/or mask
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
Sum[tid] = 0.0f;
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
{
uint2 entry = Lut[i];
entry.y = use_mask ? (uint)__ldg(Mask + entry.x) : 0xffffffff;
entry.x *= BSIZE*BSIZE;
Lut2s[i] = entry;
}
__syncthreads();
uint lut_idx = (tid & (1024-BSIZE))*U/BSIZE;
uint tidx = tid % BSIZE;
uint mask_bit = 1 << tidx;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx;
float dy[U], y[U];
#pragma unroll
for (int i = 0; i < U; i++)
{
uint2 entry = Lut2s[lut_idx + i];
uint offsetY = offset + entry.x;
bool in = lut_idx + i < lut_size && (entry.y & mask_bit) != 0;
dy[i] = load(DY + offsetY, 0, in);
y[i] = load(Y + offsetY, 0, in);
}
// compute dy * y
float dyy[U];
for (int i = 0; i < U; i++)
dyy[i] = dy[i] * y[i];
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
dyy[i] = dyy[i] + dyy[i+j];
float sum_dyy = dyy[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
// dx = (dy - sum_dyy) * y * scale
#pragma unroll
for (int i = 0; i < U; i++)
{
float dx = (dy[i] - sum_dyy) * y[i] * scale;
ehalf out;
asm("cvt.rn.f16.f32 %0, %1;" : "=h"(out.x) : "f"(dx));
uint offsetX = offset + Lut2s[lut_idx + i].x;
if (lut_idx + i < lut_size)
__stg(DX + offsetX, out);
}
}
typedef unsigned long long uint64;
template <uint UNROLL>
__global__ void __launch_bounds__(1024,2) blocksparse_masked_softmax_64x64(
const uint2* __restrict__ Lut,
const uint64* __restrict__ Mask,
const bhalf* __restrict__ X,
ehalf* Y,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint max_lut, uint use_mask)
{
__shared__ float Max[32];
__shared__ float Sum[32];
uint64* LutMask64 = (uint64*)&Sum[32];
uint* LutMask32 = (uint*)&Sum[32];
uint* LutOffset = (uint*)&LutMask64[max_lut];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / 64; // index of the Q block row
uint idx_q = blockIdx.x % 64; // row within the Q block
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < max_lut; i += blockDim.x)
{
uint64 mask = 0;
if (i < lut_size)
{
uint2 entry = Lut[i];
uint blk_id = entry.x;
LutOffset[i] = blk_id * 64*64;
mask = use_mask ? __ldg(Mask + blk_id) : 0xffffffffffffffff;
}
LutMask64[i] = mask;
}
__syncthreads();
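// each warp owns UNROLL consecutive lut entries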
uint lut_idx = (tid & (1024-32))*UNROLL/32;
uint tidx = (tid & 31)*2;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*64 + tidx + LutOffset[lut_idx];
X += offset;
bhalf2 xval[UNROLL];
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
// nvcc/ptxas is really bad at generating sass that maximizes use of immediate offsets.
// This means way more registers are tied up in memory load addresses than are needed.
// This kernel's performance is hugely dependent on efficient register use so give the compiler a hand:
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 X, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%2, 0}; \n\t"
"add.s64 X, %1, offset; \n\t"
"mov.u32 %0, 0xff80ff80; \n\t" // bhalf2 -inf, -inf
"@p ld.global.nc.u32 %0, [X]; \n\t"
"}" :"=r"(xval[i].x) : "l"(X), "r"(i*64*64*2), "r"(lut_idx + i), "r"(lut_size));
}
// split the 64 bit mask by half warp
uint tid16 = (tid & 16)/16;
uint mask0 = 1 << (tidx - tid16*32);
uint mask1 = mask0 << 1;
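// lanes 0-15 test bits 0..31 (low word), lanes 16-31 test bits 32..63 (high word);
// each lane covers the two adjacent columns tidx and tidx+1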
#pragma unroll
for (int i = 0; i < UNROLL; i++)
{
uint mask32 = LutMask32[(lut_idx + i)*2 + tid16];
if ((mask32 & mask0) == 0)
xval[i].x = (xval[i].x & 0xffff0000) | 0x0000ff80; // low half = bfloat16 -inf (0xfc00 is the fp16 equivalent)
if ((mask32 & mask1) == 0)
xval[i].x = (xval[i].x & 0x0000ffff) | 0xff800000;
}
// reduce within thread
float Xmax[UNROLL];
for (int i = 0; i < UNROLL; i++)
Xmax[i] = ew_max(to_float(xval[i]));
float xmax = Xmax[0];
for (int i = 1; i < UNROLL; i++)
xmax = fmaxf(Xmax[i], xmax);
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// subtract xmax and compute exponent
float exp_sum = 0;
for (int i = 0; i < UNROLL; i++)
{
// use fast approx math: e**x == 2**(x * log2(e)); log2(e) is pre-folded into scale on the host
float2 Xval = ew_mul(ew_sub(to_float(xval[i]), xmax), scale);
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(Xval.x) :);
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(Xval.y) :);
exp_sum += ew_sum(Xval);
xval[i] = to_bhalf(Xval);
}
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
float rcp_exp_sum = exp_sum;
asm("rcp.approx.ftz.f32 %0, %0;" : "+f"(rcp_exp_sum) :);
Y += offset;
#pragma unroll
for (int i = 0; i < UNROLL; i++)
{
ehalf2 y = to_ehalf(ew_mul(to_float(xval[i]), rcp_exp_sum));
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 X, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%1, 0}; \n\t"
"add.s64 X, %0, offset; \n\t"
"@p st.global.wb.u32 [X], %2; \n\t"
"}" :: "l"(Y), "r"(i*64*64*2), "r"(y.x), "r"(lut_idx + i), "r"(lut_size));
}
}
template <uint UNROLL>
__global__ void __launch_bounds__(1024,2) blocksparse_masked_softmax_64x64_grad(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ DY,
const ehalf* __restrict__ Y,
ehalf* DX,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init)
{
__shared__ float Sum[32];
uint* LutOffset = (uint*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / 64; // index of the Q block row
uint idx_q = blockIdx.x % 64; // row within the Q block
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
Sum[tid] = 0.0f;
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
LutOffset[i] = Lut[i].x * 64*64;
__syncthreads();
uint lut_idx = (tid & (1024-32))*UNROLL/32;
uint tidx = (tid & 31)*2;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*64 + tidx + LutOffset[lut_idx];
DY += offset;
Y += offset;
ehalf2 dy[UNROLL], y[UNROLL];
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 DY, Y, offset; \n\t"
"setp.lt.u32 p, %5, %6; \n\t"
"mov.b64 offset, {%4, 0}; \n\t"
"add.s64 DY, %2, offset; \n\t"
"add.s64 Y, %3, offset; \n\t"
"mov.u32 %0, 0; \n\t"
"mov.u32 %1, 0; \n\t"
"@p ld.global.nc.u32 %0, [DY]; \n\t"
"@p ld.global.nc.u32 %1, [Y]; \n\t"
"}" : "=r"(dy[i].x), "=r"(y[i].x) : "l"(DY), "l"(Y), "r"(i*64*64*2), "r"(lut_idx + i), "r"(lut_size));
}
// compute dy * y and start reduction
float sum_dyy = 0.0f;
for (int i = 0; i < UNROLL; i++)
sum_dyy += ew_sum(ew_mul(to_float(dy[i]), to_float(y[i])));
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
DX += offset;
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
// dx = (dy - sum_dyy) * y * scale
ehalf2 dx = to_ehalf(ew_mul(ew_mul(ew_sub(to_float(dy[i]), sum_dyy), to_float(y[i])), scale));
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 DX, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%1, 0}; \n\t"
"add.s64 DX, %0, offset; \n\t"
"@p st.global.wb.u32 [DX], %2; \n\t"
"}" :: "l"(DX), "r"(i*64*64*2), "r"(dx.x), "r"(lut_idx + i), "r"(lut_size));
}
}
#define LOG2e 1.4426950408889634f
typedef unsigned char uchar;
bool BlocksparseMaskedSoftmax(hipStream_t stream,
const uint2* lut,
const char* mask,
const bhalf* x,
ehalf* y,
uint block_size, uint blocks,
uint batch_dim, uint head_dim, uint ctx_blks,
uint lut_heads, uint lut_dim, uint max_lut,
uint mask_heads, float scale)
{
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint szMask = mask_heads > 1 ? blocks * block_size : 0;
uint gridQ = ctx_blks * block_size;
uint szHead = blocks * block_size * block_size;
uint szBatch = head_dim * szHead;
uint maxK = max_lut * block_size;
//hipMemsetD16Async((hipDeviceptr_t)c, 0, szBatch*batch_dim, stream);
// combine scaling with fast e**x compute
scale *= LOG2e;
dim3 grid(gridQ, batch_dim, head_dim);
if (block_size == 64)
{
// Unroll factor 8 (ctx_size up to 16K)
if (maxK > 1024*8)
{
uint threads = 1024;
uint shfl_init = 16;
uint lut_max = threads * 8 / 32;
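// 12 bytes of dynamic shared per lut entry: an 8-byte mask word plus a 4-byte block offset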
uint shared = lut_max * 12;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64<8>), dim3(grid),dim3(threads),shared,stream, lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
// Unroll factor of 4 is preferred (keeps these kernels under 32 registers for max occupancy)
else if (maxK >= 64*4)
{
uint threads = CEIL_DIV(maxK, 64*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
uint lut_max = threads * 4 / 32;
uint shared = lut_max * 12;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64<4>), dim3(grid),dim3(threads),shared,stream, lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 64) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
uint lut_max = threads * 1 / 32;
uint shared = lut_max * 12;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64<1>), dim3(grid),dim3(threads),shared,stream, lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
}
else
{
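// 8 bytes of dynamic shared per lut entry: a uint2 holding the block offset and mask bits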
uint shared = max_lut*8;
// Unroll factor 8 (ctx_size up to 8K)
if (maxK > 1024*4)
{
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax<8, 8, uchar>), dim3(grid),dim3(1024),shared,stream, lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax<8,16,ushort>), dim3(grid),dim3(1024),shared,stream, lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax<8,32, uint>), dim3(grid),dim3(1024),shared,stream, lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
}
// Unroll factor of 4 is preferred
else if (maxK > 32*4)
{
uint threads = CEIL_DIV(maxK, 32*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax<4, 8, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax<4,16,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax<4,32, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 32) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax<1, 8, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax<1,16,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax<1,32, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
}
return true;
}
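// Usage sketch (illustrative only; pointers and lut metadata are hypothetical and
// assumed to come from the same lut-construction path as the gemm entry points):
// BlocksparseMaskedSoftmax(stream, d_lut, d_mask, d_logits, d_probs,
// /*block_size=*/32, blocks, batch, heads, ctx_blks,
// lut_heads, lut_dim, max_lut, mask_heads, /*scale=*/0.125f);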
bool BlocksparseMaskedSoftmaxGrad(hipStream_t stream,
const uint2* lut,
const char* mask,
const ehalf* dy,
const ehalf* y,
ehalf* dx,
uint block_size, uint blocks,
uint batch_dim, uint head_dim, uint ctx_blks,
uint lut_heads, uint lut_dim, uint max_lut,
uint mask_heads, float scale)
{
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint szMask = mask_heads > 1 ? blocks * block_size : 0;
uint gridQ = ctx_blks * block_size;
uint szHead = blocks * block_size * block_size;
uint szBatch = head_dim * szHead;
uint maxK = max_lut * block_size;
//hipMemsetD16Async((hipDeviceptr_t)c, 0, szBatch*batch_dim, stream);
dim3 grid(gridQ, batch_dim, head_dim);
if (block_size == 64)
{
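// 4 bytes of dynamic shared per lut entry: block offsets only (this grad kernel applies no mask)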
uint shared = max_lut*4;
// Unroll factor 8
if (maxK > 1024*8)
{
uint threads = 1024;
uint shfl_init = 16;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64_grad<8>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
// Unroll factor of 4 is preferred
else if (maxK >= 64*4)
{
uint threads = CEIL_DIV(maxK, 64*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64_grad<4>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 64) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
hipLaunchKernelGGL(( blocksparse_masked_softmax_64x64_grad<1>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
}
else
{
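// 8 bytes of dynamic shared per lut entry: a uint2 holding the block offset and mask bits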
uint shared = max_lut*8;
// Unroll factor 8
if (maxK > 1024*4)
{
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<8, 8, uchar>), dim3(grid),dim3(1024),shared,stream, lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<8,16,ushort>), dim3(grid),dim3(1024),shared,stream, lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<8,32, uint>), dim3(grid),dim3(1024),shared,stream, lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
}
// Unroll factor of 4 is preferred
else if (maxK > 32*4)
{
uint threads = CEIL_DIV(maxK, 32*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<4, 8, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<4,16,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<4,32, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 32) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<1, 8, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<1,16,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
hipLaunchKernelGGL(( blocksparse_masked_softmax_grad<1,32, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
}
return true;
}
#endif // GOOGLE_CUDA
| cb6e598edda826ee22433d94ca9a6f1f121f122a.cu | #if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include "gpu_hmma.h"
#include <stdio.h>
#if __CUDA_ARCH__ >= 700
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
// 32x64x16 warp tile
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(256) hgemm_bst_64x64x64_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 64+16;
const uint stdB = 64+16;
const uint stdC = 512+4;
__shared__ float fShare[stdC*16];
ehalf* hShare = (ehalf*)fShare;
uint2* Lut2s = (uint2*)&fShare[stdC*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint tx = tid % 8;
uint ty = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 256)
{
uint2 entry = Lut[i];
entry.x *= 64*64; // 4096 entries of A per block
entry.y *= szHeadState*64; // 64 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint storAB = ty*stdA + tx*8; // assume stdA == stdB
uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA, (tid & 192)*(OP_A == OP_N ? 1 : stdA)*16/64 + (tid & 32)*(OP_A == OP_N ? stdA : 1));
uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, (tid & 192)*stdB*16/64 + stdA*64);
uint b = idx_N*64 + tx*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*8;
uint offsetB = idx_B*szCtxHeadStateB + ty*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
fragmentC<OP_A,OP_N,M16N16K16> fragC[2][4];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b00 = {0};
uint4 b32 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint4 a00 = *(uint4*)&A[entry.x + 0*64];
uint4 a32 = *(uint4*)&A[entry.x + 32*64];
if (inB)
{
b00 = *(uint4*)&B[entry.y + 0*szHeadState];
b32 = *(uint4*)&B[entry.y + 32*szHeadState];
}
__syncthreads();
*(uint4*)&hShare[storAB + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storAB + 32*stdA + 0*stdA] = a32;
*(uint4*)&hShare[storAB + 0*stdB + 64*stdA] = b00;
*(uint4*)&hShare[storAB + 32*stdB + 64*stdA] = b32;
__syncthreads();
fragmentA<OP_A,M16N16K16> fragA[2];
fragmentB<OP_N,M16N16K16> fragB[4];
for (int i = 0; i < 2; i++)
fragA[i].load(hShare, loadA + (OP_A == OP_N ? stdA : 1)*i*16, stdA);
for (int i = 0; i < 4; i++)
fragB[i].load(hShare, loadB + i*16, stdB);
for (int i = 0; i < 2; i++)
for (int j = 0; j < 4; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++idx_lut < lut_size);
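// allow assembler to forget these registers in the main loop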
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, (tid & 224)*2);
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*64 + tyc)*szHeadState + idx_H*szState + c;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 4; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int j = 0; j < 2; j++)
*(uint2*)&C[offsetC + szHeadState*(j*32 + i*16)] = to_half4(
ew_add(
ew_add(
*(float4*)&fShare[loadC + j*64 + 0*128],
*(float4*)&fShare[loadC + j*64 + 1*128]),
ew_add(
*(float4*)&fShare[loadC + j*64 + 2*128],
*(float4*)&fShare[loadC + j*64 + 3*128])
)
);
}
}
}
else
{
uint c = idx_N*64 + tx*8;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*64 + ty)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState* 0] = zero;
*(uint4*)&C[offsetC + szHeadState*32] = zero;
}
}
}
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128) hgemm_bst_32x64x32_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 48;
const uint stdB = 80;
const uint stdC = 132;
__shared__ ehalf hShare[(stdA + stdB)*32];
float* fShare = (float*)hShare;
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*32];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 8;
uint tyb = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 128)
{
uint2 entry = Lut[i];
entry.x *= 32*32; // 1024 entries of A per block
entry.y *= szHeadState*32; // 32 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 4;
uint tya = tid / 4;
uint storA = tya*stdA + txa*8;
uint storB = tyb*stdB + txb*8 + stdA*32;
uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA, (tid & 64)*(OP_A == OP_N ? 1 : stdA)*16/64);
uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, (tid & 64)*stdB*16/64 + (tid & 32) + stdA*32);
uint b = idx_N*64 + txb*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*8;
uint offsetB = idx_B*szCtxHeadStateB + tyb*szHeadState + idx_H*szState + b;
bool inB = N64 || b < szState;
fragmentC<OP_A,OP_N,M16N16K16> fragC[2][2];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b00 = {0};
uint4 b16 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint4 a00 = *(uint4*)&A[entry.x];
if (inB)
{
b00 = *(uint4*)&B[entry.y + 0*szHeadState];
b16 = *(uint4*)&B[entry.y + 16*szHeadState];
}
__syncthreads();
*(uint4*)&hShare[storA] = a00;
*(uint4*)&hShare[storB + 0*stdB] = b00;
*(uint4*)&hShare[storB + 16*stdB] = b16;
__syncthreads();
fragmentA<OP_A,M16N16K16> fragA[2];
fragmentB<OP_N,M16N16K16> fragB[2];
for (int i = 0; i < 2; i++)
{
fragA[i].load(hShare, loadA + (OP_A == OP_N ? stdA : 1)*i*16, stdA);
fragB[i].load(hShare, loadB + i*16, stdB);
}
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++idx_lut < lut_size);
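// allow assembler to forget these registers in the main loop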
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, tid & 96);
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tyc)*szHeadState + idx_H*szState + c;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int j = 0; j < 2; j++)
*(uint2*)&C[offsetC + szHeadState*(j*8 + i*16)] = to_half4(ew_add(
*(float4*)&fShare[loadC + stdC*j*8 + 0],
*(float4*)&fShare[loadC + stdC*j*8 + 64]));
}
}
}
else
{
uint c = idx_N*64 + txb*8;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*32 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState* 0] = zero;
*(uint4*)&C[offsetC + szHeadState*16] = zero;
}
}
}
// C = A * B or A.T * B
// Dims: M, N, K
// N64: N even mult of 64
// A is sparse, B is dense, C is dense
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_bst_16x64x16_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 16;
const uint stdB = 80;
const uint stdC = 68;
__shared__ ehalf hShare[(stdA + stdB)*16];
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
uint txb = tid % 8;
uint tyb = tid / 8;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 64)
{
uint2 entry = Lut[i];
entry.x *= 16*16; // 256 entries of A per block
entry.y *= szHeadState*16; // 16 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint txa = tid % 4;
uint tya = tid / 4;
uint storA = tya*stdA + txa*4;
uint storB = tyb*stdB + txb*8 + 16*stdA;
uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA);
uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, 16*stdA + (tid & 32));
uint b = idx_N*64 + txb*8;
bool inB = N64 || b < szState;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + tid*4;
uint offsetB = idx_B*szCtxHeadStateB + tyb*szHeadState + idx_H*szState + b;
fragmentC<OP_A,OP_N,M16N16K16> fragC[2];
int idx_lut = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint2 entry = Lut2s[idx_lut];
uint4 b0 = {0};
uint4 b8 = {0};
entry.x += offsetA;
entry.y += offsetB;
uint2 a0 = *(uint2*)&A[entry.x];
if (inB)
{
b0 = *(uint4*)&B[entry.y + 0*szHeadState];
b8 = *(uint4*)&B[entry.y + 8*szHeadState];
}
__syncthreads();
*(uint2*)&hShare[storA] = a0;
*(uint4*)&hShare[storB + 0*stdB] = b0;
*(uint4*)&hShare[storB + 8*stdB] = b8;
__syncthreads();
fragmentA<OP_A,M16N16K16> fragA;
fragmentB<OP_N,M16N16K16> fragB;
fragA.load(hShare, loadA, stdA);
#pragma unroll
for (int j = 0; j < 2; j++)
{
fragB.load(hShare, loadB + j*16, stdB);
fragC[j].mma_sync(fragA, fragB);
}
} while (++idx_lut < lut_size);
// allow assembler to forget these registers in the main loop
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// use thread stride of 4 to allow use of shared stride of 68
// which minimizes shared bank conflicts on write.
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, tid & 32);
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*16 + tyc)*szHeadState + idx_H*szState + c;
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[j].store(hShare, storC + j*16, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int i = 0; i < 4; i++)
*(uint2*)&C[offsetC + szHeadState*i*4] = *(uint2*)&hShare[loadC + stdC*i*4];
}
}
else
{
uint c = idx_N*64 + txb*8;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*16 + tyb)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState*0] = zero;
*(uint4*)&C[offsetC + szHeadState*8] = zero;
}
}
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_bst_8x64x8_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
const uint stdA = 8;
const uint stdB = 80;
const uint stdC = 68;
__shared__ ehalf hShare[(stdA + stdB)*16];
uint2* Lut2s = (uint2*)&hShare[(stdA + stdB)*16];
uint tid = threadIdx.x;
uint idx_MN = blockIdx.x; // compound outer product dims
uint idx_M = div64(idx_MN, magic_N, shift_N); // idx_M = idx_MN / grid_N;
uint idx_N = idx_MN - idx_M*grid_N; // idx_N = idx_MN % grid_N;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// assume lower diagonal and schedule large reductions first
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// each head can optionally have its own lut
Lut += idx_H*szLut;
uint2 lut_head = Lut[idx_M];
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
if (lut_size > 0)
{
// prefetch the lut data into shared
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += 64)
{
uint2 entry = Lut[i];
entry.x *= 8*8; // 64 entries of A per block
entry.y *= szHeadState*8; // 8 lines of B per block
Lut2s[i] = entry;
}
__syncthreads();
uint t32 = tid & 32;
uint t31 = tid & 31;
uint txb = tid % 8;
uint tyb = t31 / 8;
uint storA = tid*2;
uint storB = tyb*stdB + txb*8 + t32*20 + 16*stdA;
uint loadA = fragmentA<OP_A,M8N32K16>::get_idx(tid, stdA);
uint loadB = fragmentB<OP_N,M8N32K16>::get_idx(tid, stdB, t32 + 16*stdA);
uint b = idx_N*64 + txb*8;
uint offsetA = idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + t31*2;
uint offsetB = idx_B*szCtxHeadStateB + tyb*szHeadState + idx_H*szState + b;
fragmentC<OP_A,OP_N,M8N32K16> fragC;
uint idx_lut = t32 / 32;
uint idx_lut2 = 0;
uint lut_size2 = (lut_size + 1)/2;
#pragma unroll 1
do
{
uint a0 = 0;
uint4 b0 = {0};
uint4 b4 = {0};
if (idx_lut < lut_size)
{
uint2 entry = Lut2s[idx_lut];
entry.x += offsetA;
entry.y += offsetB;
a0 = *(uint*)&A[entry.x];
if (b < szState)
{
b0 = *(uint4*)&B[entry.y + 0*szHeadState];
b4 = *(uint4*)&B[entry.y + 4*szHeadState];
}
}
__syncthreads();
*(uint* )&hShare[storA ] = a0;
*(uint4*)&hShare[storB + 0*stdB] = b0;
*(uint4*)&hShare[storB + 4*stdB] = b4;
__syncthreads();
fragmentA<OP_A,M8N32K16> fragA;
fragmentB<OP_N,M8N32K16> fragB;
fragA.load(hShare, loadA, stdA);
fragB.load(hShare, loadB, stdB);
fragC.mma_sync(fragA, fragB);
idx_lut += 2;
} while (++idx_lut2 < lut_size2);
// allow assembler to forget these registers in the main loop
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_MN) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
idx_M = div64(idx_MN, magic_N, shift_N);
idx_N = idx_MN - idx_M*grid_N;
if (OP_A == OP_N)
idx_M = grid_M - idx_M;
// use thread stride of 4 to allow use of shared stride of 68
// which minimizes shared bank conflicts on write.
uint txc = tid % 16;
uint tyc = tid / 16;
uint c = idx_N*64 + txc*4;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_A,OP_N,M8N32K16>::get_idx(tid, stdC, tid & 32);
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*8 + tyc)*szHeadState + idx_H*szState + c;
__syncthreads();
fragC.store(hShare, storC, stdC);
__syncthreads();
if (N64 || c < szState)
{
for (int i = 0; i < 2; i++)
*(uint2*)&C[offsetC + szHeadState*i*4] = *(uint2*)&hShare[loadC + stdC*i*4];
}
}
else
{
uint txc = tid % 8;
uint tyc = tid / 8;
uint c = idx_N*64 + txc*8;
uint offsetC = idx_B*szCtxHeadStateC + (idx_M*8 + tyc)*szHeadState + idx_H*szState + c;
if (N64 || c < szState)
{
uint4 zero = {0};
*(uint4*)&C[offsetC + szHeadState*0] = zero;
}
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// A is dense, B is dense, C is sparse
// 32x32x32 warp tile
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(256) hgemm_bst_64x64x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 64 + 8;
const uint stdB = 64 + 8;
const uint stdC = 64*4 + 4;
__shared__ ehalf hShare[(stdA + stdB)*64];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadStateA + (idx_M*64 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadStateB + (idx_N*64 + ty)*szHeadState + idx_H*szState + k;
uint offsetA32 = offsetA00 + szHeadState*32;
uint offsetB32 = offsetB00 + szHeadState*32;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdA, (tid & 64)*stdA*32/64 + (tid & 128)*32/128 + 0*stdA);
uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdB, (tid & 32)*stdB*32/32 + (tid & 128)*32/128 + 64*stdA);
fragmentC<OP_N,OP_T,M16N16K16> fragC[2][2]; // m,n
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a00 = {0}, a32 = {0};
uint4 b00 = {0}, b32 = {0};
if (K64 || k < szState)
{
a00 = *(uint4*)&A[offsetA00];
a32 = *(uint4*)&A[offsetA32];
b00 = *(uint4*)&B[offsetB00];
b32 = *(uint4*)&B[offsetB32];
}
offsetA00 += 64;
offsetA32 += 64;
offsetB00 += 64;
offsetB32 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storA + 32*stdA + 0*stdA] = a32;
*(uint4*)&hShare[storB + 0*stdB + 64*stdA] = b00;
*(uint4*)&hShare[storB + 32*stdB + 64*stdA] = b32;
__syncthreads();
fragmentA<OP_N,M16N16K16> fragA[2][2]; // m,k
fragmentB<OP_T,M16N16K16> fragB[2][2]; // n,k
for (int m = 0; m < 2; m++)
for (int k = 0; k < 2; k++)
fragA[m][k].load(hShare, loadA + m*16*stdA + k*16, stdA);
for (int n = 0; n < 2; n++)
for (int k = 0; k < 2; k++)
fragB[n][k].load(hShare, loadB + n*16*stdB + k*16, stdB);
for (int m = 0; m < 2; m++)
for (int n = 0; n < 2; n++)
for (int k = 0; k < 2; k++)
fragC[m][n].mma_sync(fragA[m][k], fragB[n][k]);
} while (++loop < loops);
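// allow assembler to forget these registers in the main loop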
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
uint txc = tid % 16;
uint tyc = tid / 16;
uint loadC = tyc*stdC + txc*4;
uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 224));
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*64*64 + tid*4;
for (int m = 0; m < 2; m++)
{
__syncthreads();
for (int n = 0; n < 2; n++)
fragC[m][n].store(fShare, storC + n*16, stdC);
__syncthreads();
for (int i = 0; i < 2; i++)
{
float4 sum4 = ew_add(
*(float4*)&fShare[loadC + i*64 + 0*128],
*(float4*)&fShare[loadC + i*64 + 1*128]
);
store((CV*)(C + 64*(i*32 + m*16)), sum4);
}
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// A is dense, B is dense, C is sparse
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(128) hgemm_bst_32x32x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 72;
const uint stdB = 72;
const uint stdC = 132;
__shared__ ehalf hShare[(stdA + stdB)*32];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA00 = idx_B*szCtxHeadStateA + (idx_M*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetB00 = idx_B*szCtxHeadStateB + (idx_N*32 + ty)*szHeadState + idx_H*szState + k;
uint offsetA16 = offsetA00 + szHeadState*16;
uint offsetB16 = offsetB00 + szHeadState*16;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdA, (tid & 96)/2);
uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdB, (tid & 96)/2 + stdA*32);
fragmentC<OP_N,OP_T,M16N16K16> fragC[2][2];
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a00 = {0}, a16 = {0};
uint4 b00 = {0}, b16 = {0};
if (K64 || k < szState)
{
a00 = *(uint4*)&A[offsetA00];
a16 = *(uint4*)&A[offsetA16];
b00 = *(uint4*)&B[offsetB00];
b16 = *(uint4*)&B[offsetB16];
}
offsetA00 += 64;
offsetA16 += 64;
offsetB00 += 64;
offsetB16 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a00;
*(uint4*)&hShare[storA + 16*stdA + 0*stdA] = a16;
*(uint4*)&hShare[storB + 0*stdB + 32*stdA] = b00;
*(uint4*)&hShare[storB + 16*stdB + 32*stdA] = b16;
__syncthreads();
fragmentA<OP_N,M16N16K16> fragA[2];
fragmentB<OP_T,M16N16K16> fragB[2];
for (int i = 0; i < 2; i++)
{
fragA[i].load(hShare, loadA + stdA*i*16, stdA);
fragB[i].load(hShare, loadB + stdB*i*16, stdB);
}
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fragC[i][j].mma_sync(fragA[i], fragB[j]);
} while (++loop < loops);
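// allow assembler to forget these registers in the main loop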
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
tx = tid % 8;
ty = tid / 8;
uint loadC = ty*stdC + tx*4;
uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 96));
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*32*32 + tid*4;
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
fragC[i][j].store(fShare, storC + j*16, stdC);
__syncthreads();
float4 sum4 = ew_add(
ew_add(
*(float4*)&fShare[loadC + 0],
*(float4*)&fShare[loadC + 32]),
ew_add(
*(float4*)&fShare[loadC + 64],
*(float4*)&fShare[loadC + 96]));
store((CV*)(C + i*4*128), sum4);
}
}
// C = A * B.T
// Dims: M, N, K
// K64: K even mult of 64
// dds: A is dense, B is dense, C is sparse
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(64) hgemm_bst_16x16x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdA = 72;
const uint stdB = 72;
const uint stdC = 48;
__shared__ ehalf hShare[(stdA + stdB)*16];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA0 = idx_B*szCtxHeadStateA + (idx_M*16 + ty)*szHeadState + idx_H*szState + k;
uint offsetB0 = idx_B*szCtxHeadStateB + (idx_N*16 + ty)*szHeadState + idx_H*szState + k;
uint offsetA8 = offsetA0 + szHeadState*8;
uint offsetB8 = offsetB0 + szHeadState*8;
uint storA = ty*stdA + k;
uint storB = ty*stdB + k;
uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdA, (tid & 32));
uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdB, (tid & 32) + 16*stdA);
fragmentC<OP_N,OP_T,M16N16K16> fragC;
uint loop = 0;
#pragma unroll 1
do
{
asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever
uint4 a0 = {0}, a8 = {0};
uint4 b0 = {0}, b8 = {0};
if (K64 || k < szState)
{
a0 = *(uint4*)&A[offsetA0];
a8 = *(uint4*)&A[offsetA8];
b0 = *(uint4*)&B[offsetB0];
b8 = *(uint4*)&B[offsetB8];
}
offsetA0 += 64;
offsetA8 += 64;
offsetB0 += 64;
offsetB8 += 64;
if (!K64)
k += 64;
__syncthreads();
*(uint4*)&hShare[storA + 0*stdA + 0*stdA] = a0;
*(uint4*)&hShare[storA + 8*stdA + 0*stdA] = a8;
*(uint4*)&hShare[storB + 0*stdB + 16*stdA] = b0;
*(uint4*)&hShare[storB + 8*stdB + 16*stdA] = b8;
__syncthreads();
fragmentA<OP_N,M16N16K16> fragA;
fragmentB<OP_T,M16N16K16> fragB;
#pragma unroll
for (uint j = 0; j < 2; j++)
{
fragA.load(hShare, loadA + j*16, stdA);
fragB.load(hShare, loadB + j*16, stdB);
fragC.mma_sync(fragA, fragB);
}
} while (++loop < loops);
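// allow assembler to forget these registers in the main loop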
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
tx = tid % 4;
ty = tid / 4;
uint loadC = ty*stdC + tx*4;
uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 32)/2);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*16*16 + tid*4;
__syncthreads();
fragC.store(fShare, storC, stdC);
__syncthreads();
float4 sum4 = ew_add(
*(float4*)&fShare[loadC + 0],
*(float4*)&fShare[loadC + 16]);
store((CV*)C, sum4);
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(32) hgemm_bst_8x8x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
const uint stdAB = 72;
const uint stdC = 8;
__shared__ ehalf hShare[stdAB*8*2];
float* fShare = (float*)hShare;
uint tid = threadIdx.x;
uint bid = blockIdx.x; // blockid
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
uint2 lut_head = Lut[idx_H*szLut + bid];
uint tx = tid % 8;
uint ty = tid / 8;
uint k = tx * 8;
uint idx_M = lut_head.x;
uint idx_N = lut_head.y;
uint offsetA0 = idx_B*szCtxHeadStateA + (idx_M*8 + ty)*szHeadState + idx_H*szState + k;
uint offsetB0 = idx_B*szCtxHeadStateB + (idx_N*8 + ty)*szHeadState + idx_H*szState + k;
uint offsetA4 = offsetA0 + szHeadState*4;
uint offsetB4 = offsetB0 + szHeadState*4;
uint storAB = ty*stdAB + k;
uint loadA = fragmentA<OP_N,M8N8K16>::get_idx(tid, stdAB, 0*stdAB);
uint loadB = fragmentB<OP_T,M8N8K16>::get_idx(tid, stdAB, 8*stdAB);
fragmentC<OP_N,OP_T,M8N8K16> fragC;
uint loop = 0;
#pragma unroll 1
do
{
uint4 a0 = {0}, a4 = {0};
uint4 b0 = {0}, b4 = {0};
if (K64 || k < szState)
{
a0 = *(uint4*)&A[offsetA0];
a4 = *(uint4*)&A[offsetA4];
b0 = *(uint4*)&B[offsetB0];
b4 = *(uint4*)&B[offsetB4];
}
offsetA0 += 64;
offsetA4 += 64;
offsetB0 += 64;
offsetB4 += 64;
if (!K64)
k += 64;
*(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a0;
*(uint4*)&hShare[storAB + 4*stdAB + 0*stdAB] = a4;
*(uint4*)&hShare[storAB + 0*stdAB + 8*stdAB] = b0;
*(uint4*)&hShare[storAB + 4*stdAB + 8*stdAB] = b4;
fragmentA<OP_N,M8N8K16> fragA;
fragmentB<OP_T,M8N8K16> fragB;
#pragma unroll
for (uint j = 0; j < 4; j++)
{
fragA.load(hShare, loadA + j*16, stdAB);
fragB.load(hShare, loadB + j*16, stdAB);
fragC.mma_sync(fragA, fragB);
}
} while (++loop < loops);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :);
asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_H) :);
uint storC = fragmentC<OP_N,OP_T,M8N8K16>::get_idx(tid, stdC);
fragC.store(fShare, storC, stdC);
C += idx_B*szHeadBlocksBlk + idx_H*szBlocksBlk + bid*8*8 + tid*2;
store((CV*)C, *(float2*)&fShare[tid*2]);
}
#else // __CUDA_ARCH__ >= 700
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(256) hgemm_bst_64x64x64_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(128) hgemm_bst_32x64x32_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_bst_16x64x16_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <uint OP_A, bool N64>
__global__ void __launch_bounds__(64) hgemm_bst_8x64x8_xn(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
ehalf* C,
uint szCtxHeadStateB, uint szCtxHeadStateC, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint grid_M, uint grid_N, uint magic_N, uint shift_N)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(256) hgemm_bst_64x64x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(128) hgemm_bst_32x32x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(64) hgemm_bst_16x16x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
template <typename CT, typename CV, bool K64>
__global__ void __launch_bounds__(32) hgemm_bst_8x8x64_nt(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ A,
const ehalf* __restrict__ B,
CT* C,
uint szCtxHeadStateA, uint szCtxHeadStateB, uint szHeadState, uint szState,
uint szHeadBlocksBlk, uint szBlocksBlk, uint szLut,
uint loops)
{
*C = 0;
}
#endif // __CUDA_ARCH__ >= 700
bool blocksparse_transformer_xn(CUstream stream,
const uint2* lut,
const ehalf* a,
const ehalf* b,
ehalf* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks_b, uint ctx_blks_c, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim, uint op, uint magic, uint shift, uint max_lut)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState;
uint szCtxHeadStateC = ctx_blks_c * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
// compound gridDim.x with m and n coords
uint gridN = CEIL_DIV(state_dim, 64);
uint gridM = ctx_blks_c - 1;
uint gridX = ctx_blks_c * gridN;
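// the xn kernels recover (m, n) from blockIdx.x via magic-number division (magic_N, shift_N), avoiding a hardware integer divide per block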
uint shared = ((max_lut+1)/2)*2*8; // round up to nearest even, 8 bytes per entry
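// e.g. a (hypothetical) max_lut of 5 rounds up to 6 entries: 6 * 8 = 48 bytes of dynamic shared memory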
dim3 grid(gridX, batch_dim, head_dim);
if (op == 1) // NN
{
if (block_size == 8)
hgemm_bst_8x64x8_xn<OP_N,false><<<grid, 64,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 16)
hgemm_bst_16x64x16_xn<OP_N,false><<<grid, 64,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 32)
hgemm_bst_32x64x32_xn<OP_N,false><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hgemm_bst_64x64x64_xn<OP_N,false><<<grid,256,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
else // TN
{
if (block_size == 8)
hgemm_bst_8x64x8_xn<OP_T,false><<<grid, 64,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 16)
hgemm_bst_16x64x16_xn<OP_T,false><<<grid, 64,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else if (block_size == 32)
hgemm_bst_32x64x32_xn<OP_T,false><<<grid,128,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
else
hgemm_bst_64x64x64_xn<OP_T,false><<<grid,256,shared,stream>>>(lut, a, b, c, szCtxHeadStateB, szCtxHeadStateC, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, gridM, gridN, magic, shift);
}
return true;
}
template <typename CT, typename CV2, typename CV4>
bool blocksparse_transformer_nt(CUstream stream,
const uint2* lut,
const ehalf* a,
const ehalf* b,
CT* c,
uint block_size, uint blocks,
uint batch_dim, uint ctx_blks_a, uint ctx_blks_b, uint head_dim, uint state_dim,
uint lut_heads, uint lut_dim)
{
uint szState = state_dim;
uint szHeadState = head_dim * szState;
uint szCtxHeadStateA = ctx_blks_a * block_size * szHeadState;
uint szCtxHeadStateB = ctx_blks_b * block_size * szHeadState;
uint szBlocksBlk = blocks * block_size * block_size;
uint szHeadBlocksBlk = head_dim * szBlocksBlk;
// if just one lut head, broadcast block-sparsity to all heads
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint loops = CEIL_DIV(state_dim, 64);
bool k64 = (state_dim & 63) == 0;
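// when state_dim is a multiple of 64 the K64=true kernels can skip the per-iteration k < szState bounds check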
dim3 grid(blocks, batch_dim, head_dim);
if (block_size == 8)
{
if (k64)
hgemm_bst_8x8x64_nt<CT,CV2, true><<<grid, 32,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hgemm_bst_8x8x64_nt<CT,CV2,false><<<grid, 32,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
else if (block_size == 16)
{
if (k64)
hgemm_bst_16x16x64_nt<CT,CV4, true><<<grid, 64,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hgemm_bst_16x16x64_nt<CT,CV4,false><<<grid, 64,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
}
else if (block_size == 32)
hgemm_bst_32x32x64_nt<CT,CV4,false><<<grid,128,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
else
hgemm_bst_64x64x64_nt<CT,CV4,false><<<grid,256,0,stream>>>(lut, a, b, c, szCtxHeadStateA, szCtxHeadStateB, szHeadState, szState, szHeadBlocksBlk, szBlocksBlk, szLut, loops);
// cudaError_t error = cudaGetLastError();
// printf("%s\n%s\n", cudaGetErrorName(error), cudaGetErrorString(error));
return true;
}
template bool blocksparse_transformer_nt<ehalf,ehalf2,ehalf4>(CUstream stream, const uint2* lut, const ehalf* a, const ehalf* b, ehalf* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks_a, uint ctx_blks_b, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim);
template bool blocksparse_transformer_nt<bhalf,bhalf2,bhalf4>(CUstream stream, const uint2* lut, const ehalf* a, const ehalf* b, bhalf* c, uint block_size, uint blocks, uint batch_dim, uint ctx_blks_a, uint ctx_blks_b, uint head_dim, uint state_dim, uint lut_heads, uint lut_dim);
template <uint U, uint BSIZE, typename MASKT>
__global__ void blocksparse_masked_softmax(
const uint2* __restrict__ Lut,
const MASKT* __restrict__ Mask,
const bhalf* __restrict__ X,
ehalf* Y,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint use_mask)
{
__shared__ float Max[32];
__shared__ float Sum[32];
uint2* Lut2s = (uint2*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / BSIZE; // Q dim
uint idx_q = blockIdx.x % BSIZE; // Q dim
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
{
uint2 entry = Lut[i];
entry.y = use_mask ? (uint)__ldg(Mask + entry.x) : 0xffffffff;
entry.x *= BSIZE*BSIZE;
Lut2s[i] = entry;
//printf("%3d %3d %3d %08x\n", idx_Q, idx_q, i, entry.y);
}
__syncthreads();
uint lut_idx = (tid & (1024-BSIZE))*U/BSIZE;
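// (tid & ~(BSIZE-1)) * U / BSIZE == (tid / BSIZE) * U: each group of BSIZE threads owns U consecutive lut entries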
uint tidx = tid % BSIZE;
uint mask_bit = 1 << tidx;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx;
float xval[U];
#pragma unroll
for (int i = 0; i < U; i++)
{
uint2 entry = Lut2s[lut_idx + i];
uint offsetX = offset + entry.x;
bool in = lut_idx + i < lut_size;
float val = load(X + offsetX, 0, in);
xval[i] = in && (entry.y & mask_bit) != 0 ? val : -FLT_MAX;
}
// reduce within thread
float Xmax[U];
for (int i = 0; i < U; i++)
Xmax[i] = xval[i];
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xmax[i] = fmaxf(Xmax[i], Xmax[i+j]);
float xmax = Xmax[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// compute exponent of softmax
float Xsum[U];
for (int i = 0; i < U; i++)
{
// use fast approx math: e**x == 2**(x * log2(e))
float exp = (xval[i] - xmax) * scale;
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(exp) :);
Xsum[i] = xval[i] = exp;
}
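// note: the launcher pre-folds log2(e) into scale (see BlocksparseMaskedSoftmax below), so the
// single multiply above covers both the user scale and the e**x -> 2**x conversion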
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
Xsum[i] = Xsum[i] + Xsum[i+j];
float exp_sum = Xsum[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
float rcp_exp_sum = exp_sum;
asm("rcp.approx.ftz.f32 %0, %0;" : "+f"(rcp_exp_sum) :);
#pragma unroll
for (int i = 0; i < U; i++)
{
ehalf out;
asm("cvt.rn.f16.f32 %0, %1;" : "=h"(out.x) : "f"(xval[i] * rcp_exp_sum));
uint offsetY = offset + Lut2s[lut_idx + i].x;
if (lut_idx + i < lut_size)
__stg(Y + offsetY, out);
}
}
template <uint U, uint BSIZE, typename MASKT>
__global__ void blocksparse_masked_softmax_grad(
const uint2* __restrict__ Lut,
const MASKT* __restrict__ Mask,
const ehalf* __restrict__ DY,
const ehalf* __restrict__ Y,
ehalf* DX,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint use_mask)
{
__shared__ float Sum[32];
uint2* Lut2s = (uint2*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / BSIZE; // Q dim
uint idx_q = blockIdx.x % BSIZE; // Q dim
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut and/or mask
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
Sum[tid] = 0.0f;
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
{
uint2 entry = Lut[i];
entry.y = use_mask ? (uint)__ldg(Mask + entry.x) : 0xffffffff;
entry.x *= BSIZE*BSIZE;
Lut2s[i] = entry;
}
__syncthreads();
uint lut_idx = (tid & (1024-BSIZE))*U/BSIZE;
uint tidx = tid % BSIZE;
uint mask_bit = 1 << tidx;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx;
float dy[U], y[U];
#pragma unroll
for (int i = 0; i < U; i++)
{
uint2 entry = Lut2s[lut_idx + i];
uint offsetY = offset + entry.x;
bool in = lut_idx + i < lut_size && (entry.y & mask_bit) != 0;
dy[i] = load(DY + offsetY, 0, in);
y[i] = load(Y + offsetY, 0, in);
}
// compute dy * y
float dyy[U];
for (int i = 0; i < U; i++)
dyy[i] = dy[i] * y[i];
// reduce within thread
for (int j = U >> 1; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
dyy[i] = dyy[i] + dyy[i+j];
float sum_dyy = dyy[0];
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
// dx = (dy - sum_dyy) * y * scale
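// (softmax backward: with y = softmax(x*scale), dL/dx_i = scale * y_i * (dy_i - sum_j dy_j * y_j))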
#pragma unroll
for (int i = 0; i < U; i++)
{
float dx = (dy[i] - sum_dyy) * y[i] * scale;
ehalf out;
asm("cvt.rn.f16.f32 %0, %1;" : "=h"(out.x) : "f"(dx));
uint offsetX = offset + Lut2s[lut_idx + i].x;
if (lut_idx + i < lut_size)
__stg(DX + offsetX, out);
}
}
typedef unsigned long long uint64;
template <uint UNROLL>
__global__ void __launch_bounds__(1024,2) blocksparse_masked_softmax_64x64(
const uint2* __restrict__ Lut,
const uint64* __restrict__ Mask,
const bhalf* __restrict__ X,
ehalf* Y,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint max_lut, uint use_mask)
{
__shared__ float Max[32];
__shared__ float Sum[32];
uint64* LutMask64 = (uint64*)&Sum[32];
uint* LutMask32 = (uint*)&Sum[32];
uint* LutOffset = (uint*)&LutMask64[max_lut];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / 64;
uint idx_q = blockIdx.x % 64;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
Mask += idx_H * szMask + idx_q * blocks;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
{
// Allows non-power of 2 threads to work
Max[tid] = -FLT_MAX;
Sum[tid] = 0.0f;
}
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < max_lut; i += blockDim.x)
{
uint64 mask = 0;
if (i < lut_size)
{
uint2 entry = Lut[i];
uint blk_id = entry.x;
LutOffset[i] = blk_id * 64*64;
mask = use_mask ? __ldg(Mask + blk_id) : 0xffffffffffffffff;
}
LutMask64[i] = mask;
}
__syncthreads();
uint lut_idx = (tid & (1024-32))*UNROLL/32;
uint tidx = (tid & 31)*2;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*64 + tidx + LutOffset[lut_idx];
X += offset;
bhalf2 xval[UNROLL];
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
// nvcc/ptxas is really bad at generating sass that maximizes use of immediate offsets.
// This means way more registers are tied up in memory load addresses than are needed.
// This kernel's performance is hugely dependent on efficient register use so give the compiler a hand:
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 X, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%2, 0}; \n\t"
"add.s64 X, %1, offset; \n\t"
"mov.u32 %0, 0xff80ff80; \n\t" // bhalf2 -inf, -inf
"@p ld.global.nc.u32 %0, [X]; \n\t"
"}" :"=r"(xval[i].x) : "l"(X), "r"(i*64*64*2), "r"(lut_idx + i), "r"(lut_size));
}
// split the 64 bit mask by half warp
uint tid16 = (tid & 16)/16;
uint mask0 = 1 << (tidx - tid16*32);
uint mask1 = mask0 << 1;
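// threads 0-15 of each warp test mask bits 0..31, threads 16-31 test bits 32..63;
// each lane covers two adjacent columns, hence the mask0/mask1 pair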
#pragma unroll
for (int i = 0; i < UNROLL; i++)
{
uint mask32 = LutMask32[(lut_idx + i)*2 + tid16];
if ((mask32 & mask0) == 0)
xval[i].x = (xval[i].x & 0xffff0000) | 0x0000ff80; // low bhalf <- -inf (0xff80; the fp16 encoding would be 0xfc00)
if ((mask32 & mask1) == 0)
xval[i].x = (xval[i].x & 0x0000ffff) | 0xff800000;
}
// reduce within thread
float Xmax[UNROLL];
for (int i = 0; i < UNROLL; i++)
Xmax[i] = ew_max(to_float(xval[i]));
float xmax = Xmax[0];
for (int i = 1; i < UNROLL; i++)
xmax = fmaxf(Xmax[i], xmax);
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Max[tid/32] = xmax;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
xmax = Max[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
xmax = fmaxf(xmax, shfl_xor(xmax, i));
// final reduction to shared
Max[tid] = xmax;
}
__syncthreads();
xmax = Max[0];
}
// subtract xmax and compute exponent
float exp_sum = 0;
for (int i = 0; i < UNROLL; i++)
{
// use fast approx math: e**x == 2**(x * log2(e))
float2 Xval = ew_mul(ew_sub(to_float(xval[i]), xmax), scale);
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(Xval.x) :);
asm("ex2.approx.ftz.f32 %0, %0;" : "+f"(Xval.y) :);
exp_sum += ew_sum(Xval);
xval[i] = to_bhalf(Xval);
}
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = exp_sum;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
exp_sum = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
exp_sum += shfl_xor(exp_sum, i);
// final reduction to shared
Sum[tid] = exp_sum;
}
__syncthreads();
exp_sum = Sum[0];
}
float rcp_exp_sum = exp_sum;
asm("rcp.approx.ftz.f32 %0, %0;" : "+f"(rcp_exp_sum) :);
Y += offset;
#pragma unroll
for (int i = 0; i < UNROLL; i++)
{
ehalf2 y = to_ehalf(ew_mul(to_float(xval[i]), rcp_exp_sum));
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 X, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%1, 0}; \n\t"
"add.s64 X, %0, offset; \n\t"
"@p st.global.wb.u32 [X], %2; \n\t"
"}" :: "l"(Y), "r"(i*64*64*2), "r"(y.x), "r"(lut_idx + i), "r"(lut_size));
}
}
template <uint UNROLL>
__global__ void __launch_bounds__(1024,2) blocksparse_masked_softmax_64x64_grad(
const uint2* __restrict__ Lut,
const ehalf* __restrict__ DY,
const ehalf* __restrict__ Y,
ehalf* DX,
uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init)
{
__shared__ float Sum[32];
uint* LutOffset = (uint*)&Sum[32];
uint tid = threadIdx.x;
uint idx_Q = blockIdx.x / 64;
uint idx_q = blockIdx.x % 64;
uint idx_B = blockIdx.y; // batch dim
uint idx_H = blockIdx.z; // head dim
// each head can optionally have its own lut
Lut += idx_H * szLut;
uint2 lut_head = Lut[idx_Q];
if (tid < 32)
Sum[tid] = 0.0f;
// prefetch the lut data into shared
uint lut_offset = lut_head.x;
uint lut_size = lut_head.y;
Lut += lut_offset;
#pragma unroll 1
for (uint i = tid; i < lut_size; i += blockDim.x)
LutOffset[i] = Lut[i].x * 64*64;
__syncthreads();
uint lut_idx = (tid & (1024-32))*UNROLL/32;
uint tidx = (tid & 31)*2;
uint offset = idx_B*szBatch + idx_H*szHead + idx_q*64 + tidx + LutOffset[lut_idx];
DY += offset;
Y += offset;
ehalf2 dy[UNROLL], y[UNROLL];
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 DY, Y, offset; \n\t"
"setp.lt.u32 p, %5, %6; \n\t"
"mov.b64 offset, {%4, 0}; \n\t"
"add.s64 DY, %2, offset; \n\t"
"add.s64 Y, %3, offset; \n\t"
"mov.u32 %0, 0; \n\t"
"mov.u32 %1, 0; \n\t"
"@p ld.global.nc.u32 %0, [DY]; \n\t"
"@p ld.global.nc.u32 %1, [Y]; \n\t"
"}" : "=r"(dy[i].x), "=r"(y[i].x) : "l"(DY), "l"(Y), "r"(i*64*64*2), "r"(lut_idx + i), "r"(lut_size));
}
// compute dy * y and start reduction
float sum_dyy = 0.0f;
for (int i = 0; i < UNROLL; i++)
sum_dyy += ew_sum(ew_mul(to_float(dy[i]), to_float(y[i])));
// reduce within warp
for (int i = 16; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
if (blockDim.x > 32)
{
// first thread of each warp store to shared
if ((tid & 31) == 0)
Sum[tid/32] = sum_dyy;
__syncthreads();
if (tid < 32)
{
// first warp loads all prior reductions
sum_dyy = Sum[tid];
// reduce within this last warp
#pragma unroll 1
for (uint i = shfl_init; i > 0; i >>= 1)
sum_dyy += shfl_xor(sum_dyy, i);
// final reduction to shared
Sum[tid] = sum_dyy;
}
__syncthreads();
sum_dyy = Sum[0];
}
DX += offset;
#pragma unroll
for (uint i = 0; i < UNROLL; i++)
{
// dx = (dy - sum_dyy) * y * scale
ehalf2 dx = to_ehalf(ew_mul(ew_mul(ew_sub(to_float(dy[i]), sum_dyy), to_float(y[i])), scale));
asm (
"{ \n\t"
".reg .pred p; \n\t"
".reg .s64 DX, offset; \n\t"
"setp.lt.u32 p, %3, %4; \n\t"
"mov.b64 offset, {%1, 0}; \n\t"
"add.s64 DX, %0, offset; \n\t"
"@p st.global.wb.u32 [DX], %2; \n\t"
"}" :: "l"(DX), "r"(i*64*64*2), "r"(dx.x), "r"(lut_idx + i), "r"(lut_size));
}
}
#define LOG2e 1.4426950408889634f
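// log2(e): folded into the softmax scale so e**x can be computed as 2**(x * LOG2e) with ex2.approx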
typedef unsigned char uchar;
bool BlocksparseMaskedSoftmax(CUstream stream,
const uint2* lut,
const char* mask,
const bhalf* x,
ehalf* y,
uint block_size, uint blocks,
uint batch_dim, uint head_dim, uint ctx_blks,
uint lut_heads, uint lut_dim, uint max_lut,
uint mask_heads, float scale)
{
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint szMask = mask_heads > 1 ? blocks * block_size : 0;
uint gridQ = ctx_blks * block_size;
uint szHead = blocks * block_size * block_size;
uint szBatch = head_dim * szHead;
uint maxK = max_lut * block_size;
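// maxK: worst-case number of key columns a single query row reduces over (lut entries * block width)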
//cuMemsetD16Async((CUdeviceptr)c, 0, szBatch*batch_dim, stream);
// combine scaling with fast e**x compute
scale *= LOG2e;
dim3 grid(gridQ, batch_dim, head_dim);
if (block_size == 64)
{
// Unroll factor 8 (ctx_size up to 16K)
if (maxK > 1024*8)
{
uint threads = 1024;
uint shfl_init = 16;
uint lut_max = threads * 8 / 32;
uint shared = lut_max * 12;
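// 12 bytes per lut entry: 8 for the uint64 mask (LutMask64) plus 4 for the block offset (LutOffset)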
blocksparse_masked_softmax_64x64<8><<<grid,threads,shared,stream>>>(lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
// Unroll factor of 4 is preferred (keeps these kernels under 32 registers for max occupancy)
else if (maxK >= 64*4)
{
uint threads = CEIL_DIV(maxK, 64*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
uint lut_max = threads * 4 / 32;
uint shared = lut_max * 12;
blocksparse_masked_softmax_64x64<4><<<grid,threads,shared,stream>>>(lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 64) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
uint lut_max = threads * 1 / 32;
uint shared = lut_max * 12;
blocksparse_masked_softmax_64x64<1><<<grid,threads,shared,stream>>>(lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
}
}
else
{
uint shared = max_lut*8;
// Unroll factor 8 (ctx_size up to 8K)
if (maxK > 1024*4)
{
if (block_size == 8)
blocksparse_masked_softmax<8, 8, uchar><<<grid,1024,shared,stream>>>(lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax<8,16,ushort><<<grid,1024,shared,stream>>>(lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else
blocksparse_masked_softmax<8,32, uint><<<grid,1024,shared,stream>>>(lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
}
// Unroll factor of 4 is preferred
else if (maxK > 32*4)
{
uint threads = CEIL_DIV(maxK, 32*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
blocksparse_masked_softmax<4, 8, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax<4,16,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
blocksparse_masked_softmax<4,32, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 32) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
blocksparse_masked_softmax<1, 8, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax<1,16,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
blocksparse_masked_softmax<1,32, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
}
return true;
}
bool BlocksparseMaskedSoftmaxGrad(CUstream stream,
const uint2* lut,
const char* mask,
const ehalf* dy,
const ehalf* y,
ehalf* dx,
uint block_size, uint blocks,
uint batch_dim, uint head_dim, uint ctx_blks,
uint lut_heads, uint lut_dim, uint max_lut,
uint mask_heads, float scale)
{
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint szMask = mask_heads > 1 ? blocks * block_size : 0;
uint gridQ = ctx_blks * block_size;
uint szHead = blocks * block_size * block_size;
uint szBatch = head_dim * szHead;
uint maxK = max_lut * block_size;
//cuMemsetD16Async((CUdeviceptr)c, 0, szBatch*batch_dim, stream);
dim3 grid(gridQ, batch_dim, head_dim);
if (block_size == 64)
{
uint shared = max_lut*4;
// Unroll factor 8
if (maxK > 1024*8)
{
uint threads = 1024;
uint shfl_init = 16;
blocksparse_masked_softmax_64x64_grad<8><<<grid,threads,shared,stream>>>(lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
// Unroll factor of 4 is preferred
else if (maxK >= 64*4)
{
uint threads = CEIL_DIV(maxK, 64*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
blocksparse_masked_softmax_64x64_grad<4><<<grid,threads,shared,stream>>>(lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 64) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
blocksparse_masked_softmax_64x64_grad<1><<<grid,threads,shared,stream>>>(lut, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init);
}
}
else
{
uint shared = max_lut*8;
// Unroll factor 8
if (maxK > 1024*4)
{
if (block_size == 8)
blocksparse_masked_softmax_grad<8, 8, uchar><<<grid,1024,shared,stream>>>(lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax_grad<8,16,ushort><<<grid,1024,shared,stream>>>(lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
else
blocksparse_masked_softmax_grad<8,32, uint><<<grid,1024,shared,stream>>>(lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, 16, mask != NULL);
}
// Unroll factor of 4 is preferred
else if (maxK > 32*4)
{
uint threads = CEIL_DIV(maxK, 32*4) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
blocksparse_masked_softmax_grad<4, 8, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax_grad<4,16,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
blocksparse_masked_softmax_grad<4,32, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
// Unroll factor 1
else
{
uint threads = CEIL_DIV(maxK, 32) * 32;
uint shfl_init = THREAD_POW2(threads) / 64;
if (block_size == 8)
blocksparse_masked_softmax_grad<1, 8, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else if (block_size == 16)
blocksparse_masked_softmax_grad<1,16,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
else
blocksparse_masked_softmax_grad<1,32, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, dy, y, dx, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, mask != NULL);
}
}
return true;
}
#endif // GOOGLE_CUDA
|
591648776e7fc7092f25ed837952d235f6c4ae4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/shuffle_channel_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
template <typename T>
__global__ void ShuffleChannel(const int nthreads, const int feature_map_size,
T* output, const T* input, int group_row,
int group_column, int len) {
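// channels are viewed as [group_row, group_column] and written back transposed as [group_column, group_row];
// e.g. group_row=2, group_column=3 maps input channels 0..5 to output positions 0,2,4,1,3,5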
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t ii = index; ii < nthreads; ii += offset) {
const int n = ii / group_row / group_column / len;
const int i = (ii / group_column / len) % group_row;
const int j = ii / len % group_column;
const int k = ii - (n * feature_map_size + (i * group_column + j) * len);
T* p_o = output + n * feature_map_size + (j * group_row + i) * len;
p_o[k] = input[ii];
}
}
template <typename DeviceContext, typename T>
class ShuffleChannelOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<framework::Tensor>("X");
auto* output = ctx.Output<framework::Tensor>("Out");
int group = ctx.Attr<int>("group");
auto input_dims = input->dims();
auto num = input_dims[0];
auto channel = input_dims[1];
auto height = input_dims[2];
auto weight = input_dims[3];
auto feature_map_size = channel * height * weight;
auto sp_sz = height * weight;
int group_row = group;
int group_column = channel / group_row;
// count is the product N*C*H*W, i.e. the same as numel()
int count = num * group_column * group_row * sp_sz;
int blocks = NumBlocks(output->numel());
int threads = kNumCUDAThreads;
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( ShuffleChannel<T>)
, dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(),
count, feature_map_size, output_data, input_data, group_row,
group_column, sp_sz);
}
};
template <typename DeviceContext, typename T>
class ShuffleChannelGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* output_grad =
ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* input_grad =
ctx.Output<framework::Tensor>(framework::GradVarName("X"));
int group = ctx.Attr<int>("group");
const auto& input_dims = input_grad->dims();
auto num = input_dims[0];
auto channel = input_dims[1];
auto height = input_dims[2];
auto weight = input_dims[3];
auto feature_map_size = channel * height * weight;
auto sp_sz = height * weight;
int group_row = group;
int group_column = channel / group_row;
T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
const T* output_grad_data = output_grad->data<T>();
int blocks = NumBlocks(output_grad->numel());
int threads = kNumCUDAThreads;
int count = num * group_column * group_row * sp_sz;
hipLaunchKernelGGL(( ShuffleChannel<T>)
, dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(),
count, feature_map_size, input_grad_data, output_grad_data,
group_row, group_column, sp_sz);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
shuffle_channel,
ops::ShuffleChannelOpCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::ShuffleChannelOpCUDAKernel<paddle::platform::CUDADeviceContext,
double>);
REGISTER_OP_CUDA_KERNEL(
shuffle_channel_grad,
ops::ShuffleChannelGradOpCUDAKernel<paddle::platform::CUDADeviceContext,
float>,
ops::ShuffleChannelGradOpCUDAKernel<paddle::platform::CUDADeviceContext,
double>);
| 591648776e7fc7092f25ed837952d235f6c4ae4d.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/shuffle_channel_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
template <typename T>
__global__ void ShuffleChannel(const int nthreads, const int feature_map_size,
T* output, const T* input, int group_row,
int group_column, int len) {
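// channels are viewed as [group_row, group_column] and written back transposed as [group_column, group_row];
// e.g. group_row=2, group_column=3 maps input channels 0..5 to output positions 0,2,4,1,3,5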
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t ii = index; ii < nthreads; ii += offset) {
const int n = ii / group_row / group_column / len;
const int i = (ii / group_column / len) % group_row;
const int j = ii / len % group_column;
const int k = ii - (n * feature_map_size + (i * group_column + j) * len);
T* p_o = output + n * feature_map_size + (j * group_row + i) * len;
p_o[k] = input[ii];
}
}
template <typename DeviceContext, typename T>
class ShuffleChannelOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<framework::Tensor>("X");
auto* output = ctx.Output<framework::Tensor>("Out");
int group = ctx.Attr<int>("group");
auto input_dims = input->dims();
auto num = input_dims[0];
auto channel = input_dims[1];
auto height = input_dims[2];
auto weight = input_dims[3];
auto feature_map_size = channel * height * weight;
auto sp_sz = height * weight;
int group_row = group;
int group_column = channel / group_row;
// count is the product N*C*H*W, i.e. the same as numel()
int count = num * group_column * group_row * sp_sz;
int blocks = NumBlocks(output->numel());
int threads = kNumCUDAThreads;
const T* input_data = input->data<T>();
T* output_data = output->mutable_data<T>(ctx.GetPlace());
ShuffleChannel<T>
<<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>(
count, feature_map_size, output_data, input_data, group_row,
group_column, sp_sz);
}
};
template <typename DeviceContext, typename T>
class ShuffleChannelGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* output_grad =
ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* input_grad =
ctx.Output<framework::Tensor>(framework::GradVarName("X"));
int group = ctx.Attr<int>("group");
const auto& input_dims = input_grad->dims();
auto num = input_dims[0];
auto channel = input_dims[1];
auto height = input_dims[2];
auto weight = input_dims[3];
auto feature_map_size = channel * height * weight;
auto sp_sz = height * weight;
int group_row = group;
int group_column = channel / group_row;
T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
const T* output_grad_data = output_grad->data<T>();
int blocks = NumBlocks(output_grad->numel());
int threads = kNumCUDAThreads;
int count = num * group_column * group_row * sp_sz;
ShuffleChannel<T>
<<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>(
count, feature_map_size, input_grad_data, output_grad_data,
group_row, group_column, sp_sz);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
shuffle_channel,
ops::ShuffleChannelOpCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::ShuffleChannelOpCUDAKernel<paddle::platform::CUDADeviceContext,
double>);
REGISTER_OP_CUDA_KERNEL(
shuffle_channel_grad,
ops::ShuffleChannelGradOpCUDAKernel<paddle::platform::CUDADeviceContext,
float>,
ops::ShuffleChannelGradOpCUDAKernel<paddle::platform::CUDADeviceContext,
double>);
|
e34c868c6ae735f109ee37483a282b3824be43c4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "maxout_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../maxout_layer.h"
#include "../nn_types.h"
template<typename position_type>
__global__ void maxout_upd_kernel(
float * __restrict output,
position_type * __restrict max_feature_map_positions,
const float * __restrict input,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
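// the feature maps competing for one output map are strided, not contiguous:
// input fm index = s * output_feature_map_count + output_feature_map_id, s in [0, feature_map_subsampling_size)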
float max_val = input[input_offset];
int max_pos = 0;
for(int i = 1; i < feature_map_subsampling_size; ++i)
{
input_offset += output_feature_map_count * neuron_count_per_feature_map;
float new_val = input[input_offset];
if (new_val > max_val)
{
max_val = new_val;
max_pos = i;
}
}
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
output[output_offset] = max_val;
max_feature_map_positions[output_offset] = static_cast<position_type>(max_pos);
}
}
__global__ void maxout_forward_only_upd_kernel(
float * __restrict output,
const float * __restrict input,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
float max_val = input[input_offset];
for(int i = 1; i < feature_map_subsampling_size; ++i)
{
input_offset += output_feature_map_count * neuron_count_per_feature_map;
float new_val = input[input_offset];
max_val = max(new_val, max_val);
}
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
output[output_offset] = max_val;
}
}
template<typename position_type, bool add_update_to_destination>
__global__ void maxout_backprop_upd_kernel(
float * __restrict input_errors,
const position_type * __restrict max_feature_map_positions,
const float * __restrict output_errors,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
int max_feature_map = static_cast<int>(max_feature_map_positions[output_offset]);
float output_error = output_errors[output_offset];
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
for(int i = 0; i < feature_map_subsampling_size; ++i)
{
if (add_update_to_destination)
input_errors[input_offset] += ((i == max_feature_map) ? output_error : 0.0F);
else
input_errors[input_offset] = ((i == max_feature_map) ? output_error : 0.0F);
input_offset += output_feature_map_count * neuron_count_per_feature_map;
}
}
}
namespace nnforge
{
namespace cuda
{
maxout_layer_updater_cuda::maxout_layer_updater_cuda()
{
}
maxout_layer_updater_cuda::~maxout_layer_updater_cuda()
{
}
void maxout_layer_updater_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
if (actions.find(layer_action(layer_action::backward_data, 0)) == actions.end())
hipLaunchKernelGGL(( maxout_forward_only_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_buffer,
*input_buffers[0],
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
else if (feature_map_subsampling_size <= 256)
hipLaunchKernelGGL(( maxout_upd_kernel<unsigned char>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_buffer,
*temporary_per_entry_buffer,
*input_buffers[0],
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
else
hipLaunchKernelGGL(( maxout_upd_kernel<int>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_buffer,
*temporary_per_entry_buffer,
*input_buffers[0],
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
void maxout_layer_updater_cuda::enqueue_backward_data_propagation(
hipStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
if (feature_map_subsampling_size <= 256)
{
if (add_update_to_destination)
hipLaunchKernelGGL(( maxout_backprop_upd_kernel<unsigned char, true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_errors_buffer,
*temporary_per_entry_buffer,
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
else
hipLaunchKernelGGL(( maxout_backprop_upd_kernel<unsigned char, false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_errors_buffer,
*temporary_per_entry_buffer,
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
else
{
if (add_update_to_destination)
hipLaunchKernelGGL(( maxout_backprop_upd_kernel<int, true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_errors_buffer,
*temporary_per_entry_buffer,
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
else
hipLaunchKernelGGL(( maxout_backprop_upd_kernel<int, false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_errors_buffer,
*temporary_per_entry_buffer,
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
}
bool maxout_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
return false;
}
bool maxout_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
bool maxout_layer_updater_cuda::is_backward_data_dependent_on_temporary_per_entry_buffer(unsigned int action_input_index) const
{
return true;
}
void maxout_layer_updater_cuda::updater_configured()
{
nnforge_shared_ptr<const maxout_layer> layer_derived = nnforge_dynamic_pointer_cast<const maxout_layer>(layer_schema);
feature_map_subsampling_size = layer_derived->feature_map_subsampling_size;
}
size_t maxout_layer_updater_cuda::get_temporary_per_entry_buffer_size() const
{
size_t res = 0;
if (actions.find(layer_action(layer_action::backward_data, 0)) != actions.end())
{
if (feature_map_subsampling_size <= 256)
return output_elem_count_per_entry * sizeof(unsigned char);
else
return output_elem_count_per_entry * sizeof(int);
}
return res;
}
}
}
| e34c868c6ae735f109ee37483a282b3824be43c4.cu | /*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "maxout_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../maxout_layer.h"
#include "../nn_types.h"
template<typename position_type>
__global__ void maxout_upd_kernel(
float * __restrict output,
position_type * __restrict max_feature_map_positions,
const float * __restrict input,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
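// the feature maps competing for one output map are strided, not contiguous:
// input fm index = s * output_feature_map_count + output_feature_map_id, s in [0, feature_map_subsampling_size)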
float max_val = input[input_offset];
int max_pos = 0;
for(int i = 1; i < feature_map_subsampling_size; ++i)
{
input_offset += output_feature_map_count * neuron_count_per_feature_map;
float new_val = input[input_offset];
if (new_val > max_val)
{
max_val = new_val;
max_pos = i;
}
}
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
output[output_offset] = max_val;
max_feature_map_positions[output_offset] = static_cast<position_type>(max_pos);
}
}
__global__ void maxout_forward_only_upd_kernel(
float * __restrict output,
const float * __restrict input,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
float max_val = input[input_offset];
for(int i = 1; i < feature_map_subsampling_size; ++i)
{
input_offset += output_feature_map_count * neuron_count_per_feature_map;
float new_val = input[input_offset];
max_val = max(new_val, max_val);
}
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
output[output_offset] = max_val;
}
}
template<typename position_type, bool add_update_to_destination>
__global__ void maxout_backprop_upd_kernel(
float * __restrict input_errors,
const position_type * __restrict max_feature_map_positions,
const float * __restrict output_errors,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int feature_map_subsampling_size,
int entry_count)
{
int neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
int max_feature_map = static_cast<int>(max_feature_map_positions[output_offset]);
float output_error = output_errors[output_offset];
int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id;
for(int i = 0; i < feature_map_subsampling_size; ++i)
{
if (add_update_to_destination)
input_errors[input_offset] += ((i == max_feature_map) ? output_error : 0.0F);
else
input_errors[input_offset] = ((i == max_feature_map) ? output_error : 0.0F);
input_offset += output_feature_map_count * neuron_count_per_feature_map;
}
}
}
namespace nnforge
{
namespace cuda
{
maxout_layer_updater_cuda::maxout_layer_updater_cuda()
{
}
maxout_layer_updater_cuda::~maxout_layer_updater_cuda()
{
}
void maxout_layer_updater_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
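			// If no backward-data action is scheduled, argmax positions are not needed
			// and the cheaper forward-only kernel runs; otherwise the position type is
			// chosen just wide enough for feature_map_subsampling_size (a single byte
			// covers up to 256 candidate feature maps per output).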
if (actions.find(layer_action(layer_action::backward_data, 0)) == actions.end())
maxout_forward_only_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_buffer,
*input_buffers[0],
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
else if (feature_map_subsampling_size <= 256)
maxout_upd_kernel<unsigned char><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_buffer,
*temporary_per_entry_buffer,
*input_buffers[0],
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
else
maxout_upd_kernel<int><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_buffer,
*temporary_per_entry_buffer,
*input_buffers[0],
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
void maxout_layer_updater_cuda::enqueue_backward_data_propagation(
cudaStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count);
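			// The position type must match the one chosen during forward propagation,
			// since both passes share the same temporary per-entry buffer; the boolean
			// template flag selects between accumulating into and overwriting the
			// destination error buffer.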
if (feature_map_subsampling_size <= 256)
{
if (add_update_to_destination)
maxout_backprop_upd_kernel<unsigned char, true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_errors_buffer,
*temporary_per_entry_buffer,
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
else
maxout_backprop_upd_kernel<unsigned char, false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_errors_buffer,
*temporary_per_entry_buffer,
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
else
{
if (add_update_to_destination)
maxout_backprop_upd_kernel<int, true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_errors_buffer,
*temporary_per_entry_buffer,
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
else
maxout_backprop_upd_kernel<int, false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_errors_buffer,
*temporary_per_entry_buffer,
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
feature_map_subsampling_size,
entry_count);
}
}
bool maxout_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
return false;
}
bool maxout_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
bool maxout_layer_updater_cuda::is_backward_data_dependent_on_temporary_per_entry_buffer(unsigned int action_input_index) const
{
return true;
}
void maxout_layer_updater_cuda::updater_configured()
{
nnforge_shared_ptr<const maxout_layer> layer_derived = nnforge_dynamic_pointer_cast<const maxout_layer>(layer_schema);
feature_map_subsampling_size = layer_derived->feature_map_subsampling_size;
}
size_t maxout_layer_updater_cuda::get_temporary_per_entry_buffer_size() const
{
size_t res = 0;
if (actions.find(layer_action(layer_action::backward_data, 0)) != actions.end())
{
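				// one argmax position is stored per output element; a single byte
				// suffices when the subsampling size fits in 256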
if (feature_map_subsampling_size <= 256)
return output_elem_count_per_entry * sizeof(unsigned char);
else
return output_elem_count_per_entry * sizeof(int);
}
return res;
}
}
}
|
99c00e92b26bc2ab88fb1895cc3082d40827e041.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ======================================================================== //
// Copyright 2018-2019 Ingo Wald //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// ======================================================================== //
#include "vec.h"
#include <optix_device.h>
namespace osc {
#include "launch_params.h"
}
using namespace osc;
namespace osc {
/*! launch parameters in constant memory, filled in by optix upon
optixLaunch (this gets filled in from the buffer we pass to
optixLaunch) */
extern "C" __constant__ LaunchParams optixLaunchParams;
//------------------------------------------------------------------------------
// closest hit and anyhit programs for radiance-type rays.
//
// Note eventually we will have to create one pair of those for each
// ray type and each geometry type we want to render; but this
// simple example doesn't use any actual geometries yet, so we only
// create a single, dummy, set of them (we do have to have at least
// one group of them to set up the SBT)
//------------------------------------------------------------------------------
extern "C" __global__ void
__closesthit__radiance() { /*! for this simple example, this will remain empty
*/
}
extern "C" __global__ void
__anyhit__radiance() { /*! for this simple example, this will remain empty */
}
//------------------------------------------------------------------------------
// miss program that gets called for any ray that did not have a
// valid intersection
//
// as with the anyhit/closest hit programs, in this example we only
// need to have _some_ dummy function to set up a valid SBT
// ------------------------------------------------------------------------------
extern "C" __global__ void
__miss__radiance() { /*! for this simple example, this will remain empty */
}
//------------------------------------------------------------------------------
// ray gen program - the actual rendering happens in here
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__renderFrame() {
const int frameID = optixLaunchParams.frame_id;
if (frameID == 0 && optixGetLaunchIndex().x == 0 &&
optixGetLaunchIndex().y == 0) {
// we could of course also have used optixGetLaunchDims to query
// the launch size, but accessing the optixLaunchParams here
// makes sure they're not getting optimized away (because
// otherwise they'd not get used)
printf("############################################\n");
printf("Hello world from OptiX 7 raygen program!\n(within a "
"%ix%i-sized launch)\n",
optixLaunchParams.fb_size.x, optixLaunchParams.fb_size.y);
printf("############################################\n");
}
// ------------------------------------------------------------------
// for this example, produce a simple test pattern:
// ------------------------------------------------------------------
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const float r = float((ix + frameID) % 256) / 255.0f;
const float g = float((iy + frameID) % 256) / 255.0f;
const float b = float((ix + iy + frameID) % 256) / 255.0f;
// and write to frame buffer ...
const unsigned fbIndex = ix + iy * optixLaunchParams.fb_size.x;
optixLaunchParams.color_buffer[fbIndex] = make_float4(r, g, b, 1.0f);
}
} // namespace osc
| 99c00e92b26bc2ab88fb1895cc3082d40827e041.cu | // ======================================================================== //
// Copyright 2018-2019 Ingo Wald //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// ======================================================================== //
#include "vec.h"
#include <optix_device.h>
namespace osc {
#include "launch_params.h"
}
using namespace osc;
namespace osc {
/*! launch parameters in constant memory, filled in by optix upon
optixLaunch (this gets filled in from the buffer we pass to
optixLaunch) */
extern "C" __constant__ LaunchParams optixLaunchParams;
//------------------------------------------------------------------------------
// closest hit and anyhit programs for radiance-type rays.
//
// Note eventually we will have to create one pair of those for each
// ray type and each geometry type we want to render; but this
// simple example doesn't use any actual geometries yet, so we only
// create a single, dummy, set of them (we do have to have at least
// one group of them to set up the SBT)
//------------------------------------------------------------------------------
extern "C" __global__ void
__closesthit__radiance() { /*! for this simple example, this will remain empty
*/
}
extern "C" __global__ void
__anyhit__radiance() { /*! for this simple example, this will remain empty */
}
//------------------------------------------------------------------------------
// miss program that gets called for any ray that did not have a
// valid intersection
//
// as with the anyhit/closest hit programs, in this example we only
// need to have _some_ dummy function to set up a valid SBT
// ------------------------------------------------------------------------------
extern "C" __global__ void
__miss__radiance() { /*! for this simple example, this will remain empty */
}
//------------------------------------------------------------------------------
// ray gen program - the actual rendering happens in here
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__renderFrame() {
const int frameID = optixLaunchParams.frame_id;
if (frameID == 0 && optixGetLaunchIndex().x == 0 &&
optixGetLaunchIndex().y == 0) {
// we could of course also have used optixGetLaunchDims to query
// the launch size, but accessing the optixLaunchParams here
// makes sure they're not getting optimized away (because
// otherwise they'd not get used)
printf("############################################\n");
printf("Hello world from OptiX 7 raygen program!\n(within a "
"%ix%i-sized launch)\n",
optixLaunchParams.fb_size.x, optixLaunchParams.fb_size.y);
printf("############################################\n");
}
// ------------------------------------------------------------------
// for this example, produce a simple test pattern:
// ------------------------------------------------------------------
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const float r = float((ix + frameID) % 256) / 255.0f;
const float g = float((iy + frameID) % 256) / 255.0f;
const float b = float((ix + iy + frameID) % 256) / 255.0f;
// and write to frame buffer ...
const unsigned fbIndex = ix + iy * optixLaunchParams.fb_size.x;
optixLaunchParams.color_buffer[fbIndex] = make_float4(r, g, b, 1.0f);
}
} // namespace osc
|
302c154f942cc02ac2e9d5151201779ba9c62cbc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* GPU Accelerated MSExcel functions
* Copyright (C) 2011-2015 Cyrille Favreau <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Author: Cyrille Favreau <[email protected]>
*
*/
#include <hip/hip_runtime.h>
#include "ExcelCUDA_kernel.h"
/**
 * @brief Compounds (1 + r) over the nbObjects returns of each series; each thread stores its final compounded return in series, and thread 0 also records the running products in performances.
*/
__global__ void kernel_performanceStorage( float* objects, float* series, float* performances, int nbObjects )
{
// Compute the index
unsigned int x = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int index = (y*blockDim.x) + x;
int objectsIndex = index*nbObjects;
// Compute performance
   // per-thread accumulators: deliberately not __shared__, since every thread
   // in the block would otherwise race on the same two floats
   float localPerformance[2];
   localPerformance[0] = 1.f; // current performance
   localPerformance[1] = 0.f; // previous performance
for( int i(0); i<nbObjects; ++i ) {
localPerformance[1] = localPerformance[0];
localPerformance[0] = (1.0+objects[objectsIndex+i])*localPerformance[1];
if( index == 0 ) performances[i] = localPerformance[0];
}
// Store performance
series[index] = localPerformance[0] - 1.0;
}
/**
* @brief Kernel function to be executed on the GPU
* @param ptr Pointer to an array of floats stored in GPU memory
*/
__global__ void kernel_frequencies( float* series, int* frequencies, float range, int nbFrequencies )
{
// Compute the index
unsigned int x = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int index = (y*blockDim.x) + x;
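   // shift each sample from [-range/2, range/2) into [0, range) and bin it
   // into one of nbFrequencies equal-width buckets; atomicAdd is required
   // because many threads may land in the same bucket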
float v = series[index]-(-range/2);
int position = (v/(range/nbFrequencies));
if( position>=0 && position<nbFrequencies) {
atomicAdd(&frequencies[position],1);
}
}
| 302c154f942cc02ac2e9d5151201779ba9c62cbc.cu | /*
* GPU Accelerated MSExcel functions
* Copyright (C) 2011-2015 Cyrille Favreau <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Author: Cyrille Favreau <[email protected]>
*
*/
#include <cuda.h>
#include "ExcelCUDA_kernel.h"
/**
 * @brief Compounds (1 + r) over the nbObjects returns of each series; each thread stores its final compounded return in series, and thread 0 also records the running products in performances.
*/
__global__ void kernel_performanceStorage( float* objects, float* series, float* performances, int nbObjects )
{
// Compute the index
unsigned int x = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int index = (y*blockDim.x) + x;
int objectsIndex = index*nbObjects;
// Compute performance
   // per-thread accumulators: deliberately not __shared__, since every thread
   // in the block would otherwise race on the same two floats
   float localPerformance[2];
   localPerformance[0] = 1.f; // current performance
   localPerformance[1] = 0.f; // previous performance
for( int i(0); i<nbObjects; ++i ) {
localPerformance[1] = localPerformance[0];
localPerformance[0] = (1.0+objects[objectsIndex+i])*localPerformance[1];
if( index == 0 ) performances[i] = localPerformance[0];
}
// Store performance
series[index] = localPerformance[0] - 1.0;
}
/**
* @brief Kernel function to be executed on the GPU
* @param ptr Pointer to an array of floats stored in GPU memory
*/
__global__ void kernel_frequencies( float* series, int* frequencies, float range, int nbFrequencies )
{
// Compute the index
unsigned int x = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y+threadIdx.y;
unsigned int index = (y*blockDim.x) + x;
float v = series[index]-(-range/2);
int position = (v/(range/nbFrequencies));
if( position>=0 && position<nbFrequencies) {
atomicAdd(&frequencies[position],1);
}
}
|
2d5e84ac9f3235957e0c6087272a6b3ef4477315.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <sys/time.h>
#include <stdio.h>
using namespace std;
/* Concurrent kernel execution
* - compare concurrent execution performance with serial execution
* - effect of (number of blocks) and (number of multiprocessors)
*/
#define TILE_DIM 16
#define BLOCK_ROWS 16
__global__ void transposeNaive(double *odata, double* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i] = idata[index_in+i*width];
}
}
}
int main() {
// check if device support concurrent executions
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n", device,
deviceProp.major, deviceProp.minor);
cout << " concurrent kernel execution = " << deviceProp.concurrentKernels << endl;
}
/************************/
// (*) Repeat for side = 32, 64, 2048
// (for side = 2048, set nTranspose = 8)
int side = 32;
int n = side*side;
int nTranspose = 96;
int nreps = 200;
int nStream = nTranspose;
// define streams
hipStream_t stream[nStream];
for (int i=0; i<nStream; i++)
hipStreamCreate(&stream[i]);
time_t sTime = time(NULL);
struct timeval tt1, tt2;
int ms;
double fms;
// allocate pinned host memory
double *data;
hipHostMalloc((void**) &data, nTranspose * n * sizeof(double));
// data initialization
for (int j=0; j<nTranspose; j++)
for (int i=0; i<n; i++) {
data[i+j*n] = double(i+j*n);
}
double *data_dev;
// device memory allocation
hipMalloc((void**) &data_dev, nStream * 2 * n * sizeof(double));
dim3 grid(side/16,side/16,1);
dim3 threads(16,16,1);
// send data to device
for (int i=0; i<nStream; i++) {
int offset = i * n;
hipMemcpy(data_dev + offset*2, data + offset, n * sizeof(double),
hipMemcpyHostToDevice);
}
hipDeviceSynchronize();
gettimeofday( &tt1, NULL );
// kernel executions :
// (*) for concurrent execution : stream[0] --> stream[i]
for (int i=0; i<nStream; i++) {
int offset = i * n;
hipLaunchKernelGGL(( transposeNaive) , dim3(grid), dim3(threads), 0, stream[0] ,
data_dev + offset*2+n, data_dev + offset*2, side, side, nreps);
}
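  // Concurrent variant (the "(*)" note above): replacing stream[0] with
  // stream[i] gives each transpose its own stream so independent kernels can
  // overlap, resources permitting:
  //   for (int i=0; i<nStream; i++) {
  //     int offset = i * n;
  //     hipLaunchKernelGGL(( transposeNaive) , dim3(grid), dim3(threads), 0, stream[i] ,
  //         data_dev + offset*2+n, data_dev + offset*2, side, side, nreps);
  //   }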
hipDeviceSynchronize();
gettimeofday( &tt2, NULL );
// get data back from device
for (int i=0; i<nStream; i++) {
int offset = i * n;
hipMemcpy(data + offset, data_dev + offset*2 + n, n * sizeof(double),
hipMemcpyDeviceToHost);
}
// timing
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = ((double)ms)/1000000.0;
cout << "Comp time = " << fms << endl;
// Destroy streams
for (int i=0; i<nStream; i++)
hipStreamDestroy(stream[i]);
hipFree(data_dev);
cout << "value check = " << data[n+5467] << endl;
// free pinned host memory
hipHostFree(data);
}
| 2d5e84ac9f3235957e0c6087272a6b3ef4477315.cu | #include <cuda.h>
#include <iostream>
#include <sys/time.h>
#include <stdio.h>
using namespace std;
/* Concurrent kernel execution
* - compare concurrent execution performance with serial execution
* - effect of (number of blocks) and (number of multiprocessors)
*/
#define TILE_DIM 16
#define BLOCK_ROWS 16
__global__ void transposeNaive(double *odata, double* idata, int width, int height, int nreps)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int r=0; r < nreps; r++) {
for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
odata[index_out+i] = idata[index_in+i*width];
}
}
}
int main() {
// check if device support concurrent executions
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printf("Device %d has compute capability %d.%d.\n", device,
deviceProp.major, deviceProp.minor);
cout << " concurrent kernel execution = " << deviceProp.concurrentKernels << endl;
}
/************************/
// (*) Repeat for side = 32, 64, 2048
// (for side = 2048, set nTranspose = 8)
int side = 32;
int n = side*side;
int nTranspose = 96;
int nreps = 200;
int nStream = nTranspose;
// define streams
cudaStream_t stream[nStream];
for (int i=0; i<nStream; i++)
cudaStreamCreate(&stream[i]);
time_t sTime = time(NULL);
struct timeval tt1, tt2;
int ms;
double fms;
// allocate pinned host memory
double *data;
cudaMallocHost((void**) &data, nTranspose * n * sizeof(double));
// data initialization
for (int j=0; j<nTranspose; j++)
for (int i=0; i<n; i++) {
data[i+j*n] = double(i+j*n);
}
double *data_dev;
// device memory allocation
cudaMalloc((void**) &data_dev, nStream * 2 * n * sizeof(double));
dim3 grid(side/16,side/16,1);
dim3 threads(16,16,1);
// send data to device
for (int i=0; i<nStream; i++) {
int offset = i * n;
cudaMemcpy(data_dev + offset*2, data + offset, n * sizeof(double),
cudaMemcpyHostToDevice);
}
cudaThreadSynchronize();
gettimeofday( &tt1, NULL );
// kernel executions :
// (*) for concurrent execution : stream[0] --> stream[i]
for (int i=0; i<nStream; i++) {
int offset = i * n;
transposeNaive <<< grid, threads, 0, stream[0] >>>
(data_dev + offset*2+n, data_dev + offset*2, side, side, nreps);
}
cudaThreadSynchronize();
gettimeofday( &tt2, NULL );
// get data back from device
for (int i=0; i<nStream; i++) {
int offset = i * n;
cudaMemcpy(data + offset, data_dev + offset*2 + n, n * sizeof(double),
cudaMemcpyDeviceToHost);
}
// timing
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = ((double)ms)/1000000.0;
cout << "Comp time = " << fms << endl;
// Destroy streams
for (int i=0; i<nStream; i++)
cudaStreamDestroy(stream[i]);
cudaFree(data_dev);
cout << "value check = " << data[n+5467] << endl;
// free pinned host memory
cudaFreeHost(data);
}
|
934a49767c3513492e5470d0a7d708a69755181f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template<typename T>
__global__
void init_array_kernel(T *gpu_array, size_t n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
new (&gpu_array[i]) T();
}
}
template<class T, class TransformedT, class TransformationT>
__global__
void transform_kernel(
T *from_array, size_t n,
TransformedT *to_array,
TransformationT transform) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
to_array[i] = transform(from_array[i], i);
}
}
template<typename T, typename Transformation>
__global__
void for_each_kernel(T *gpu_array, size_t n, Transformation fn) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
fn(gpu_array[i], i);
}
}
template<typename T, typename Reduction>
__global__
void reduce_2step_kernel(
T *gpu_array, size_t n,
T *out,
Reduction fn, T initial_value=T{}) { /*
Log-reduction based from the one in the book "The CUDA Handbook" by
Nicholas Wilt.
*/
extern __shared__ T partials[];
const int tid = threadIdx.x;
auto reduced = initial_value;
for (int i = blockIdx.x * blockDim.x + tid;
i < n;
i += blockDim.x * gridDim.x) {
reduced = fn(reduced, gpu_array[i]);
}
partials[tid] = reduced;
__syncthreads();
for (int active_threads = blockDim.x / 2;
active_threads > 0;
active_threads /= 2) {
auto is_active_thread = tid < active_threads;
if (is_active_thread) {
partials[tid] = fn(partials[tid], partials[tid + active_threads]);
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = partials[0];
}
}
/* -----------------------------------------------------------------------
These kernels are used and tested on the gpu_array and gpu_object classes.
*/
| 934a49767c3513492e5470d0a7d708a69755181f.cu | template<typename T>
__global__
void init_array_kernel(T *gpu_array, size_t n) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
new (&gpu_array[i]) T();
}
}
template<class T, class TransformedT, class TransformationT>
__global__
void transform_kernel(
T *from_array, size_t n,
TransformedT *to_array,
TransformationT transform) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
to_array[i] = transform(from_array[i], i);
}
}
template<typename T, typename Transformation>
__global__
void for_each_kernel(T *gpu_array, size_t n, Transformation fn) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
fn(gpu_array[i], i);
}
}
template<typename T, typename Reduction>
__global__
void reduce_2step_kernel(
T *gpu_array, size_t n,
T *out,
Reduction fn, T initial_value=T{}) { /*
Log-reduction based from the one in the book "The CUDA Handbook" by
Nicholas Wilt.
*/
extern __shared__ T partials[];
const int tid = threadIdx.x;
auto reduced = initial_value;
for (int i = blockIdx.x * blockDim.x + tid;
i < n;
i += blockDim.x * gridDim.x) {
reduced = fn(reduced, gpu_array[i]);
}
partials[tid] = reduced;
__syncthreads();
for (int active_threads = blockDim.x / 2;
active_threads > 0;
active_threads /= 2) {
auto is_active_thread = tid < active_threads;
if (is_active_thread) {
partials[tid] = fn(partials[tid], partials[tid + active_threads]);
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = partials[0];
}
}
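// A minimal host-side usage sketch (an assumption for illustration: SumOp and
// reduce_sum_sketch are not part of the original file). blockDim.x must be a
// power of two and the dynamic shared-memory size must be
// blockDim.x * sizeof(T); a second pass folds the per-block partials.
struct SumOp {
    __device__ float operator()(float a, float b) const { return a + b; }
};
inline float reduce_sum_sketch(float *d_data, size_t n) {
    const int threads = 256;
    const int blocks = static_cast<int>((n + threads - 1) / threads);
    float *d_partials = nullptr;
    float *d_result = nullptr;
    cudaMalloc(&d_partials, blocks * sizeof(float));
    cudaMalloc(&d_result, sizeof(float));
    // pass 1: each block writes one partial result
    reduce_2step_kernel<<<blocks, threads, threads * sizeof(float)>>>(
        d_data, n, d_partials, SumOp{});
    // pass 2: a single block folds the partials into the final value
    reduce_2step_kernel<<<1, threads, threads * sizeof(float)>>>(
        d_partials, static_cast<size_t>(blocks), d_result, SumOp{});
    float result = 0.0f;
    cudaMemcpy(&result, d_result, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_partials);
    cudaFree(d_result);
    return result;
}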
/* -----------------------------------------------------------------------
These kernels are used and tested on the gpu_array and gpu_object classes.
*/
|
eafd008f8bcaa2661d0bf14978bc7bd2f8a8ad24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/where_index_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/core/ddim.h"
namespace paddle {
namespace operators {
using CUDADeviceContext = paddle::platform::CUDADeviceContext;
template <typename T>
__global__ void GetTrueNum(const T *cond_data, const int64_t numel,
int64_t *true_num_array) {
const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t idx = tid; idx < numel; idx += gridDim.x * blockDim.x) {
true_num_array[idx] =
static_cast<int64_t>(static_cast<bool>(cond_data[idx]));
}
}
template <typename T>
__global__ void SetTrueIndex(int64_t *out_ptr, const T *cond_data,
const int64_t numel, const int64_t *stride_array,
const int64_t rank,
const int64_t *true_num_array) {
const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t idx = tid; idx < numel; idx += gridDim.x * blockDim.x) {
// true_num_array is calculated by cub::InclusiveSum,
    // because the first element of true_num_array is 1,
    // so we need to subtract 1 to get the true index.
const int64_t true_index = true_num_array[idx] - 1;
if (static_cast<bool>(cond_data[idx])) {
int64_t rank_index = idx;
for (int j = 0; j < rank; j++) {
const int64_t out_index = rank_index / stride_array[j];
out_ptr[true_index * rank + j] = out_index;
rank_index -= out_index * stride_array[j];
}
}
}
}
template <typename T>
class CUDAWhereIndexKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *condition = context.Input<framework::Tensor>("Condition");
auto *out = context.Output<framework::Tensor>("Out");
auto &dev_ctx = context.template device_context<CUDADeviceContext>();
const T *cond_data = condition->data<T>();
const int64_t numel = condition->numel();
auto dims = condition->dims();
const int rank = dims.size();
auto d_array_mem = memory::Alloc(dev_ctx, (numel + rank) * sizeof(int64_t));
auto h_array_mem =
memory::Alloc(platform::CPUPlace(), (rank + 1) * sizeof(int64_t));
// "stride_array" is an array and len(stride_array)==rank,
// each element is the stride of each dimension -- the length from i to i+1.
int64_t *h_stride_array = reinterpret_cast<int64_t *>(h_array_mem->ptr());
int64_t *d_stride_array = reinterpret_cast<int64_t *>(d_array_mem->ptr());
// "true_num_array" is an array and len(stride_array)==numel,
// at the beginning,
// "true_num_array" will set 1 if condition[i] == true else 0,
// then it will be calculated by cub::InclusiveSum,
// so that we can get the true number before i as the out index
int64_t *d_true_num_array = d_stride_array + rank;
// the total_true_num is the total number of condition[i] == true
int64_t *h_total_true_num = h_stride_array + rank;
    // allocate cub temporary memory
size_t cub_size = 0;
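    // the first call with a null temp-storage pointer only queries the
    // required scratch size (the standard CUB two-phase pattern); the actual
    // scan runs after the true-count kernel below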
hipcub::DeviceScan::InclusiveSum(nullptr, cub_size, d_true_num_array,
d_true_num_array, numel, dev_ctx.stream());
auto cub_mem = memory::Alloc(dev_ctx, cub_size * sizeof(int64_t));
void *cub_data = cub_mem->ptr();
// set d_true_num_array[i]=1 if cond_data[i]==true else 0
const int threads = ::min(numel, static_cast<int64_t>(128));
const int64_t need_grids = (numel + threads - 1) / threads;
const int grids = ::min(need_grids, static_cast<int64_t>(256));
hipLaunchKernelGGL(( GetTrueNum<T>), dim3(grids), dim3(threads), 0, dev_ctx.stream(), cond_data, numel,
d_true_num_array);
// calculate the inclusive prefix sum of "true_num_array"
// to get the index of "out" tensor,
// and the total number of cond_data[i]==true.
// Example:
// condition: F T T F F F T T
// before: 0 1 1 0 0 0 1 1
// after: 0 1 2 2 2 2 3 4
// out: 1 2 6 7
hipcub::DeviceScan::InclusiveSum(cub_data, cub_size, d_true_num_array,
d_true_num_array, numel, dev_ctx.stream());
// calculate each dimension's stride
h_stride_array[rank - 1] = 1;
for (int i = rank - 2; i >= 0; i--) {
h_stride_array[i] = h_stride_array[i + 1] * dims[i + 1];
}
memory::Copy(dev_ctx.GetPlace(), d_stride_array, platform::CPUPlace(),
h_stride_array, rank * sizeof(int64_t), dev_ctx.stream());
    // get total true number and set output size
// the last element of cub::InclusiveSum is the total number
memory::Copy(platform::CPUPlace(), h_total_true_num, dev_ctx.GetPlace(),
d_true_num_array + numel - 1, sizeof(int64_t),
dev_ctx.stream());
dev_ctx.Wait();
int64_t true_num = *h_total_true_num;
out->Resize(phi::make_ddim({static_cast<int64_t>(true_num), rank}));
auto out_data = out->mutable_data<int64_t>(context.GetPlace());
if (true_num == 0) {
return;
}
// using true_num_array and stride_array to calculate the output index
hipLaunchKernelGGL(( SetTrueIndex<T>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
out_data, cond_data, numel, d_stride_array, rank, d_true_num_array);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(where_index, ops::CUDAWhereIndexKernel<int64_t>,
ops::CUDAWhereIndexKernel<int>,
ops::CUDAWhereIndexKernel<int16_t>,
ops::CUDAWhereIndexKernel<bool>,
ops::CUDAWhereIndexKernel<float>,
ops::CUDAWhereIndexKernel<double>);
| eafd008f8bcaa2661d0bf14978bc7bd2f8a8ad24.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/where_index_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/core/ddim.h"
namespace paddle {
namespace operators {
using CUDADeviceContext = paddle::platform::CUDADeviceContext;
template <typename T>
__global__ void GetTrueNum(const T *cond_data, const int64_t numel,
int64_t *true_num_array) {
const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t idx = tid; idx < numel; idx += gridDim.x * blockDim.x) {
true_num_array[idx] =
static_cast<int64_t>(static_cast<bool>(cond_data[idx]));
}
}
template <typename T>
__global__ void SetTrueIndex(int64_t *out_ptr, const T *cond_data,
const int64_t numel, const int64_t *stride_array,
const int64_t rank,
const int64_t *true_num_array) {
const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t idx = tid; idx < numel; idx += gridDim.x * blockDim.x) {
// true_num_array is calculated by cub::InclusiveSum,
    // because the first element of true_num_array is 1,
    // so we need to subtract 1 to get the true index.
const int64_t true_index = true_num_array[idx] - 1;
if (static_cast<bool>(cond_data[idx])) {
int64_t rank_index = idx;
for (int j = 0; j < rank; j++) {
const int64_t out_index = rank_index / stride_array[j];
out_ptr[true_index * rank + j] = out_index;
rank_index -= out_index * stride_array[j];
}
}
}
}
template <typename T>
class CUDAWhereIndexKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *condition = context.Input<framework::Tensor>("Condition");
auto *out = context.Output<framework::Tensor>("Out");
auto &dev_ctx = context.template device_context<CUDADeviceContext>();
const T *cond_data = condition->data<T>();
const int64_t numel = condition->numel();
auto dims = condition->dims();
const int rank = dims.size();
auto d_array_mem = memory::Alloc(dev_ctx, (numel + rank) * sizeof(int64_t));
auto h_array_mem =
memory::Alloc(platform::CPUPlace(), (rank + 1) * sizeof(int64_t));
// "stride_array" is an array and len(stride_array)==rank,
// each element is the stride of each dimension -- the length from i to i+1.
int64_t *h_stride_array = reinterpret_cast<int64_t *>(h_array_mem->ptr());
int64_t *d_stride_array = reinterpret_cast<int64_t *>(d_array_mem->ptr());
// "true_num_array" is an array and len(stride_array)==numel,
// at the beginning,
// "true_num_array" will set 1 if condition[i] == true else 0,
// then it will be calculated by cub::InclusiveSum,
// so that we can get the true number before i as the out index
int64_t *d_true_num_array = d_stride_array + rank;
// the total_true_num is the total number of condition[i] == true
int64_t *h_total_true_num = h_stride_array + rank;
    // allocate cub temporary memory
size_t cub_size = 0;
cub::DeviceScan::InclusiveSum(nullptr, cub_size, d_true_num_array,
d_true_num_array, numel, dev_ctx.stream());
auto cub_mem = memory::Alloc(dev_ctx, cub_size * sizeof(int64_t));
void *cub_data = cub_mem->ptr();
// set d_true_num_array[i]=1 if cond_data[i]==true else 0
const int threads = std::min(numel, static_cast<int64_t>(128));
const int64_t need_grids = (numel + threads - 1) / threads;
const int grids = std::min(need_grids, static_cast<int64_t>(256));
GetTrueNum<T><<<grids, threads, 0, dev_ctx.stream()>>>(cond_data, numel,
d_true_num_array);
// calculate the inclusive prefix sum of "true_num_array"
// to get the index of "out" tensor,
// and the total number of cond_data[i]==true.
// Example:
// condition: F T T F F F T T
// before: 0 1 1 0 0 0 1 1
// after: 0 1 2 2 2 2 3 4
// out: 1 2 6 7
cub::DeviceScan::InclusiveSum(cub_data, cub_size, d_true_num_array,
d_true_num_array, numel, dev_ctx.stream());
// calculate each dimension's stride
h_stride_array[rank - 1] = 1;
for (int i = rank - 2; i >= 0; i--) {
h_stride_array[i] = h_stride_array[i + 1] * dims[i + 1];
}
memory::Copy(dev_ctx.GetPlace(), d_stride_array, platform::CPUPlace(),
h_stride_array, rank * sizeof(int64_t), dev_ctx.stream());
    // get total true number and set output size
// the last element of cub::InclusiveSum is the total number
memory::Copy(platform::CPUPlace(), h_total_true_num, dev_ctx.GetPlace(),
d_true_num_array + numel - 1, sizeof(int64_t),
dev_ctx.stream());
dev_ctx.Wait();
int64_t true_num = *h_total_true_num;
out->Resize(phi::make_ddim({static_cast<int64_t>(true_num), rank}));
auto out_data = out->mutable_data<int64_t>(context.GetPlace());
if (true_num == 0) {
return;
}
// using true_num_array and stride_array to calculate the output index
SetTrueIndex<T><<<grids, threads, 0, dev_ctx.stream()>>>(
out_data, cond_data, numel, d_stride_array, rank, d_true_num_array);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(where_index, ops::CUDAWhereIndexKernel<int64_t>,
ops::CUDAWhereIndexKernel<int>,
ops::CUDAWhereIndexKernel<int16_t>,
ops::CUDAWhereIndexKernel<bool>,
ops::CUDAWhereIndexKernel<float>,
ops::CUDAWhereIndexKernel<double>);
|
910fdf1727bf40eb91b1ac63343b7dd3049ee05a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SelectMetabolite.h"
#include "Setup.h"
#define BALANCED_MARKER -1
// Kernel with one thread per metabolite. Sums input/output counts and writes
// them to the metabolite's index in inputCounts and outputCounts.
__global__
void computeMetaboliteInputOutputCounts(float* metaboliteCoefficients, int pathwayStartIndex, int pathwayCount, int metaboliteCount, bool* balancedMetabolites, int* inputCounts, int* outputCounts) {
int inputCount = 0;
int outputCount = 0;
int m = blockIdx.x * blockDim.x + threadIdx.x;
if (m >= metaboliteCount) {
return;
}
int end_i = pathwayStartIndex + pathwayCount;
for (int p = pathwayStartIndex; p < end_i; p++) {
float coeff = metaboliteCoefficients[ circularIndex(p * metaboliteCount + m) ];
if (coeff > ZERO) {
outputCount++;
} else if (coeff < NEG_ZERO) {
inputCount++;
}
}
inputCounts[m] = inputCount;
outputCounts[m] = outputCount;
// insert sentinel value if the metabolite is already balanced
if (balancedMetabolites[m]) {
inputCounts[m] = BALANCED_MARKER;
outputCounts[m] = BALANCED_MARKER;
}
}
// Returns the index of the next metabolite to remove.
// Side effect: h_{input|output}Counts contain the correct counts
// for each metabolite.
int getNextMetabolite(float* d_metaboliteCoefficients, int pathwayStartIndex, int pathwayCount, int metaboliteCount, bool* d_balancedMetabolites, int* d_inputCounts, int* d_outputCounts, int* h_inputCounts, int* h_outputCounts) {
int threads_per_count_block = 256;
int blocks_per_grid = ceil(((float) metaboliteCount) / threads_per_count_block);
    hipLaunchKernelGGL(( computeMetaboliteInputOutputCounts), dim3(blocks_per_grid), dim3(threads_per_count_block), 0, 0,
        d_metaboliteCoefficients, pathwayStartIndex, pathwayCount, metaboliteCount, d_balancedMetabolites, d_inputCounts, d_outputCounts);
int count_mem_size = metaboliteCount * sizeof (int);
hipMemcpy(h_inputCounts, d_inputCounts, count_mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_outputCounts, d_outputCounts, count_mem_size, hipMemcpyDeviceToHost);
// select the metabolite with the minimum product of inputs and outputs
int min_i = 0;
long min_product = LONG_MAX;
for (int i = 0; i < metaboliteCount; i++) {
if (h_inputCounts[i] == BALANCED_MARKER) {
continue;
}
int product = h_inputCounts[i] * h_outputCounts[i];
if (product < min_product) {
min_i = i;
min_product = product;
if (min_product == 0) return min_i; // stop if we've already found a "best"
}
}
return min_i;
}
| 910fdf1727bf40eb91b1ac63343b7dd3049ee05a.cu | #include "SelectMetabolite.h"
#include "Setup.h"
#define BALANCED_MARKER -1
// Kernel with one thread per metabolite. Sums input/output counts and writes
// them to the metabolite's index in inputCounts and outputCounts.
__global__
void computeMetaboliteInputOutputCounts(float* metaboliteCoefficients, int pathwayStartIndex, int pathwayCount, int metaboliteCount, bool* balancedMetabolites, int* inputCounts, int* outputCounts) {
int inputCount = 0;
int outputCount = 0;
int m = blockIdx.x * blockDim.x + threadIdx.x;
if (m >= metaboliteCount) {
return;
}
int end_i = pathwayStartIndex + pathwayCount;
for (int p = pathwayStartIndex; p < end_i; p++) {
float coeff = metaboliteCoefficients[ circularIndex(p * metaboliteCount + m) ];
if (coeff > ZERO) {
outputCount++;
} else if (coeff < NEG_ZERO) {
inputCount++;
}
}
inputCounts[m] = inputCount;
outputCounts[m] = outputCount;
// insert sentinel value if the metabolite is already balanced
if (balancedMetabolites[m]) {
inputCounts[m] = BALANCED_MARKER;
outputCounts[m] = BALANCED_MARKER;
}
}
// Returns the index of the next metabolite to remove.
// Side effect: h_{input|output}Counts contain the correct counts
// for each metabolite.
int getNextMetabolite(float* d_metaboliteCoefficients, int pathwayStartIndex, int pathwayCount, int metaboliteCount, bool* d_balancedMetabolites, int* d_inputCounts, int* d_outputCounts, int* h_inputCounts, int* h_outputCounts) {
int threads_per_count_block = 256;
int blocks_per_grid = ceil(((float) metaboliteCount) / threads_per_count_block);
    computeMetaboliteInputOutputCounts<<<blocks_per_grid, threads_per_count_block>>>
        (d_metaboliteCoefficients, pathwayStartIndex, pathwayCount, metaboliteCount, d_balancedMetabolites, d_inputCounts, d_outputCounts);
int count_mem_size = metaboliteCount * sizeof (int);
cudaMemcpy(h_inputCounts, d_inputCounts, count_mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_outputCounts, d_outputCounts, count_mem_size, cudaMemcpyDeviceToHost);
// select the metabolite with the minimum product of inputs and outputs
int min_i = 0;
long min_product = LONG_MAX;
for (int i = 0; i < metaboliteCount; i++) {
if (h_inputCounts[i] == BALANCED_MARKER) {
continue;
}
int product = h_inputCounts[i] * h_outputCounts[i];
if (product < min_product) {
min_i = i;
min_product = product;
if (min_product == 0) return min_i; // stop if we've already found a "best"
}
}
return min_i;
}
|
021e133e83805b82e5c06c90bccd33cc91b09603.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2019-04-24
* I'll attend several conferences and workshops in the following weeks -
* busy days :(
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "../../XUtility.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
mask entries of a given tensor (CUDA Kernel)
c = a - b * \beta
>> a - A matrix
>> mask - mask matrix
>> c - where we put masked a
>> size - the size of a/b/c
>> alpha - value
*/
__global__
void KernelMASK(DTYPE * a, int * mask, DTYPE * c, int size, DTYPE alpha)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (mask[i] == 0) {
c[i] = alpha;
}
else {
c[i] = a[i];
}
}
}
/*
mask entries of a given tensor (cuda version)
>> a - a tensor
>> mask - mask tensor
>> c - where we put masked a
>> alpha - value
*/
void _CudaMask(const XTensor * a, const XTensor * mask, XTensor * c, DTYPE alpha)
{
CheckNTErrors(a && mask && c, "Empty tensor input!");
CheckNTErrors((a->unitNum == mask->unitNum && a->unitNum == c->unitNum),
"Unmatched tensors in addition!");
CheckNTErrors(mask->dataType == X_INT, "The mask tensor must be in X_INT!")
//CheckNTErrors((a->dataType == mask->dataType && a->dataType == c->dataType),
// "Unmatched tensors in addition!");
CheckNTErrors((a->devID == mask->devID && a->devID == c->devID),
"The tensors must be on the same!");
int devIDBackup = XDevice::GetGPUDevice();
XDevice::SetGPUDevice(a->devID);
if (!a->isSparse && !mask->isSparse) {
CheckNTErrors(!c->isSparse, "Illegal use of sparse matrix in addition!");
if (a->dataType == DEFAULT_DTYPE &&
mask->dataType == X_INT &&
c->dataType == DEFAULT_DTYPE)
{
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
KernelMASK << <blocks, threads >> >((DTYPE*)a->data, (int *)mask->data, (DTYPE*)c->data, a->unitNum, alpha);
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
XDevice::SetGPUDevice(devIDBackup);
}
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor) | 021e133e83805b82e5c06c90bccd33cc91b09603.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2019-04-24
* I'll attend several conferences and workshops in the following weeks -
* busy days :(
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "../../XUtility.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
mask entries of a given tensor (CUDA Kernel)
c = a - b * \beta
>> a - A matrix
>> mask - mask matrix
>> c - where we put masked a
>> size - the size of a/b/c
>> alpha - value
*/
__global__
void KernelMASK(DTYPE * a, int * mask, DTYPE * c, int size, DTYPE alpha)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (mask[i] == 0) {
c[i] = alpha;
}
else {
c[i] = a[i];
}
}
}
/*
mask entries of a given tensor (cuda version)
>> a - a tensor
>> mask - mask tensor
>> c - where we put masked a
>> alpha - value
*/
void _CudaMask(const XTensor * a, const XTensor * mask, XTensor * c, DTYPE alpha)
{
CheckNTErrors(a && mask && c, "Empty tensor input!");
CheckNTErrors((a->unitNum == mask->unitNum && a->unitNum == c->unitNum),
"Unmatched tensors in addition!");
CheckNTErrors(mask->dataType == X_INT, "The mask tensor must be in X_INT!")
//CheckNTErrors((a->dataType == mask->dataType && a->dataType == c->dataType),
// "Unmatched tensors in addition!");
CheckNTErrors((a->devID == mask->devID && a->devID == c->devID),
"The tensors must be on the same!");
int devIDBackup = XDevice::GetGPUDevice();
XDevice::SetGPUDevice(a->devID);
if (!a->isSparse && !mask->isSparse) {
CheckNTErrors(!c->isSparse, "Illegal use of sparse matrix in addition!");
if (a->dataType == DEFAULT_DTYPE &&
mask->dataType == X_INT &&
c->dataType == DEFAULT_DTYPE)
{
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
KernelMASK << <blocks, threads >> >((DTYPE*)a->data, (int *)mask->data, (DTYPE*)c->data, a->unitNum, alpha);
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
XDevice::SetGPUDevice(devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
7d38e9ec2fe3f810c1d4cf291f92608133ae73e1.hip | // !!! This is a file automatically generated by hipify!!!
// Joshua Donnoe, Kyle Evans, and Dominik Haeflinger
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "common.h"
// shadows the INFINITY macro from <math.h>: undef first and parenthesize so
// the macro expands safely inside larger expressions
#undef INFINITY
#define INFINITY (1.0f/0.0f)
#define NUM_THREADS 256
//k is the number of edges originally
//half is the new number of edges. which is half of e rounded up
__global__ void reduce(edge_t* src, edge_t* dest, int e, int half){
// Thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= half) return;
edge_t* left = &src[tid * 2];
edge_t* right = left + 1;
if (right == src + e) {
memcpy((void*) &dest[tid], (const void*) left, 8);
}
else {
    // pick the smaller edge; the ternary must sit inside the cast, otherwise
    // the cast applies to the comparison itself
    memcpy((void*) &dest[tid], (const void*) ((left->distance < right->distance) ? left : right), 8);
}
}
// Calculates x position in matrix
__device__ void calcXPos(unsigned short *x, int adjIndex, float adjN, float adjN2){
*x = (unsigned short)(floor(adjN - sqrt(adjN2 - adjIndex)));
}
// Calculates y position in matrix
__device__ void calcYPos(unsigned short *y, int adjIndex, float adjN, int x){
*y = (unsigned short)(adjIndex + (x * (x + adjN)) / 2);
}
// Calculate the position in the matrix
__device__ void calcPosInMatrix(int index, unsigned short *x, unsigned short *y, float adjNX, float adjNX2, int adjNY){
calcXPos(x, 2 * index, adjNX, adjNX2);
calcYPos(y, index + 1, adjNY, *x);
}
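// The E = N*(N-1)/2 edge indices enumerate all pairs (x, y) with x < y < N in
// row-major order. calcXPos inverts the triangular-number formula,
// x = floor(N - 0.5 - sqrt((N - 0.5)^2 - 2*index)), and calcYPos then recovers
// y from the remaining offset within row x (hence adjNX = N - 0.5 and
// adjNY = 3 - 2*N precomputed in main).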
// Calculate edges between all points
__global__ void calculateEdge(edge_t* edges, point_t* points, int e, float adjNX, float adjNX2, int adjNY){
// Thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= e) return;
edge_t *edge = &edges[tid];
calcPosInMatrix(tid, &(edge->tree1), &(edge->tree2), adjNX, adjNX2, adjNY);
point_t *xp = &points[(edge->tree1)]; // point at x value
point_t *yp = &points[(edge->tree2)]; // point at y value
float sum = 0;
for (int i = 0; i < DIM; i++) {
float delta = xp->coordinates[i] - yp->coordinates[i];
sum += delta * delta;
}
edge->distance = sqrt(sum);
}
// Updates the thread edge to use the smallest edge
__global__ void updateTree(edge_t* edges, int e, unsigned short o, unsigned short n) {
// Thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= 2 * e) return;
edge_t* edge = &edges[tid%e];
    unsigned short *tree = (tid >= e) ? &edge->tree1 : &edge->tree2; // >= so that edges[0].tree1 (tid == e) is visited exactly once
if (*tree == o) {
*tree = n;
}
}
// Deprecated
__global__ void updateTree2(edge_t* edges, int e, unsigned short o, unsigned short n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= e) return;
edge_t* edge = &edges[tid];
if (edge->tree2 == o) {
edge->tree2 = n;
}
}
// Updates the distance of the thread edge to infinity if it creates a cycle
__global__ void updateDistance(edge_t* edges, int e) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= e) return;
edge_t* edge = &edges[tid];
if (edge->tree1 == edge->tree2) {
edge->distance = INFINITY;
}
}
// main duh
int main(int argc, char **argv) {
int N = read_int(argc, argv, "-N", 1000);
int E = N * (N-1)/2;
int NUM_BLOCKS = ceil(E / 256.);
//pointers
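  // Single device allocation laid out as: E edges of 8 bytes each
  // (= 4*(N*N-N) bytes), followed by a region that is reused first for the N
  // input points and then for the two tournament buffers `half`
  // (ceil(E/2) edges) and `quarter` (ceil(E/4) edges), which is where the
  // 7*(N*N-N)+16 byte total comes from.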
edge_t* d_edges;
hipMalloc((void **) &d_edges, 7 * (N * N - N)+ 16);
point_t * d_points = (point_t *)(((void *) d_edges) + 4 * (N * N - N));
edge_t* half = (edge_t*)d_points;
edge_t* quarter = (edge_t*)(((void*)half) + 2 * (N * N - N) + 8);
edge_t smallest;
//hiprand
hiprandGenerator_t gen; // Random number generator
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT); // Initialize generator
hiprandSetPseudoRandomGeneratorSeed(gen, time(NULL)); // Set generator's seed
//adjusted values
float adjNX = N - .5f;
float adjNX2 = adjNX * adjNX;
int adjNY = 3 - 2*N;
float sum = 0;
// Perform calculations 1000 times
for (int i = 0; i < 1000 ; i++) {
hiprandGenerateUniform(gen, (float*)d_points, N * DIM); // Generate n random numbers in d_points
hipLaunchKernelGGL(( calculateEdge) , dim3(NUM_BLOCKS), dim3(NUM_THREADS) , 0, 0, d_edges, d_points, E, adjNX, adjNX2, adjNY);
// Begin Kruskal's algorithm
for (int numEdgesSel = N - 1; numEdgesSel-- > 0;) {
hipDeviceSynchronize();
int numEdgesRed = E;
hipLaunchKernelGGL(( reduce) , dim3(NUM_BLOCKS), dim3(NUM_THREADS) , 0, 0, d_edges, half, numEdgesRed, (numEdgesRed + 1) / 2);
numEdgesRed = (numEdgesRed + 1) / 2;
// Reduce until 1 edge remains
while(numEdgesRed > 1){
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduce) , dim3(NUM_BLOCKS), dim3(NUM_THREADS) , 0, 0, half, quarter, numEdgesRed, (numEdgesRed + 1) / 2);
numEdgesRed = (numEdgesRed + 1) / 2;
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduce) , dim3(NUM_BLOCKS), dim3(NUM_THREADS) , 0, 0, quarter, half, numEdgesRed, (numEdgesRed + 1) / 2);
numEdgesRed = (numEdgesRed + 1) / 2;
}
// Bring the smallest edge back to host and add the distance locally
hipMemcpy((void*)&smallest, (const void*)half, sizeof(edge_t), hipMemcpyDeviceToHost);
sum += smallest.distance;
// Update the tree and distances
hipLaunchKernelGGL(( updateTree) , dim3(NUM_BLOCKS), dim3(NUM_THREADS) , 0, 0, d_edges, E, smallest.tree1, smallest.tree2);
hipLaunchKernelGGL(( updateDistance) , dim3(NUM_BLOCKS), dim3(NUM_THREADS) , 0, 0, d_edges, E);
}
}
printf("sum %f\n", sum/1000);
// Clean-up
hipFree(d_edges);
hiprandDestroyGenerator(gen);
return 0;
}
| 7d38e9ec2fe3f810c1d4cf291f92608133ae73e1.cu | // Joshua Donnoe, Kyle Evans, and Dominik Haeflinger
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include "common.h"
// shadows the INFINITY macro from <math.h>: undef first and parenthesize so
// the macro expands safely inside larger expressions
#undef INFINITY
#define INFINITY (1.0f/0.0f)
#define NUM_THREADS 256
//k is the number of edges originally
//half is the new number of edges. which is half of e rounded up
__global__ void reduce(edge_t* src, edge_t* dest, int e, int half){
// Thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= half) return;
edge_t* left = &src[tid * 2];
edge_t* right = left + 1;
if (right == src + e) {
memcpy((void*) &dest[tid], (const void*) left, 8);
}
else {
    // pick the smaller edge; the ternary must sit inside the cast, otherwise
    // the cast applies to the comparison itself
    memcpy((void*) &dest[tid], (const void*) ((left->distance < right->distance) ? left : right), 8);
}
}
// Calculates x position in matrix
__device__ void calcXPos(unsigned short *x, int adjIndex, float adjN, float adjN2){
*x = (unsigned short)(floor(adjN - sqrt(adjN2 - adjIndex)));
}
// Calculates y position in matrix
__device__ void calcYPos(unsigned short *y, int adjIndex, float adjN, int x){
*y = (unsigned short)(adjIndex + (x * (x + adjN)) / 2);
}
// Calculate the position in the matrix
__device__ void calcPosInMatrix(int index, unsigned short *x, unsigned short *y, float adjNX, float adjNX2, int adjNY){
calcXPos(x, 2 * index, adjNX, adjNX2);
calcYPos(y, index + 1, adjNY, *x);
}
// Calculate edges between all points
__global__ void calculateEdge(edge_t* edges, point_t* points, int e, float adjNX, float adjNX2, int adjNY){
// Thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= e) return;
edge_t *edge = &edges[tid];
calcPosInMatrix(tid, &(edge->tree1), &(edge->tree2), adjNX, adjNX2, adjNY);
point_t *xp = &points[(edge->tree1)]; // point at x value
point_t *yp = &points[(edge->tree2)]; // point at y value
float sum = 0;
for (int i = 0; i < DIM; i++) {
float delta = xp->coordinates[i] - yp->coordinates[i];
sum += delta * delta;
}
edge->distance = sqrt(sum);
}
// Updates the thread edge to use the smallest edge
__global__ void updateTree(edge_t* edges, int e, unsigned short o, unsigned short n) {
// Thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= 2 * e) return;
edge_t* edge = &edges[tid%e];
  unsigned short *tree = (tid >= e) ? &edge->tree1 : &edge->tree2; // tid >= e: the second e threads visit tree1, the first e visit tree2
if (*tree == o) {
*tree = n;
}
}
// Deprecated
__global__ void updateTree2(edge_t* edges, int e, unsigned short o, unsigned short n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= e) return;
edge_t* edge = &edges[tid];
if (edge->tree2 == o) {
edge->tree2 = n;
}
}
// Updates the distance of the thread edge to infinity if it creates a cycle
__global__ void updateDistance(edge_t* edges, int e) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= e) return;
edge_t* edge = &edges[tid];
if (edge->tree1 == edge->tree2) {
edge->distance = INFINITY;
}
}
// main duh
int main(int argc, char **argv) {
int N = read_int(argc, argv, "-N", 1000);
int E = N * (N-1)/2;
int NUM_BLOCKS = ceil(E / 256.);
//pointers
edge_t* d_edges;
cudaMalloc((void **) &d_edges, 7 * (N * N - N)+ 16);
point_t * d_points = (point_t *)(((void *) d_edges) + 4 * (N * N - N));
edge_t* half = (edge_t*)d_points;
edge_t* quarter = (edge_t*)(((void*)half) + 2 * (N * N - N) + 8);
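// Memory layout sketch (assuming sizeof(edge_t) == 8): the E = (N*N-N)/2 edges
// occupy the first 4*(N*N-N) bytes; the random points live directly behind them
// and, once calculateEdge has consumed them, the same bytes are reused as the
// `half` (2*(N*N-N)+8 bytes) and `quarter` reduction scratch buffers -- hence
// the single 7*(N*N-N)+16 byte allocation above.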
edge_t smallest;
//curand
curandGenerator_t gen; // Random number generator
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); // Initialize generator
curandSetPseudoRandomGeneratorSeed(gen, time(NULL)); // Set generator's seed
//adjusted values
float adjNX = N - .5f;
float adjNX2 = adjNX * adjNX;
int adjNY = 3 - 2*N;
float sum = 0;
// Perform calculations 1000 times
for (int i = 0; i < 1000 ; i++) {
curandGenerateUniform(gen, (float*)d_points, N * DIM); // Generate n random numbers in d_points
calculateEdge <<< NUM_BLOCKS, NUM_THREADS >>> (d_edges, d_points, E, adjNX, adjNX2, adjNY);
// Begin Kruskal's algorithm
for (int numEdgesSel = N - 1; numEdgesSel-- > 0;) {
cudaThreadSynchronize();
int numEdgesRed = E;
reduce <<< NUM_BLOCKS, NUM_THREADS >>> (d_edges, half, numEdgesRed, (numEdgesRed + 1) / 2);
numEdgesRed = (numEdgesRed + 1) / 2;
// Reduce until 1 edge remains
while(numEdgesRed > 1){
cudaThreadSynchronize();
reduce <<< NUM_BLOCKS, NUM_THREADS >>> (half, quarter, numEdgesRed, (numEdgesRed + 1) / 2);
numEdgesRed = (numEdgesRed + 1) / 2;
cudaThreadSynchronize();
reduce <<< NUM_BLOCKS, NUM_THREADS >>> (quarter, half, numEdgesRed, (numEdgesRed + 1) / 2);
numEdgesRed = (numEdgesRed + 1) / 2;
}
// Bring the smallest edge back to host and add the distance locally
cudaMemcpy((void*)&smallest, (const void*)half, sizeof(edge_t), cudaMemcpyDeviceToHost);
sum += smallest.distance;
// Update the tree and distances
// updateTree guards tid < 2*E (one thread per tree field of each edge), so it needs twice the grid
updateTree <<< 2 * NUM_BLOCKS, NUM_THREADS >>> (d_edges, E, smallest.tree1, smallest.tree2);
updateDistance <<< NUM_BLOCKS, NUM_THREADS >>> (d_edges, E);
}
}
printf("sum %f\n", sum/1000);
// Clean-up
cudaFree(d_edges);
curandDestroyGenerator(gen);
return 0;
}
|
55c68e9eb8b182410cdcb73092b1575618309569.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nv6502.cu
// A GPU implementation of a 6502 CPU emulator
// Kamil M Rocki, 1/24/19
#include "nv6502.h"
#include <stdio.h>
#include <sys/time.h>
#define CHECK_ERR_CUDA(err) if (err != hipSuccess) { printf("%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); }
int read_bin(u8* mem, const char* fname) {
FILE * file = fopen(fname, "r+");
if (file == NULL || mem == NULL) return - 1;
fseek(file, 0, SEEK_END);
long int size = ftell(file);
fclose(file);
file = fopen(fname, "r+");
int bytes_read = fread(mem, sizeof(u8), size, file);
//printf("read file %s, %d bytes\n", fname, bytes_read);
  fclose(file);
  return 0;
}
double get_time() {
struct timeval tv; gettimeofday(&tv, NULL);
return (tv.tv_sec + tv.tv_usec * 1e-6);
}
#define MAX_THREADS 256
u8 mem[MAX_THREADS][0x1000];
__device__ __host__ void print_scrn(u32 id, _6502 *n) { for (u8 i=0;i<32;i++) { for (u8 j=0;j<32;j++) { u8 v = n->mem[0x200 + j + 32*i]; if (v >= 0xa) printf("%02x ", v); else if (v > 0) printf("%2x ", v); else printf("__ "); } printf("\n"); } }
__device__ __host__ void print_regs(u32 id, _6502 *n) { printf("[%05d] PC: %04x OP: %02x, m: %2d, d: %04x, A:%02X X:%02X Y:%02X P:%02X SP:%02X CYC:%3ld\n", id, PC, I, m, d, A, X, Y, P, SP, CY); }
#define STACK_PG 0x0100
#define ZN(x) { Z=((x)==0); S=((x)>>7) & 0x1; }
#define LDM { d=(m>2) ? r8(n,d) : d; }
#define LD_A_OR_M() u8 w=(m==1)?A:r8(n,d)
#define ST_A_OR_M() if (m!=1) w8(n,d,w); else A=w;
__device__ u8 r8 (_6502 *n, u16 a) { return n->mem[a % MEM_SIZE]; } // byte read
__device__ void w8 (_6502 *n, u16 a, u8 v) { n->mem[a % MEM_SIZE] = v; } // byte write
__device__ u8 f8 (_6502 *n) { return r8(n, PC++); } // byte fetch
//// 16-bit versions
__device__ u16 r16 (_6502 *n, u16 a) { u16 base=a & 0xff00; return (r8(n,a) | (r8(n,base|((u8)(a+1))) << 8)); } // buggy
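// r16 reproduces the 6502 page-wrap quirk: a 16-bit read at $xxFF fetches its
// high byte from $xx00 of the *same* page (famously affecting indirect JMP).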
__device__ u16 r16_ok(_6502 *n, u16 a) { return (r8(n,a) | (r8(n,a+1) << 8)); }
__device__ u16 f16 (_6502 *n) { return (f8(n) | ((f8(n))<<8)); }
//
//// stack ops
__device__ u8 pop8 (_6502 *n) { SP++; return r8(n, STACK_PG | SP); }
__device__ u16 pop16 (_6502 *n) { return (pop8(n) | ((pop8(n))<<8)); }
__device__ void push8 (_6502 *n, u8 v) { w8(n, STACK_PG | SP, v); SP--; }
__device__ void push16(_6502 *n, u16 v) { push8(n,(v>>8)); push8(n,v); }
__device__ void jr (_6502 *n, u8 cond) { if (cond) { PC=(u16)d; } }
//
//// decoding addressing mode
__device__ void imp (_6502 *n) { m=0; b=0; } // implied, 1
__device__ void acc (_6502 *n) { m=1; b=0; } // accumulator, 1
__device__ void imm (_6502 *n) { m=2; b=1; d=(u16)f8(n); } // immediate, 2
__device__ void zp (_6502 *n) { m=3; b=1; d=(u16)f8(n); } // zero page, 2
__device__ void zpx (_6502 *n) { m=4; b=1; u8 r=f8(n); d=(r+X) & 0xff;} // zero page, x, 3
__device__ void zpy (_6502 *n) { m=5; b=1; u8 r=f8(n); d=(r+Y) & 0xff; } // zero page, y, 3
__device__ void rel (_6502 *n) { m=6; b=1; u8 r=f8(n); if (r<0x80) d=PC+r; else d=PC+r-0x100;} // relative, 2
__device__ void abso(_6502 *n) { m=7; b=2; d=f16(n); } // absolute, 3
__device__ void absx(_6502 *n) { m=8; b=2; d=f16(n); d+=X; } // absolute, x, 3
__device__ void absy(_6502 *n) { m=9; b=2; d=f16(n); d+=Y; } // absolute, y, 3
__device__ void ind (_6502 *n) { m=10; b=2; d=r16(n,f16(n)); } // indirect, 3
__device__ void indx(_6502 *n) { m=11; b=1; u8 r=f8(n); d=r16(n,(u8)(r + X)); } // indirect x
__device__ void indy(_6502 *n) { m=12; b=1; u8 r=f8(n); d=r16(n,(u8)(r)); d+=Y;} // indirect y
//instructions
__device__ void _adc(_6502 *n) {
u8 a = A; LDM; A=d+A+C; ZN(A);
u16 t = (u16)d + (u16)a + (u16)C; C=(t > 0xff);
V = (!((a^d) & 0x80)) && (((a^A) & 0x80)>0 );
} // Add Memory to Accumulator with Carry
__device__ void _sbc(_6502 *n) {
u8 a = A; LDM; A=A-d-(1-C); ZN(A);
s16 t = (s16)a - (s16)d - (1-(s16)C); C=(t >= 0x0);
V = (((a^d) & 0x80)>0) && (((a^A) & 0x80)>0);
} // Subtract Memory from Accumulator with Borrow
__device__ void _cp (_6502 *n, u8 _a, u8 _b) { u8 r=_a-_b; C=(_a>=_b); ZN(r); }
__device__ void _ora(_6502 *n) { LDM; A|=d; ZN(A); } // "OR" Memory with Accumulator
__device__ void _and(_6502 *n) { LDM; A&=d; ZN(A); } // "AND" Memory with Accumulator
__device__ void _eor(_6502 *n) { LDM; A^=d; ZN(A); } // "XOR" Memory with Accumulator
__device__ void _cmp(_6502 *n) { LDM; _cp(n,A,d); } // Compare Memory and Accumulator
__device__ void _cpx(_6502 *n) { LDM; _cp(n,X,d); } // Compare Memory and Index X
__device__ void _cpy(_6502 *n) { LDM; _cp(n,Y,d); } // Compare Memory and Index Y
__device__ void _bcc(_6502 *n) { jr(n,!C); } // Branch on Carry Clear
__device__ void _bcs(_6502 *n) { jr(n,C); } // Branch on Carry Set
__device__ void _beq(_6502 *n) { jr(n,Z); } // Branch on Result Zero
__device__ void _bit(_6502 *n) { LDM; S=(d>>7) & 1; V=(d>>6) & 1; Z=(d & A)==0; } // Test Bits in Memory with A
__device__ void _bmi(_6502 *n) { jr(n, S); } // Branch on Result Minus
__device__ void _bne(_6502 *n) { jr(n,!Z); } // Branch on Result not Zero
__device__ void _bpl(_6502 *n) { jr(n,!S); } // Branch on Result Plus
__device__ void _brk(_6502 *n) { B=1; } // Force Break
__device__ void _bvc(_6502 *n) { jr(n,!V); } // Branch on Overflow Clear
__device__ void _bvs(_6502 *n) { jr(n, V); } // Branch on Overflow Set
__device__ void _clc(_6502 *n) { C=0; } // Clear Carry Flag
__device__ void _cld(_6502 *n) { D=0; } // Clear Decimal Mode
__device__ void _cli(_6502 *n) { I=0; } // Clear interrupt Disable Bit
__device__ void _clv(_6502 *n) { V=0; } // Clear Overflow Flag
__device__ void _dec(_6502 *n) { u16 d0 = d; LDM; d--; d &= 0xff; ZN(d); w8(n,d0,d); } // Decrement Memory by One
__device__ void _dex(_6502 *n) { X--; ZN(X); } // Decrement Index X by One
__device__ void _dey(_6502 *n) { Y--; ZN(Y); } // Decrement Index Y by One
__device__ void _inc(_6502 *n) { u16 d0=d; LDM; d++; d &= 0xff; ZN(d); w8(n,d0,d); d=d0; } // Increment Memory by One
__device__ void _inx(_6502 *n) { X++; ZN(X); } // Increment Index X by One
__device__ void _iny(_6502 *n) { Y++; ZN(Y); } // Increment Index Y by One
__device__ void _jmp(_6502 *n) { PC=d;} // Jump to New Location
__device__ void _jsr(_6502 *n) { push16(n,PC-1); PC=d; } // Jump to New Location Saving Return Address
__device__ void _lda(_6502 *n) { LDM; A=d; ZN(A); } // Load Accumulator with Memory
__device__ void _ldx(_6502 *n) { LDM; X=d; ZN(X); } // Load Index X with Memory
__device__ void _ldy(_6502 *n) { LDM; Y=d; ZN(Y); } // Load Index Y with Memory
__device__ void _lsr(_6502 *n) { LD_A_OR_M(); C=w & 1; w>>=1; ZN(w); ST_A_OR_M(); } // Shift Right One Bit
__device__ void _asl(_6502 *n) { LD_A_OR_M(); C=(w>>7) & 1; w<<=1; ZN(w); ST_A_OR_M();} // Shift Left One Bit
__device__ void _rol(_6502 *n) { LD_A_OR_M(); u8 c = C; C=(w>>7) & 1; w=(w<<1) | c; ZN(w); ST_A_OR_M(); } // Rotate One Bit Left (Memory or Accumulator)
__device__ void _ror(_6502 *n) { LD_A_OR_M(); u8 c = C; C=(w & 1); w=(w>>1) | (c<<7); ZN(w); ST_A_OR_M(); } // Rotate One Bit Right (Memory or Accumulator)
__device__ void _nop(_6502 *n) { /* No Operation */ }
__device__ void _pha(_6502 *n) { push8(n, A); } // Push Accumulator on Stack
__device__ void _php(_6502 *n) { push8(n, P | 0x10); } // Push Processor Status on Stack
__device__ void _pla(_6502 *n) { A=pop8(n); Z=(A==0); S=(A>>7)&0x1;} // Pull Accumulator from Stack
__device__ void _plp(_6502 *n) { P=(pop8(n) & 0xef) | 0x20; } // Pull Processor Status from Stack
__device__ void _rti(_6502 *n) { P=(pop8(n) & 0xef) | 0x20; PC=pop16(n); } // Return from Interrupt
__device__ void _rts(_6502 *n) { PC=pop16(n)+1;} // Return from Subroutine
__device__ void _sec(_6502 *n) { C=1;} // Set Carry Flag
__device__ void _sed(_6502 *n) { D=1;} // Set Decimal Mode
__device__ void _sei(_6502 *n) { I=1;} // Set Interrupt Disable Status
__device__ void _sta(_6502 *n) { w8(n,d,A);} // Store Accumulator in Memory
__device__ void _stx(_6502 *n) { w8(n,d,X);} // Store Index X in Memory
__device__ void _sty(_6502 *n) { w8(n,d,Y);} // Store Index Y in Memory
__device__ void _tax(_6502 *n) { X=A; ZN(X); } // Transfer Accumulator to Index X
__device__ void _tay(_6502 *n) { Y=A; ZN(Y); } // Transfer Accumulator to Index Y
__device__ void _tsx(_6502 *n) { X=SP;ZN(X); } // Transfer Stack Pointer to Index X
__device__ void _txa(_6502 *n) { A=X; ZN(A); } // Transfer Index X to Accumulator
__device__ void _txs(_6502 *n) { SP=X; } // Transfer Index X to Stack Pointer
__device__ void _tya(_6502 *n) { A=Y; ZN(A); } // Transfer Index Y to Accumulator
// undocumented
__device__ void _lax(_6502 *n) { _lda(n); X=A; ZN(A); } // lda, ldx
__device__ void _sax(_6502 *n) { w8(n,d,A&X); }
__device__ void _dcp(_6502 *n) { _dec(n); _cp(n,A,d); }
__device__ void _isb(_6502 *n) { _inc(n); _sbc(n); }
__device__ void _slo(_6502 *n) { _asl(n); _ora(n); }
__device__ void _rla(_6502 *n) { _rol(n); _and(n); }
__device__ void _sre(_6502 *n) { _lsr(n); _eor(n); }
__device__ void _rra(_6502 *n) { _ror(n); _adc(n); }
__device__ void *addrtable[256] = {
&imp, &indx,&imp,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &acc,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&abso,&indx,&imp,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &acc,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&imp, &indx,&imp,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &acc,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&imp, &indx,&imp,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &acc,&imm, &ind, &abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&imm, &indx,&imm,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &imp,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpy,&zpy,&imp,&absy,&imp,&absy,&absx,&absx,&absy,&absy,
&imm, &indx,&imm,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &imp,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpy,&zpy,&imp,&absy,&imp,&absy,&absx,&absx,&absy,&absy,
&imm, &indx,&imm,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &imp,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&imm, &indx,&imm,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &imp,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx};
__device__ void *optable[256] = { // opcode -> functions map
&_brk,&_ora,&_nop,&_slo,&_nop,&_ora,&_asl,&_slo,&_php,&_ora,&_asl,&_nop,&_nop,&_ora,&_asl,&_slo,
&_bpl,&_ora,&_nop,&_slo,&_nop,&_ora,&_asl,&_slo,&_clc,&_ora,&_nop,&_slo,&_nop,&_ora,&_asl,&_slo,
&_jsr,&_and,&_nop,&_rla,&_bit,&_and,&_rol,&_rla,&_plp,&_and,&_rol,&_nop,&_bit,&_and,&_rol,&_rla,
&_bmi,&_and,&_nop,&_rla,&_nop,&_and,&_rol,&_rla,&_sec,&_and,&_nop,&_rla,&_nop,&_and,&_rol,&_rla,
&_rti,&_eor,&_nop,&_sre,&_nop,&_eor,&_lsr,&_sre,&_pha,&_eor,&_lsr,&_nop,&_jmp,&_eor,&_lsr,&_sre,
&_bvc,&_eor,&_nop,&_sre,&_nop,&_eor,&_lsr,&_sre,&_cli,&_eor,&_nop,&_sre,&_nop,&_eor,&_lsr,&_sre,
&_rts,&_adc,&_nop,&_rra,&_nop,&_adc,&_ror,&_rra,&_pla,&_adc,&_ror,&_nop,&_jmp,&_adc,&_ror,&_rra,
&_bvs,&_adc,&_nop,&_rra,&_nop,&_adc,&_ror,&_rra,&_sei,&_adc,&_nop,&_rra,&_nop,&_adc,&_ror,&_rra,
&_nop,&_sta,&_nop,&_sax,&_sty,&_sta,&_stx,&_sax,&_dey,&_nop,&_txa,&_nop,&_sty,&_sta,&_stx,&_sax,
&_bcc,&_sta,&_nop,&_nop,&_sty,&_sta,&_stx,&_sax,&_tya,&_sta,&_txs,&_nop,&_nop,&_sta,&_nop,&_nop,
&_ldy,&_lda,&_ldx,&_lax,&_ldy,&_lda,&_ldx,&_lax,&_tay,&_lda,&_tax,&_nop,&_ldy,&_lda,&_ldx,&_lax,
&_bcs,&_lda,&_nop,&_lax,&_ldy,&_lda,&_ldx,&_lax,&_clv,&_lda,&_tsx,&_lax,&_ldy,&_lda,&_ldx,&_lax,
&_cpy,&_cmp,&_nop,&_dcp,&_cpy,&_cmp,&_dec,&_dcp,&_iny,&_cmp,&_dex,&_nop,&_cpy,&_cmp,&_dec,&_dcp,
&_bne,&_cmp,&_nop,&_dcp,&_nop,&_cmp,&_dec,&_dcp,&_cld,&_cmp,&_nop,&_dcp,&_nop,&_cmp,&_dec,&_dcp,
&_cpx,&_sbc,&_nop,&_isb,&_cpx,&_sbc,&_inc,&_isb,&_inx,&_sbc,&_nop,&_sbc,&_cpx,&_sbc,&_inc,&_isb,
&_beq,&_sbc,&_nop,&_isb,&_nop,&_sbc,&_inc,&_isb,&_sed,&_sbc,&_nop,&_isb,&_nop,&_sbc,&_inc,&_isb
};
// use local mem? this is usually faster
#define LMEM
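// With LMEM the whole _6502 state is copied into the thread's local frame, so
// the fetch/decode/execute loop below works out of registers/L1 instead of
// re-reading global memory every cycle; the state is written back once at the end.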
__global__ void step(_6502* states, int steps, int num_threads) {
int i = blockDim.x * blockIdx.x + threadIdx.x; // thread idx
if (i < num_threads) {
#ifdef LMEM // use local memory
_6502 ln = states[i]; _6502 *n = &ln;
#else // operate directly on global mem
_6502 *n = &states[i];
#endif
for (int j = 0; j < n->max_cycles; ++j) {
u8 op = f8(n); I = op; // fetch next byte
((void(*)(_6502*))addrtable[op])(n); // decode addr mode
((void(*)(_6502*)) optable[op])(n); // execute
CY++; // increment cycle count
}
#ifdef LMEM // update from local mem
states[i] = ln;
#endif
}
}
void reset(_6502 *n, u16 _PC, u8 _SP) {
  PC=_PC; A=0x00; X=0x00; Y=0x00; P=0x24; SP=_SP; CY=0; memset(n->mem, '\0', MEM_SIZE);
n->max_cycles = (rand() % 5000 + 10);
}
// wrapper for CUDA call
void gpu_step(_6502* states, u32 steps, u32 num_blocks, u32 threads_per_block) {
hipLaunchKernelGGL(( step), dim3(num_blocks), dim3(threads_per_block), 0, 0, states, steps, num_blocks * threads_per_block);
}
extern "C" {
void run(int num_blocks, int threads_per_block, int steps, int iters, const char *name) {
hipError_t err = hipSuccess; // for checking CUDA errors
int num_threads = num_blocks * threads_per_block;
printf(" main: running %d blocks * %d threads (%d threads total), %d steps\n", num_blocks, threads_per_block, num_threads, steps);
// allocate _6502 registers / state
_6502 *h_in_regs = (_6502 *) malloc(num_threads * sizeof(_6502));
_6502 *h_out_regs = (_6502 *) malloc(num_threads * sizeof(_6502));
// resetting all instances
for (u32 i = 0; i < num_threads; i++) {
reset(&h_in_regs[i], 0x0600, 0xfe); read_bin(&h_in_regs[i].mem[0x0600], name);
}
// alloc gpu mem
printf(" main: allocating %zu device bytes\n", num_threads * sizeof(_6502));
_6502* d_regs = NULL;
err = hipMalloc((void **)&d_regs, num_threads * sizeof(_6502) ); CHECK_ERR_CUDA(err);
printf(" main: copying host -> device\n");
err = hipMemcpy(d_regs, h_in_regs, sizeof(_6502 ) * num_threads, hipMemcpyHostToDevice); CHECK_ERR_CUDA(err);
for (int j = 0; j < iters; j++ ) {
double start_time = get_time();
hipDeviceSynchronize();
///
gpu_step(d_regs, steps, num_blocks, threads_per_block);
///
hipDeviceSynchronize();
double walltime = get_time() - start_time;
err = hipGetLastError(); CHECK_ERR_CUDA(err);
printf(" main: kernel time = %.6f s, %2.6f us/step, %5.3f MHz\n", walltime,
1e6 * (walltime/(steps * num_threads)), ((steps * num_threads)/walltime)/1e6);
printf(" main: copying device -> host\n");
err = hipMemcpy(h_out_regs, d_regs, sizeof(_6502) * num_threads, hipMemcpyDeviceToHost); CHECK_ERR_CUDA(err);
// guard against overflowing the fixed-size host mem[] snapshot array
for (u32 i = 0; i < num_threads && i < MAX_THREADS; i++) {
memcpy(mem[i], h_out_regs[i].mem, 0x1000);
//print_regs(i, &h_out_regs[i]); print_scrn(i, &h_out_regs[i]);
}
}
printf(" main: freeing memory\n");
// free gpu mem
err = hipFree(d_regs); CHECK_ERR_CUDA(err);
// free host mem
free(h_in_regs); free(h_out_regs);
printf(" main: done.\n");
}
}
int main(int argc, char **argv) {
int num_blocks = 1; int threads_per_block = 1; int iters = 1; int steps = 32768;
if (argc > 3) {
num_blocks = strtol(argv[1], NULL, 10);
threads_per_block = strtol(argv[2], NULL, 10);
steps = strtol(argv[3], NULL, 10);
} else {
printf("usage: %s blocks thread/block steps\n", argv[0]);
return -1;
}
run(num_blocks, threads_per_block, steps, iters, "sierp.bin");
return 0;
}
| 55c68e9eb8b182410cdcb73092b1575618309569.cu | // nv6502.cu
// A GPU implementation of a 6502 CPU emulator
// Kamil M Rocki, 1/24/19
#include "nv6502.h"
#include <stdio.h>
#include <sys/time.h>
#define CHECK_ERR_CUDA(err) if (err != cudaSuccess) { printf("%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }
int read_bin(u8* mem, const char* fname) {
FILE * file = fopen(fname, "r+");
if (file == NULL || mem == NULL) return - 1;
fseek(file, 0, SEEK_END);
long int size = ftell(file);
fclose(file);
file = fopen(fname, "r+");
int bytes_read = fread(mem, sizeof(u8), size, file);
//printf("read file %s, %d bytes\n", fname, bytes_read);
  fclose(file);
  return 0;
}
double get_time() {
struct timeval tv; gettimeofday(&tv, NULL);
return (tv.tv_sec + tv.tv_usec * 1e-6);
}
#define MAX_THREADS 256
u8 mem[MAX_THREADS][0x1000];
__device__ __host__ void print_scrn(u32 id, _6502 *n) { for (u8 i=0;i<32;i++) { for (u8 j=0;j<32;j++) { u8 v = n->mem[0x200 + j + 32*i]; if (v >= 0xa) printf("%02x ", v); else if (v > 0) printf("%2x ", v); else printf("__ "); } printf("\n"); } }
__device__ __host__ void print_regs(u32 id, _6502 *n) { printf("[%05d] PC: %04x OP: %02x, m: %2d, d: %04x, A:%02X X:%02X Y:%02X P:%02X SP:%02X CYC:%3ld\n", id, PC, I, m, d, A, X, Y, P, SP, CY); }
#define STACK_PG 0x0100
#define ZN(x) { Z=((x)==0); S=((x)>>7) & 0x1; }
#define LDM { d=(m>2) ? r8(n,d) : d; }
#define LD_A_OR_M() u8 w=(m==1)?A:r8(n,d)
#define ST_A_OR_M() if (m!=1) w8(n,d,w); else A=w;
__device__ u8 r8 (_6502 *n, u16 a) { return n->mem[a % MEM_SIZE]; } // byte read
__device__ void w8 (_6502 *n, u16 a, u8 v) { n->mem[a % MEM_SIZE] = v; } // byte write
__device__ u8 f8 (_6502 *n) { return r8(n, PC++); } // byte fetch
//// 16-bit versions
__device__ u16 r16 (_6502 *n, u16 a) { u16 base=a & 0xff00; return (r8(n,a) | (r8(n,base|((u8)(a+1))) << 8)); } // buggy
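// r16 reproduces the 6502 page-wrap quirk: a 16-bit read at $xxFF fetches its
// high byte from $xx00 of the *same* page (famously affecting indirect JMP).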
__device__ u16 r16_ok(_6502 *n, u16 a) { return (r8(n,a) | (r8(n,a+1) << 8)); }
__device__ u16 f16 (_6502 *n) { return (f8(n) | ((f8(n))<<8)); }
//
//// stack ops
__device__ u8 pop8 (_6502 *n) { SP++; return r8(n, STACK_PG | SP); }
__device__ u16 pop16 (_6502 *n) { return (pop8(n) | ((pop8(n))<<8)); }
__device__ void push8 (_6502 *n, u8 v) { w8(n, STACK_PG | SP, v); SP--; }
__device__ void push16(_6502 *n, u16 v) { push8(n,(v>>8)); push8(n,v); }
__device__ void jr (_6502 *n, u8 cond) { if (cond) { PC=(u16)d; } }
//
//// decoding addressing mode
__device__ void imp (_6502 *n) { m=0; b=0; } // implied, 1
__device__ void acc (_6502 *n) { m=1; b=0; } // accumulator, 1
__device__ void imm (_6502 *n) { m=2; b=1; d=(u16)f8(n); } // immediate, 2
__device__ void zp (_6502 *n) { m=3; b=1; d=(u16)f8(n); } // zero page, 2
__device__ void zpx (_6502 *n) { m=4; b=1; u8 r=f8(n); d=(r+X) & 0xff;} // zero page, x, 3
__device__ void zpy (_6502 *n) { m=5; b=1; u8 r=f8(n); d=(r+Y) & 0xff; } // zero page, y, 3
__device__ void rel (_6502 *n) { m=6; b=1; u8 r=f8(n); if (r<0x80) d=PC+r; else d=PC+r-0x100;} // relative, 2
__device__ void abso(_6502 *n) { m=7; b=2; d=f16(n); } // absolute, 3
__device__ void absx(_6502 *n) { m=8; b=2; d=f16(n); d+=X; } // absolute, x, 3
__device__ void absy(_6502 *n) { m=9; b=2; d=f16(n); d+=Y; } // absolute, y, 3
__device__ void ind (_6502 *n) { m=10; b=2; d=r16(n,f16(n)); } // indirect, 3
__device__ void indx(_6502 *n) { m=11; b=1; u8 r=f8(n); d=r16(n,(u8)(r + X)); } // indirect x
__device__ void indy(_6502 *n) { m=12; b=1; u8 r=f8(n); d=r16(n,(u8)(r)); d+=Y;} // indirect y
//instructions
__device__ void _adc(_6502 *n) {
u8 a = A; LDM; A=d+A+C; ZN(A);
u16 t = (u16)d + (u16)a + (u16)C; C=(t > 0xff);
V = (!((a^d) & 0x80)) && (((a^A) & 0x80)>0 );
} // Add Memory to Accumulator with Carry
__device__ void _sbc(_6502 *n) {
u8 a = A; LDM; A=A-d-(1-C); ZN(A);
s16 t = (s16)a - (s16)d - (1-(s16)C); C=(t >= 0x0);
V = (((a^d) & 0x80)>0) && (((a^A) & 0x80)>0);
} // Subtract Memory from Accumulator with Borrow
__device__ void _cp (_6502 *n, u8 _a, u8 _b) { u8 r=_a-_b; C=(_a>=_b); ZN(r); }
__device__ void _ora(_6502 *n) { LDM; A|=d; ZN(A); } // "OR" Memory with Accumulator
__device__ void _and(_6502 *n) { LDM; A&=d; ZN(A); } // "AND" Memory with Accumulator
__device__ void _eor(_6502 *n) { LDM; A^=d; ZN(A); } // "XOR" Memory with Accumulator
__device__ void _cmp(_6502 *n) { LDM; _cp(n,A,d); } // Compare Memory and Accumulator
__device__ void _cpx(_6502 *n) { LDM; _cp(n,X,d); } // Compare Memory and Index X
__device__ void _cpy(_6502 *n) { LDM; _cp(n,Y,d); } // Compare Memory and Index Y
__device__ void _bcc(_6502 *n) { jr(n,!C); } // Branch on Carry Clear
__device__ void _bcs(_6502 *n) { jr(n,C); } // Branch on Carry Set
__device__ void _beq(_6502 *n) { jr(n,Z); } // Branch on Result Zero
__device__ void _bit(_6502 *n) { LDM; S=(d>>7) & 1; V=(d>>6) & 1; Z=(d & A)==0; } // Test Bits in Memory with A
__device__ void _bmi(_6502 *n) { jr(n, S); } // Branch on Result Minus
__device__ void _bne(_6502 *n) { jr(n,!Z); } // Branch on Result not Zero
__device__ void _bpl(_6502 *n) { jr(n,!S); } // Branch on Result Plus
__device__ void _brk(_6502 *n) { B=1; } // Force Break
__device__ void _bvc(_6502 *n) { jr(n,!V); } // Branch on Overflow Clear
__device__ void _bvs(_6502 *n) { jr(n, V); } // Branch on Overflow Set
__device__ void _clc(_6502 *n) { C=0; } // Clear Carry Flag
__device__ void _cld(_6502 *n) { D=0; } // Clear Decimal Mode
__device__ void _cli(_6502 *n) { I=0; } // Clear interrupt Disable Bit
__device__ void _clv(_6502 *n) { V=0; } // Clear Overflow Flag
__device__ void _dec(_6502 *n) { u16 d0 = d; LDM; d--; d &= 0xff; ZN(d); w8(n,d0,d); } // Decrement Memory by One
__device__ void _dex(_6502 *n) { X--; ZN(X); } // Decrement Index X by One
__device__ void _dey(_6502 *n) { Y--; ZN(Y); } // Decrement Index Y by One
__device__ void _inc(_6502 *n) { u16 d0=d; LDM; d++; d &= 0xff; ZN(d); w8(n,d0,d); d=d0; } // Increment Memory by One
__device__ void _inx(_6502 *n) { X++; ZN(X); } // Increment Index X by One
__device__ void _iny(_6502 *n) { Y++; ZN(Y); } // Increment Index Y by One
__device__ void _jmp(_6502 *n) { PC=d;} // Jump to New Location
__device__ void _jsr(_6502 *n) { push16(n,PC-1); PC=d; } // Jump to New Location Saving Return Address
__device__ void _lda(_6502 *n) { LDM; A=d; ZN(A); } // Load Accumulator with Memory
__device__ void _ldx(_6502 *n) { LDM; X=d; ZN(X); } // Load Index X with Memory
__device__ void _ldy(_6502 *n) { LDM; Y=d; ZN(Y); } // Load Index Y with Memory
__device__ void _lsr(_6502 *n) { LD_A_OR_M(); C=w & 1; w>>=1; ZN(w); ST_A_OR_M(); } // Shift Right One Bit
__device__ void _asl(_6502 *n) { LD_A_OR_M(); C=(w>>7) & 1; w<<=1; ZN(w); ST_A_OR_M();} // Shift Left One Bit
__device__ void _rol(_6502 *n) { LD_A_OR_M(); u8 c = C; C=(w>>7) & 1; w=(w<<1) | c; ZN(w); ST_A_OR_M(); } // Rotate One Bit Left (Memory or Accumulator)
__device__ void _ror(_6502 *n) { LD_A_OR_M(); u8 c = C; C=(w & 1); w=(w>>1) | (c<<7); ZN(w); ST_A_OR_M(); } // Rotate One Bit Right (Memory or Accumulator)
__device__ void _nop(_6502 *n) { /* No Operation */ }
__device__ void _pha(_6502 *n) { push8(n, A); } // Push Accumulator on Stack
__device__ void _php(_6502 *n) { push8(n, P | 0x10); } // Push Processor Status on Stack
__device__ void _pla(_6502 *n) { A=pop8(n); Z=(A==0); S=(A>>7)&0x1;} // Pull Accumulator from Stack
__device__ void _plp(_6502 *n) { P=(pop8(n) & 0xef) | 0x20; } // Pull Processor Status from Stack
__device__ void _rti(_6502 *n) { P=(pop8(n) & 0xef) | 0x20; PC=pop16(n); } // Return from Interrupt
__device__ void _rts(_6502 *n) { PC=pop16(n)+1;} // Return from Subroutine
__device__ void _sec(_6502 *n) { C=1;} // Set Carry Flag
__device__ void _sed(_6502 *n) { D=1;} // Set Decimal Mode
__device__ void _sei(_6502 *n) { I=1;} // Set Interrupt Disable Status
__device__ void _sta(_6502 *n) { w8(n,d,A);} // Store Accumulator in Memory
__device__ void _stx(_6502 *n) { w8(n,d,X);} // Store Index X in Memory
__device__ void _sty(_6502 *n) { w8(n,d,Y);} // Store Index Y in Memory
__device__ void _tax(_6502 *n) { X=A; ZN(X); } // Transfer Accumulator to Index X
__device__ void _tay(_6502 *n) { Y=A; ZN(Y); } // Transfer Accumulator to Index Y
__device__ void _tsx(_6502 *n) { X=SP;ZN(X); } // Transfer Stack Pointer to Index X
__device__ void _txa(_6502 *n) { A=X; ZN(A); } // Transfer Index X to Accumulator
__device__ void _txs(_6502 *n) { SP=X; } // Transfer Index X to Stack Pointer
__device__ void _tya(_6502 *n) { A=Y; ZN(A); } // Transfer Index Y to Accumulator
// undocumented
__device__ void _lax(_6502 *n) { _lda(n); X=A; ZN(A); } // lda, ldx
__device__ void _sax(_6502 *n) { w8(n,d,A&X); }
__device__ void _dcp(_6502 *n) { _dec(n); _cp(n,A,d); }
__device__ void _isb(_6502 *n) { _inc(n); _sbc(n); }
__device__ void _slo(_6502 *n) { _asl(n); _ora(n); }
__device__ void _rla(_6502 *n) { _rol(n); _and(n); }
__device__ void _sre(_6502 *n) { _lsr(n); _eor(n); }
__device__ void _rra(_6502 *n) { _ror(n); _adc(n); }
__device__ void *addrtable[256] = {
&imp, &indx,&imp,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &acc,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&abso,&indx,&imp,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &acc,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&imp, &indx,&imp,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &acc,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&imp, &indx,&imp,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &acc,&imm, &ind, &abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&imm, &indx,&imm,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &imp,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpy,&zpy,&imp,&absy,&imp,&absy,&absx,&absx,&absy,&absy,
&imm, &indx,&imm,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &imp,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpy,&zpy,&imp,&absy,&imp,&absy,&absx,&absx,&absy,&absy,
&imm, &indx,&imm,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &imp,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx,
&imm, &indx,&imm,&indx,&zp, &zp, &zp, &zp, &imp,&imm, &imp,&imm, &abso,&abso,&abso,&abso,
&rel, &indy,&imp,&indy,&zpx,&zpx,&zpx,&zpx,&imp,&absy,&imp,&absy,&absx,&absx,&absx,&absx};
__device__ void *optable[256] = { // opcode -> functions map
&_brk,&_ora,&_nop,&_slo,&_nop,&_ora,&_asl,&_slo,&_php,&_ora,&_asl,&_nop,&_nop,&_ora,&_asl,&_slo,
&_bpl,&_ora,&_nop,&_slo,&_nop,&_ora,&_asl,&_slo,&_clc,&_ora,&_nop,&_slo,&_nop,&_ora,&_asl,&_slo,
&_jsr,&_and,&_nop,&_rla,&_bit,&_and,&_rol,&_rla,&_plp,&_and,&_rol,&_nop,&_bit,&_and,&_rol,&_rla,
&_bmi,&_and,&_nop,&_rla,&_nop,&_and,&_rol,&_rla,&_sec,&_and,&_nop,&_rla,&_nop,&_and,&_rol,&_rla,
&_rti,&_eor,&_nop,&_sre,&_nop,&_eor,&_lsr,&_sre,&_pha,&_eor,&_lsr,&_nop,&_jmp,&_eor,&_lsr,&_sre,
&_bvc,&_eor,&_nop,&_sre,&_nop,&_eor,&_lsr,&_sre,&_cli,&_eor,&_nop,&_sre,&_nop,&_eor,&_lsr,&_sre,
&_rts,&_adc,&_nop,&_rra,&_nop,&_adc,&_ror,&_rra,&_pla,&_adc,&_ror,&_nop,&_jmp,&_adc,&_ror,&_rra,
&_bvs,&_adc,&_nop,&_rra,&_nop,&_adc,&_ror,&_rra,&_sei,&_adc,&_nop,&_rra,&_nop,&_adc,&_ror,&_rra,
&_nop,&_sta,&_nop,&_sax,&_sty,&_sta,&_stx,&_sax,&_dey,&_nop,&_txa,&_nop,&_sty,&_sta,&_stx,&_sax,
&_bcc,&_sta,&_nop,&_nop,&_sty,&_sta,&_stx,&_sax,&_tya,&_sta,&_txs,&_nop,&_nop,&_sta,&_nop,&_nop,
&_ldy,&_lda,&_ldx,&_lax,&_ldy,&_lda,&_ldx,&_lax,&_tay,&_lda,&_tax,&_nop,&_ldy,&_lda,&_ldx,&_lax,
&_bcs,&_lda,&_nop,&_lax,&_ldy,&_lda,&_ldx,&_lax,&_clv,&_lda,&_tsx,&_lax,&_ldy,&_lda,&_ldx,&_lax,
&_cpy,&_cmp,&_nop,&_dcp,&_cpy,&_cmp,&_dec,&_dcp,&_iny,&_cmp,&_dex,&_nop,&_cpy,&_cmp,&_dec,&_dcp,
&_bne,&_cmp,&_nop,&_dcp,&_nop,&_cmp,&_dec,&_dcp,&_cld,&_cmp,&_nop,&_dcp,&_nop,&_cmp,&_dec,&_dcp,
&_cpx,&_sbc,&_nop,&_isb,&_cpx,&_sbc,&_inc,&_isb,&_inx,&_sbc,&_nop,&_sbc,&_cpx,&_sbc,&_inc,&_isb,
&_beq,&_sbc,&_nop,&_isb,&_nop,&_sbc,&_inc,&_isb,&_sed,&_sbc,&_nop,&_isb,&_nop,&_sbc,&_inc,&_isb
};
// use local mem? this is usually faster
#define LMEM
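// With LMEM the whole _6502 state is copied into the thread's local frame, so
// the fetch/decode/execute loop below works out of registers/L1 instead of
// re-reading global memory every cycle; the state is written back once at the end.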
__global__ void step(_6502* states, int steps, int num_threads) {
int i = blockDim.x * blockIdx.x + threadIdx.x; // thread idx
if (i < num_threads) {
#ifdef LMEM // use local memory
_6502 ln = states[i]; _6502 *n = &ln;
#else // operate directly on global mem
_6502 *n = &states[i];
#endif
for (int j = 0; j < n->max_cycles; ++j) {
u8 op = f8(n); I = op; // fetch next byte
((void(*)(_6502*))addrtable[op])(n); // decode addr mode
((void(*)(_6502*)) optable[op])(n); // execute
CY++; // increment cycle count
}
#ifdef LMEM // update from local mem
states[i] = ln;
#endif
}
}
void reset(_6502 *n, u16 _PC, u8 _SP) {
  PC=_PC; A=0x00; X=0x00; Y=0x00; P=0x24; SP=_SP; CY=0; memset(n->mem, '\0', MEM_SIZE);
n->max_cycles = (rand() % 5000 + 10);
}
// wrapper for CUDA call
void gpu_step(_6502* states, u32 steps, u32 num_blocks, u32 threads_per_block) {
step<<<num_blocks, threads_per_block>>>(states, steps, num_blocks * threads_per_block);
}
extern "C" {
void run(int num_blocks, int threads_per_block, int steps, int iters, const char *name) {
cudaError_t err = cudaSuccess; // for checking CUDA errors
int num_threads = num_blocks * threads_per_block;
printf(" main: running %d blocks * %d threads (%d threads total), %d steps\n", num_blocks, threads_per_block, num_threads, steps);
// allocate _6502 registers / state
_6502 *h_in_regs = (_6502 *) malloc(num_threads * sizeof(_6502));
_6502 *h_out_regs = (_6502 *) malloc(num_threads * sizeof(_6502));
// resetting all instances
for (u32 i = 0; i < num_threads; i++) {
reset(&h_in_regs[i], 0x0600, 0xfe); read_bin(&h_in_regs[i].mem[0x0600], name);
}
// alloc gpu mem
printf(" main: allocating %zu device bytes\n", num_threads * sizeof(_6502));
_6502* d_regs = NULL;
err = cudaMalloc((void **)&d_regs, num_threads * sizeof(_6502) ); CHECK_ERR_CUDA(err);
printf(" main: copying host -> device\n");
err = cudaMemcpy(d_regs, h_in_regs, sizeof(_6502 ) * num_threads, cudaMemcpyHostToDevice); CHECK_ERR_CUDA(err);
for (int j = 0; j < iters; j++ ) {
double start_time = get_time();
cudaDeviceSynchronize();
///
gpu_step(d_regs, steps, num_blocks, threads_per_block);
///
cudaDeviceSynchronize();
double walltime = get_time() - start_time;
err = cudaGetLastError(); CHECK_ERR_CUDA(err);
printf(" main: kernel time = %.6f s, %2.6f us/step, %5.3f MHz\n", walltime,
1e6 * (walltime/(steps * num_threads)), ((steps * num_threads)/walltime)/1e6);
printf(" main: copying device -> host\n");
err = cudaMemcpy(h_out_regs, d_regs, sizeof(_6502) * num_threads, cudaMemcpyDeviceToHost); CHECK_ERR_CUDA(err);
// guard against overflowing the fixed-size host mem[] snapshot array
for (u32 i = 0; i < num_threads && i < MAX_THREADS; i++) {
memcpy(mem[i], h_out_regs[i].mem, 0x1000);
//print_regs(i, &h_out_regs[i]); print_scrn(i, &h_out_regs[i]);
}
}
printf(" main: freeing memory\n");
// free gpu mem
err = cudaFree(d_regs); CHECK_ERR_CUDA(err);
// free host mem
free(h_in_regs); free(h_out_regs);
printf(" main: done.\n");
}
}
int main(int argc, char **argv) {
int num_blocks = 1; int threads_per_block = 1; int iters = 1; int steps = 32768;
if (argc > 3) {
num_blocks = strtol(argv[1], NULL, 10);
threads_per_block = strtol(argv[2], NULL, 10);
steps = strtol(argv[3], NULL, 10);
} else {
printf("usage: %s blocks thread/block steps\n", argv[0]);
return -1;
}
run(num_blocks, threads_per_block, steps, iters, "sierp.bin");
return 0;
}
|
8eb74d000e00465919b6055c8e54d8dd8007b909.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
const int width = 10 * 1024;
const int height = 10 * 1024;
void hist_host(uint8_t *image, int width, int height, int *hist)
{
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
hist[image[i * width + j]]++;
}
__global__ void hist_gpu(uint8_t *image, int width, int height, int *hist)
{
size_t size = width * height;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
hist[image[i]]++;
}
}
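// NOTE: the unguarded hist[image[i]]++ above is a data race -- threads hitting
// the same bin can lose increments, so this kernel undercounts; it is presumably
// kept as the baseline for comparison with hist_gpu_atomic below.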
__global__ void hist_gpu_atomic(uint8_t *image, int width, int height, int *hist)
{
size_t size = width * height;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd(&hist[image[i]], 1);
i += stride;
}
}
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
int main()
{
double tcpu = 0, tgpu = 0, tmem = 0;
hipError_t err;
size_t size = sizeof(uint8_t) * width * height;
uint8_t *image = (uint8_t *)malloc(size);
if (image == NULL) {
fprintf(stderr, "Allocation error.\n");
exit(EXIT_FAILURE);
}
srand(0);
for (size_t i = 0; i < size; i++)
image[i] = (rand() / (double)RAND_MAX) * 255;
int hist[256];
memset(hist, 0, sizeof(*hist) * 256);
tcpu = -wtime();
hist_host(image, width, height, hist);
tcpu += wtime();
double sum = 0;
for (int i = 0; i < 256; i++)
sum += hist[i];
printf("Sum (CPU) = %f\n", sum);
/* Allocate on device */
int *d_hist = NULL;
hipMalloc((void **)&d_hist, sizeof(*d_hist) * 256);
hipMemset(d_hist, 0, sizeof(*d_hist) * 256);
uint8_t *d_image = NULL;
hipMalloc((void **)&d_image, size);
hipMemcpy(d_image, image, size, hipMemcpyHostToDevice);
/* Launch the kernel */
int threadsPerBlock = 1024;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
tgpu = -wtime();
hipLaunchKernelGGL(( hist_gpu), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_image, width, height, d_hist);
hipDeviceSynchronize();
tgpu += wtime();
if ( (err = hipGetLastError()) != hipSuccess) {
fprintf(stderr, "Failed to launch kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
tmem -= wtime();
hipMemcpy(hist, d_hist, sizeof(*d_hist) * 256, hipMemcpyDeviceToHost);
tmem += wtime();
sum = 0;
for (int i = 0; i < 256; i++)
sum += hist[i];
printf("Sum (GPU) = %f\n", sum);
printf("CPU version (sec.): %.6f\n", tcpu);
printf("GPU version (sec.): %.6f\n", tgpu);
printf("Memory ops. (sec.): %.6f\n", tmem);
printf("Speedup: %.2f\n", tcpu / tgpu);
printf("Speedup (with mem ops.): %.2f\n", tcpu / (tgpu + tmem));
hipFree(d_image);
hipFree(d_hist);
free(image);
hipDeviceReset();
return 0;
}
| 8eb74d000e00465919b6055c8e54d8dd8007b909.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/time.h>
#include <cuda_runtime.h>
const int width = 10 * 1024;
const int height = 10 * 1024;
void hist_host(uint8_t *image, int width, int height, int *hist)
{
for (int i = 0; i < height; i++)
for (int j = 0; j < width; j++)
hist[image[i * width + j]]++;
}
__global__ void hist_gpu(uint8_t *image, int width, int height, int *hist)
{
size_t size = width * height;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
hist[image[i]]++;
}
}
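// NOTE: the unguarded hist[image[i]]++ above is a data race -- threads hitting
// the same bin can lose increments, so this kernel undercounts; it is presumably
// kept as the baseline for comparison with hist_gpu_atomic below.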
__global__ void hist_gpu_atomic(uint8_t *image, int width, int height, int *hist)
{
size_t size = width * height;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd(&hist[image[i]], 1);
i += stride;
}
}
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
int main()
{
double tcpu = 0, tgpu = 0, tmem = 0;
cudaError_t err;
size_t size = sizeof(uint8_t) * width * height;
uint8_t *image = (uint8_t *)malloc(size);
if (image == NULL) {
fprintf(stderr, "Allocation error.\n");
exit(EXIT_FAILURE);
}
srand(0);
for (size_t i = 0; i < size; i++)
image[i] = (rand() / (double)RAND_MAX) * 255;
int hist[256];
memset(hist, 0, sizeof(*hist) * 256);
tcpu = -wtime();
hist_host(image, width, height, hist);
tcpu += wtime();
double sum = 0;
for (int i = 0; i < 256; i++)
sum += hist[i];
printf("Sum (CPU) = %f\n", sum);
/* Allocate on device */
int *d_hist = NULL;
cudaMalloc((void **)&d_hist, sizeof(*d_hist) * 256);
cudaMemset(d_hist, 0, sizeof(*d_hist) * 256);
uint8_t *d_image = NULL;
cudaMalloc((void **)&d_image, size);
cudaMemcpy(d_image, image, size, cudaMemcpyHostToDevice);
/* Launch the kernel */
int threadsPerBlock = 1024;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
tgpu = -wtime();
hist_gpu<<<blocksPerGrid, threadsPerBlock>>>(d_image, width, height, d_hist);
cudaDeviceSynchronize();
tgpu += wtime();
if ( (err = cudaGetLastError()) != cudaSuccess) {
fprintf(stderr, "Failed to launch kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
tmem -= wtime();
cudaMemcpy(hist, d_hist, sizeof(*d_hist) * 256, cudaMemcpyDeviceToHost);
tmem += wtime();
sum = 0;
for (int i = 0; i < 256; i++)
sum += hist[i];
printf("Sum (GPU) = %f\n", sum);
printf("CPU version (sec.): %.6f\n", tcpu);
printf("GPU version (sec.): %.6f\n", tgpu);
printf("Memory ops. (sec.): %.6f\n", tmem);
printf("Speedup: %.2f\n", tcpu / tgpu);
printf("Speedup (with mem ops.): %.2f\n", tcpu / (tgpu + tmem));
cudaFree(d_image);
cudaFree(d_hist);
free(image);
cudaDeviceReset();
return 0;
}
|
219aa514e04ea67463eac93a043c030debe78e23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2023 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "ComputeThermoHMAGPU.cuh"
#include "hoomd/VectorMath.h"
#include <assert.h>
namespace hoomd
{
namespace md
{
namespace kernel
{
//! Shared memory used in reducing the sums
extern __shared__ Scalar3 compute_thermo_hma_sdata[];
//! Shared memory used in final reduction
extern __shared__ Scalar3 compute_thermo_hma_final_sdata[];
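//! Note: both extern __shared__ declarations above alias the same dynamically
//! sized shared-memory buffer; each kernel below only ever touches one of them.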
/*! \file ComputeThermoHMAGPU.cu
    \brief Defines GPU kernel code for computing HMA thermodynamic properties on the GPU. Used by
   ComputeThermoHMAGPU.
*/
//! Perform partial sums of the thermo properties on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param box Box the particles are in
\param d_net_force Net force / pe array from ParticleData
\param d_net_virial Net virial array from ParticleData
\param virial_pitch pitch of 2D virial array
\param d_position Particle position array from ParticleData
\param d_lattice_site Particle lattice site array
\param d_image Image array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group this GPU processes
\param offset Offset of this GPU in list of group members
\param block_offset Offset of this GPU in the array of partial sums
All partial sums are packaged up in a Scalar3 to keep pointer management down.
- force * dr is summed in .x
- Potential energy is summed in .y
- W is summed in .z
One thread is executed per group member. That thread reads in the values for its member into
shared memory and then the block performs a reduction in parallel to produce a partial sum output
for the block. These partial sums are written to d_scratch[blockIdx.x].
sizeof(Scalar3)*block_size of dynamic shared memory are needed for this kernel to run.
*/
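// The halving tree reductions in both kernels below assume block_size is a
// power of two; an odd blockDim.x would drop elements from the sum.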
__global__ void gpu_compute_thermo_hma_partial_sums(Scalar3* d_scratch,
BoxDim box,
Scalar4* d_net_force,
Scalar* d_net_virial,
const size_t virial_pitch,
Scalar4* d_position,
Scalar3* d_lattice_site,
int3* d_image,
unsigned int* d_body,
unsigned int* d_tag,
unsigned int* d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar3 my_element; // element of scratch space read in
// non-participating thread: contribute 0 to the sum
my_element = make_scalar3(0, 0, 0);
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx + offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
Scalar4 net_force = d_net_force[idx];
Scalar net_isotropic_virial;
// (1/3)*trace of virial tensor
net_isotropic_virial = Scalar(1.0 / 3.0)
* (d_net_virial[0 * virial_pitch + idx] // xx
+ d_net_virial[3 * virial_pitch + idx] // yy
+ d_net_virial[5 * virial_pitch + idx]); // zz
Scalar4 pos4 = d_position[idx];
Scalar3 pos3 = make_scalar3(pos4.x, pos4.y, pos4.z);
Scalar3 lat = d_lattice_site[tag];
Scalar3 dr = box.shift(pos3, d_image[idx]) - lat;
double fdr = 0;
fdr += (double)d_net_force[idx].x * dr.x;
fdr += (double)d_net_force[idx].y * dr.y;
fdr += (double)d_net_force[idx].z * dr.z;
// compute our contribution to the sum
my_element.x = Scalar(fdr);
my_element.y = net_force.w;
my_element.z = net_isotropic_virial;
}
}
compute_thermo_hma_sdata[threadIdx.x] = my_element;
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_hma_sdata[threadIdx.x].x
+= compute_thermo_hma_sdata[threadIdx.x + offs].x;
compute_thermo_hma_sdata[threadIdx.x].y
+= compute_thermo_hma_sdata[threadIdx.x + offs].y;
compute_thermo_hma_sdata[threadIdx.x].z
+= compute_thermo_hma_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
Scalar3 res = compute_thermo_hma_sdata[0];
d_scratch[block_offset + blockIdx.x] = make_scalar3(res.x, res.y, res.z);
}
}
//! Complete partial sums and compute final thermodynamic quantities (for pressure, only isotropic
//! contribution)
/*! \param d_properties Property array to write final values
\param d_scratch Partial sums
\param box Box the particles are in
\param D Dimensionality of the system
\param group_size Number of particles in the group
\param num_partial_sums Number of partial sums in \a d_scratch
\param temperature The temperature that governs sampling of the integrator
\param harmonicPressure The contribution to the pressure from harmonic fluctuations
\param external_virial External contribution to virial (1/3 trace)
\param external_energy External contribution to potential energy
Only one block is executed. In that block, the partial sums are read in and reduced to final
values. From the final sums, the thermodynamic properties are computed and written to
d_properties.
sizeof(Scalar3)*block_size bytes of shared memory are needed for this kernel to run.
*/
__global__ void gpu_compute_thermo_hma_final_sums(Scalar* d_properties,
Scalar3* d_scratch,
BoxDim box,
unsigned int D,
unsigned int group_size,
unsigned int num_partial_sums,
Scalar temperature,
Scalar harmonicPressure,
Scalar external_virial,
Scalar external_energy)
{
Scalar3 final_sum = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
Scalar3 scratch = d_scratch[start + threadIdx.x];
compute_thermo_hma_final_sdata[threadIdx.x]
= make_scalar3(scratch.x, scratch.y, scratch.z);
}
else
compute_thermo_hma_final_sdata[threadIdx.x]
= make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_hma_final_sdata[threadIdx.x].x
+= compute_thermo_hma_final_sdata[threadIdx.x + offs].x;
compute_thermo_hma_final_sdata[threadIdx.x].y
+= compute_thermo_hma_final_sdata[threadIdx.x + offs].y;
compute_thermo_hma_final_sdata[threadIdx.x].z
+= compute_thermo_hma_final_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
final_sum.x += compute_thermo_hma_final_sdata[0].x;
final_sum.y += compute_thermo_hma_final_sdata[0].y;
final_sum.z += compute_thermo_hma_final_sdata[0].z;
}
}
if (threadIdx.x == 0)
{
// compute final quantities
Scalar fdr = final_sum.x;
Scalar pe_total = final_sum.y + external_energy;
Scalar W = final_sum.z + external_virial;
// compute the pressure
// volume/area & other 2D stuff needed
Scalar volume;
Scalar3 L = box.getL();
if (D == 2)
{
// "volume" is area in 2D
volume = L.x * L.y;
// W needs to be corrected since the 1/3 factor is built in
W *= Scalar(3.0) / Scalar(2.0);
}
else
{
volume = L.x * L.y * L.z;
}
// pressure: P = (N * K_B * T + W)/V
Scalar fV = (harmonicPressure / temperature - group_size / volume) / (D * (group_size - 1));
Scalar pressure = harmonicPressure + W / volume + fV * fdr;
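        // HMA form (an interpretation, not stated in the source): the ideal-gas
        // N*kB*T/V term of the conventional virial expression above is replaced
        // by the lattice value harmonicPressure, and fV * sum(F.dr) corrects for
        // the harmonically mapped fluctuations about the lattice sites.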
// fill out the GPUArray
d_properties[thermoHMA_index::potential_energyHMA]
= pe_total + 1.5 * (group_size - 1) * temperature + 0.5 * fdr;
d_properties[thermoHMA_index::pressureHMA] = pressure;
}
}
//! Compute partial sums of thermodynamic properties of a group on the GPU,
/*! \param d_pos Particle position array from ParticleData
\param d_lattice_site Particle lattice site array
\param d_image Image array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param gpu_partition Load balancing info for multi-GPU reduction
This function drives gpu_compute_thermo_hma_partial_sums and gpu_compute_thermo_hma_final_sums, see them
for details.
*/
hipError_t gpu_compute_thermo_hma_partial(Scalar4* d_pos,
Scalar3* d_lattice_site,
int3* d_image,
unsigned int* d_body,
unsigned int* d_tag,
unsigned int* d_group_members,
unsigned int group_size,
const BoxDim& box,
const compute_thermo_hma_args& args,
const GPUPartition& gpu_partition)
{
assert(d_pos);
assert(d_group_members);
assert(args.d_net_force);
assert(args.d_net_virial);
assert(args.d_scratch);
unsigned int block_offset = 0;
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
dim3 grid(nwork / args.block_size + 1, 1, 1);
dim3 threads(args.block_size, 1, 1);
const size_t shared_bytes = sizeof(Scalar3) * args.block_size;
hipLaunchKernelGGL(( gpu_compute_thermo_hma_partial_sums), dim3(grid), dim3(threads), shared_bytes, 0, args.d_scratch,
box,
args.d_net_force,
args.d_net_virial,
args.virial_pitch,
d_pos,
d_lattice_site,
d_image,
d_body,
d_tag,
d_group_members,
nwork,
range.first,
block_offset);
block_offset += grid.x;
}
assert(block_offset <= args.n_blocks);
return hipSuccess;
}
//! Compute thermodynamic properties of a group on the GPU
/*! \param d_properties Array to write computed properties
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param num_blocks Number of partial sums to reduce
This function drives gpu_compute_thermo_hma_partial_sums and gpu_compute_thermo_hma_final_sums, see them
for details.
*/
hipError_t gpu_compute_thermo_hma_final(Scalar* d_properties,
unsigned int* d_body,
unsigned int* d_tag,
unsigned int* d_group_members,
unsigned int group_size,
const BoxDim& box,
const compute_thermo_hma_args& args)
{
assert(d_properties);
assert(d_group_members);
assert(args.d_net_force);
assert(args.d_net_virial);
assert(args.d_scratch);
// setup the grid to run the final kernel
int final_block_size = 512;
dim3 grid = dim3(1, 1, 1);
dim3 threads = dim3(final_block_size, 1, 1);
const size_t shared_bytes = sizeof(Scalar3) * final_block_size;
Scalar external_virial
= Scalar(1.0 / 3.0)
* (args.external_virial_xx + args.external_virial_yy + args.external_virial_zz);
// run the kernel
hipLaunchKernelGGL(( gpu_compute_thermo_hma_final_sums), dim3(grid), dim3(threads), shared_bytes, 0, d_properties,
args.d_scratch,
box,
args.D,
group_size,
args.n_blocks,
args.temperature,
args.harmonicPressure,
external_virial,
args.external_energy);
return hipSuccess;
}
} // end namespace kernel
} // end namespace md
} // end namespace hoomd
| 219aa514e04ea67463eac93a043c030debe78e23.cu | // Copyright (c) 2009-2023 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "ComputeThermoHMAGPU.cuh"
#include "hoomd/VectorMath.h"
#include <assert.h>
namespace hoomd
{
namespace md
{
namespace kernel
{
//! Shared memory used in reducing the sums
extern __shared__ Scalar3 compute_thermo_hma_sdata[];
//! Shared memory used in final reduction
extern __shared__ Scalar3 compute_thermo_hma_final_sdata[];
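//! Note: both extern __shared__ declarations above alias the same dynamically
//! sized shared-memory buffer; each kernel below only ever touches one of them.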
/*! \file ComputeThermoHMAGPU.cu
    \brief Defines GPU kernel code for computing HMA thermodynamic properties on the GPU. Used by
   ComputeThermoHMAGPU.
*/
//! Perform partial sums of the thermo properties on the GPU
/*! \param d_scratch Scratch space to hold partial sums. One element is written per block
\param box Box the particles are in
\param d_net_force Net force / pe array from ParticleData
\param d_net_virial Net virial array from ParticleData
\param virial_pitch pitch of 2D virial array
\param d_position Particle position array from ParticleData
\param d_lattice_site Particle lattice site array
\param d_image Image array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members for which to sum properties
\param work_size Number of particles in the group this GPU processes
\param offset Offset of this GPU in list of group members
\param block_offset Offset of this GPU in the array of partial sums
All partial sums are packaged up in a Scalar3 to keep pointer management down.
- force * dr is summed in .x
- Potential energy is summed in .y
- W is summed in .z
One thread is executed per group member. That thread reads in the values for its member into
shared memory and then the block performs a reduction in parallel to produce a partial sum output
for the block. These partial sums are written to d_scratch[blockIdx.x].
sizeof(Scalar3)*block_size of dynamic shared memory are needed for this kernel to run.
*/
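// The halving tree reductions in both kernels below assume block_size is a
// power of two; an odd blockDim.x would drop elements from the sum.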
__global__ void gpu_compute_thermo_hma_partial_sums(Scalar3* d_scratch,
BoxDim box,
Scalar4* d_net_force,
Scalar* d_net_virial,
const size_t virial_pitch,
Scalar4* d_position,
Scalar3* d_lattice_site,
int3* d_image,
unsigned int* d_body,
unsigned int* d_tag,
unsigned int* d_group_members,
unsigned int work_size,
unsigned int offset,
unsigned int block_offset)
{
// determine which particle this thread works on
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar3 my_element; // element of scratch space read in
// non-participating thread: contribute 0 to the sum
my_element = make_scalar3(0, 0, 0);
if (group_idx < work_size)
{
unsigned int idx = d_group_members[group_idx + offset];
// ignore rigid body constituent particles in the sum
unsigned int body = d_body[idx];
unsigned int tag = d_tag[idx];
if (body >= MIN_FLOPPY || body == tag)
{
Scalar4 net_force = d_net_force[idx];
Scalar net_isotropic_virial;
// (1/3)*trace of virial tensor
net_isotropic_virial = Scalar(1.0 / 3.0)
* (d_net_virial[0 * virial_pitch + idx] // xx
+ d_net_virial[3 * virial_pitch + idx] // yy
+ d_net_virial[5 * virial_pitch + idx]); // zz
Scalar4 pos4 = d_position[idx];
Scalar3 pos3 = make_scalar3(pos4.x, pos4.y, pos4.z);
Scalar3 lat = d_lattice_site[tag];
Scalar3 dr = box.shift(pos3, d_image[idx]) - lat;
double fdr = 0;
fdr += (double)d_net_force[idx].x * dr.x;
fdr += (double)d_net_force[idx].y * dr.y;
fdr += (double)d_net_force[idx].z * dr.z;
// compute our contribution to the sum
my_element.x = Scalar(fdr);
my_element.y = net_force.w;
my_element.z = net_isotropic_virial;
}
}
compute_thermo_hma_sdata[threadIdx.x] = my_element;
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_hma_sdata[threadIdx.x].x
+= compute_thermo_hma_sdata[threadIdx.x + offs].x;
compute_thermo_hma_sdata[threadIdx.x].y
+= compute_thermo_hma_sdata[threadIdx.x + offs].y;
compute_thermo_hma_sdata[threadIdx.x].z
+= compute_thermo_hma_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
Scalar3 res = compute_thermo_hma_sdata[0];
d_scratch[block_offset + blockIdx.x] = make_scalar3(res.x, res.y, res.z);
}
}
//! Complete partial sums and compute final thermodynamic quantities (for pressure, only isotropic
//! contribution)
/*! \param d_properties Property array to write final values
\param d_scratch Partial sums
\param box Box the particles are in
\param D Dimensionality of the system
\param group_size Number of particles in the group
\param num_partial_sums Number of partial sums in \a d_scratch
\param temperature The temperature that governs sampling of the integrator
\param harmonicPressure The contribution to the pressure from harmonic fluctuations
\param external_virial External contribution to virial (1/3 trace)
\param external_energy External contribution to potential energy
Only one block is executed. In that block, the partial sums are read in and reduced to final
values. From the final sums, the thermodynamic properties are computed and written to
d_properties.
sizeof(Scalar3)*block_size bytes of shared memory are needed for this kernel to run.
*/
__global__ void gpu_compute_thermo_hma_final_sums(Scalar* d_properties,
Scalar3* d_scratch,
BoxDim box,
unsigned int D,
unsigned int group_size,
unsigned int num_partial_sums,
Scalar temperature,
Scalar harmonicPressure,
Scalar external_virial,
Scalar external_energy)
{
Scalar3 final_sum = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < num_partial_sums; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_partial_sums)
{
Scalar3 scratch = d_scratch[start + threadIdx.x];
compute_thermo_hma_final_sdata[threadIdx.x]
= make_scalar3(scratch.x, scratch.y, scratch.z);
}
else
compute_thermo_hma_final_sdata[threadIdx.x]
= make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
{
compute_thermo_hma_final_sdata[threadIdx.x].x
+= compute_thermo_hma_final_sdata[threadIdx.x + offs].x;
compute_thermo_hma_final_sdata[threadIdx.x].y
+= compute_thermo_hma_final_sdata[threadIdx.x + offs].y;
compute_thermo_hma_final_sdata[threadIdx.x].z
+= compute_thermo_hma_final_sdata[threadIdx.x + offs].z;
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0)
{
final_sum.x += compute_thermo_hma_final_sdata[0].x;
final_sum.y += compute_thermo_hma_final_sdata[0].y;
final_sum.z += compute_thermo_hma_final_sdata[0].z;
}
}
if (threadIdx.x == 0)
{
// compute final quantities
Scalar fdr = final_sum.x;
Scalar pe_total = final_sum.y + external_energy;
Scalar W = final_sum.z + external_virial;
// compute the pressure
// volume/area & other 2D stuff needed
Scalar volume;
Scalar3 L = box.getL();
if (D == 2)
{
// "volume" is area in 2D
volume = L.x * L.y;
// W needs to be corrected since the 1/3 factor is built in
W *= Scalar(3.0) / Scalar(2.0);
}
else
{
volume = L.x * L.y * L.z;
}
        // HMA pressure estimator: P = P_harm + W/V + f_V * fdr
Scalar fV = (harmonicPressure / temperature - group_size / volume) / (D * (group_size - 1));
Scalar pressure = harmonicPressure + W / volume + fV * fdr;
// fill out the GPUArray
d_properties[thermoHMA_index::potential_energyHMA]
= pe_total + 1.5 * (group_size - 1) * temperature + 0.5 * fdr;
d_properties[thermoHMA_index::pressureHMA] = pressure;
}
}
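// Note added for clarity (not part of the original HOOMD source): the code above
// implements the harmonically mapped averaging (HMA) estimators exactly as written:
//   U_HMA = U_pot + (3/2)(N-1) k_B T + (1/2) sum_i F_i . dr_i
//   P_HMA = P_harm + W/V + f_V * sum_i F_i . dr_i,   with
//   f_V   = (P_harm / (k_B T) - N/V) / (D (N-1))
// where dr_i is the displacement of particle i from its lattice site, W is the
// (1/3)-trace virial, and the 3/2 prefactor in U_HMA assumes D = 3 as coded.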
//! Compute partial sums of thermodynamic properties of a group on the GPU
/*! \param d_pos Particle position array from ParticleData
\param d_lattice_site Particle lattice site array
\param d_image Image array from ParticleData
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param gpu_partition Load balancing info for multi-GPU reduction
    This function drives gpu_compute_thermo_hma_partial_sums and gpu_compute_thermo_hma_final_sums,
    see them for details.
*/
hipError_t gpu_compute_thermo_hma_partial(Scalar4* d_pos,
Scalar3* d_lattice_site,
int3* d_image,
unsigned int* d_body,
unsigned int* d_tag,
unsigned int* d_group_members,
unsigned int group_size,
const BoxDim& box,
const compute_thermo_hma_args& args,
const GPUPartition& gpu_partition)
{
assert(d_pos);
assert(d_group_members);
assert(args.d_net_force);
assert(args.d_net_virial);
assert(args.d_scratch);
unsigned int block_offset = 0;
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
dim3 grid(nwork / args.block_size + 1, 1, 1);
dim3 threads(args.block_size, 1, 1);
const size_t shared_bytes = sizeof(Scalar3) * args.block_size;
gpu_compute_thermo_hma_partial_sums<<<grid, threads, shared_bytes>>>(args.d_scratch,
box,
args.d_net_force,
args.d_net_virial,
args.virial_pitch,
d_pos,
d_lattice_site,
d_image,
d_body,
d_tag,
d_group_members,
nwork,
range.first,
block_offset);
block_offset += grid.x;
}
assert(block_offset <= args.n_blocks);
return hipSuccess;
}
//! Compute thermodynamic properties of a group on the GPU
/*! \param d_properties Array to write computed properties
\param d_body Particle body id
\param d_tag Particle tag
\param d_group_members List of group members
\param group_size Number of group members
\param box Box the particles are in
\param args Additional arguments
\param num_blocks Number of partial sums to reduce
    This function drives gpu_compute_thermo_hma_partial_sums and gpu_compute_thermo_hma_final_sums,
    see them for details.
*/
hipError_t gpu_compute_thermo_hma_final(Scalar* d_properties,
unsigned int* d_body,
unsigned int* d_tag,
unsigned int* d_group_members,
unsigned int group_size,
const BoxDim& box,
const compute_thermo_hma_args& args)
{
assert(d_properties);
assert(d_group_members);
assert(args.d_net_force);
assert(args.d_net_virial);
assert(args.d_scratch);
// setup the grid to run the final kernel
int final_block_size = 512;
dim3 grid = dim3(1, 1, 1);
dim3 threads = dim3(final_block_size, 1, 1);
const size_t shared_bytes = sizeof(Scalar3) * final_block_size;
Scalar external_virial
= Scalar(1.0 / 3.0)
* (args.external_virial_xx + args.external_virial_yy + args.external_virial_zz);
// run the kernel
gpu_compute_thermo_hma_final_sums<<<grid, threads, shared_bytes>>>(d_properties,
args.d_scratch,
box,
args.D,
group_size,
args.n_blocks,
args.temperature,
args.harmonicPressure,
external_virial,
args.external_energy);
return hipSuccess;
}
} // end namespace kernel
} // end namespace md
} // end namespace hoomd
|
9f8398cc513828f370f3fde9fd2752576e555eef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file layer_norm.cu
 * \brief Implements Ba et al., Layer Normalization (https://arxiv.org/abs/1607.06450).
*/
#include "./layer_norm-inl.h"
using namespace mshadow::cuda;
namespace mxnet {
namespace op {
template <typename DType>
__device__ __forceinline__ DType warp_shfl(DType value, int src_lane,
int width = 32, unsigned int mask = 0xffffffff) {
#if TORCH_HIP_VERSION >= 9000
return __shfl_sync(mask, value, src_lane, width);
#else
return __shfl(value, src_lane, width);
#endif
}
template <typename DType>
__device__ __forceinline__ DType warp_shfl_xor(DType value, int laneMask,
int width = 32, unsigned int mask = 0xffffffff) {
#if TORCH_HIP_VERSION >= 9000
return __shfl_xor_sync(mask, value, laneMask, width);
#else
return __shfl_xor(value, laneMask, width);
#endif
}
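/* Illustrative usage (added sketch, not in the original source): warp_shfl_xor
 * enables the butterfly all-reduce pattern used by the kernels below; after the
 * loop, every lane of a full warp holds the warp-wide sum:
 *
 *   float v = per_lane_value;
 *   for (int mask = 32 / 2; mask > 0; mask >>= 1)
 *     v += warp_shfl_xor(v, mask);
 */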
/* A single updating step of Welford's online algorithm to calculate the mean and variance.
* The value 'curr' will be accumulated to the (mean, sigma2, count) triplet.
*
*/
template<typename DType, typename IType>
__device__ __forceinline__ void StepWelfordOnlineSum(const DType curr,
DType& mean, //NOLINT
DType& sigma2, //NOLINT
IType& count) { //NOLINT
count += IType(1);
DType delta = curr - mean;
mean += delta / count;
sigma2 += delta * (curr - mean);
}
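/* Worked example (added illustration, not in the original source): feeding
 * curr = 1, 2, 3 into an empty (mean = 0, sigma2 = 0, count = 0) triplet gives
 *   after 1: mean = 1.0, sigma2 = 0.0
 *   after 2: mean = 1.5, sigma2 = 0.5
 *   after 3: mean = 2.0, sigma2 = 2.0
 * Note that sigma2 accumulates the *sum* of squared deviations; it is divided
 * by the channel count later to obtain the variance.
 */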
/* Merge the mean/variance of two partitions. It's the key step of the Chan's parallel algorithm.
* The (lhs_mean, lhs_sigma2, lhs_count) will be merged into (rhs_mean, rhs_sigma2, rhs_count)
*
* See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance for more details.
*
* TODO(sxjscience) Explore the possibility of int lhs_count and rhs_count
*/
template<typename DType, typename IType>
__device__ __inline__ void ChanMergePartition(const DType lhs_mean,
const DType lhs_sigma2,
const IType lhs_count,
DType& rhs_mean, //NOLINT
DType& rhs_sigma2, //NOLINT
IType& rhs_count) { //NOLINT
DType delta = rhs_mean - lhs_mean;
DType nA = static_cast<DType>(lhs_count);
DType nB = static_cast<DType>(rhs_count);
rhs_count = nA + nB;
if (rhs_count > DType(0)) {
nA = nA / rhs_count;
nB = nB / rhs_count;
rhs_mean = nA * lhs_mean + nB * rhs_mean;
rhs_sigma2 = rhs_sigma2 + lhs_sigma2 + delta * delta * nA * nB * rhs_count;
} else {
rhs_mean = DType(0);
rhs_sigma2 = DType(0);
}
}
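/* Added note (not in the original source): with delta = rhs_mean - lhs_mean and
 * n = nA + nB, the merge above is the standard parallel-variance update
 *   mean' = (nA * lhs_mean + nB * rhs_mean) / n
 *   M2'   = lhs_sigma2 + rhs_sigma2 + delta^2 * nA * nB / n
 * expressed with nA and nB pre-divided by n to avoid an extra division.
 */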
/* Split the input column into multiple partitions and compute the mean/sigma of each partition.
* Each thread will keep a mean/sigma2. The mean/sigma2 can be further merged to get the mean and
* sigma2 of the column.
*/
template<typename AType, typename DType, typename IType>
__device__ __forceinline__ void BlockWelfordOnlineSum(const DType* __restrict__ col_vals,
const int nchannel,
AType& mean, //NOLINT
AType& sigma2, //NOLINT
IType& count) { //NOLINT
int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int nthread = blockDim.x * blockDim.y;
// Each thread takes charge of 4 consecutive numbers. This should optimize the loading speed using
// vectorized types like float4.
// Also, to minimize branch divergence, we split the for-loop into two parts.
int l = 4 * tid;
for (; l + 3 < nchannel; l += 4 * nthread) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
StepWelfordOnlineSum(static_cast<AType>(col_vals[l + i]), mean, sigma2, count);
}
}
for (; l < nchannel; ++l) {
StepWelfordOnlineSum(static_cast<AType>(col_vals[l]), mean, sigma2, count);
}
}
template<>
__device__ __forceinline__
void BlockWelfordOnlineSum<float, mshadow::half::half_t, int>
(const mshadow::half::half_t* __restrict__ col_vals,
const int nchannel,
float& mean, //NOLINT
float& sigma2, //NOLINT
int& count) { //NOLINT
int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int nthread = blockDim.x * blockDim.y;
// We cast the input half pointer to half2 to optimize the loading speed.
// Here, we need to notice that CUDA forces memory alignment, i.e.,
// ASSERT static_cast<size_t>(ptr) % sizeof(dtype) == 0.
// Thus, we need to shift the address of the half pointer to be aligned by half2.
int align_shift = (reinterpret_cast<size_t>(col_vals) % 4) != 0;
int padding = (nchannel - align_shift) % 2;
int half2_size = (nchannel - align_shift) / 2;
const __half2* half2_col_vals = reinterpret_cast<const __half2*>(col_vals + align_shift);
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (align_shift) {
StepWelfordOnlineSum(__half2float(col_vals[0].cuhalf_), mean, sigma2, count);
}
if (padding) {
StepWelfordOnlineSum(__half2float(col_vals[nchannel - 1].cuhalf_), mean, sigma2, count);
}
}
for (int l = tid; l < half2_size; l += nthread) {
float2 ele_val = __half22float2(half2_col_vals[l]);
StepWelfordOnlineSum(ele_val.x, mean, sigma2, count);
StepWelfordOnlineSum(ele_val.y, mean, sigma2, count);
}
}
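// Worked example (added illustration, not in the original source): for
// nchannel = 7 with col_vals at an address where (addr % 4) == 2, we get
// align_shift = 1, padding = 0, half2_size = 3: thread (0, 0) consumes
// col_vals[0] by itself and three half2 loads cover col_vals[1..6]. For an
// aligned pointer with nchannel = 7: align_shift = 0, padding = 1, so three
// half2 loads cover col_vals[0..5] and col_vals[6] is the padding element.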
/* Fused CUDA kernel for the forward pass of layer normalization.
* It computes the LayerNorm when axis=-1, i.e., contiguous reduction scenario.
* Shape of the input tensors:
* in_data = (nbatch, nchannel)
* gamma = (nchannel,)
* beta = (nchannel,)
 * out_data = (nbatch, nchannel)
 * mean_data = (nbatch,)
 * std_data = (nbatch,)
 * It's always launched with blockDim.x = WARP_SIZE and a tunable blockDim.y
 * Also, when blockDim.y > 1, it requires shared memory that has size:
 * sizeof(AType) * blockDim.x * blockDim.y + sizeof(int) * blockDim.x * blockDim.y / 2
*/
template<typename AType, typename DType, typename IType>
__global__ void LayerNormFusedForwardKernelContig(const int nbatch,
const int nchannel,
const AType eps,
const DType* __restrict__ in_data,
const DType* __restrict__ gamma,
const DType* __restrict__ beta,
DType* __restrict__ out_data,
DType* __restrict__ mean_data,
DType* __restrict__ std_data) {
int bid = blockIdx.x + blockIdx.y * gridDim.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int nthread = blockDim.x * blockDim.y;
IType count = 0;
AType mean = 0;
AType sigma2 = 0;
if (bid < nbatch) {
extern __shared__ char buf[]; // Shared memory
const DType* col_vals = in_data + bid * nchannel;
BlockWelfordOnlineSum(col_vals, nchannel, mean, sigma2, count);
// Merge the mean/sigma2 within a warp
// Use the Chan's Parallel Algorithm to merge all (mean, sigma2, counts)
// within a warp of threads.
// After calling the function, threadIdx.x == 0 will store the result of
// the aggregated (mean, sigma2, counts).
for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) {
AType meanB = warp_shfl_xor(mean, mask);
AType sigma2B = warp_shfl_xor(sigma2, mask);
IType countB = warp_shfl_xor(count, mask);
ChanMergePartition(meanB, sigma2B, countB, mean, sigma2, count);
}
if (blockDim.y > 1) {
// Inter-warp reduction. Copy the upper-half of the warps to shared memory
// and merge with the lower-half warp
AType* mean_buf = reinterpret_cast<AType*>(buf);
AType* sigma2_buf =
reinterpret_cast<AType*>(buf + sizeof(AType) * blockDim.y / 2 * blockDim.x);
IType* count_buf = reinterpret_cast<IType*>(buf + sizeof(AType) * blockDim.y * blockDim.x);
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
mean_buf[idx] = mean;
sigma2_buf[idx] = sigma2;
count_buf[idx] = count;
}
__syncthreads();
if (threadIdx.y < offset) {
const int idx = threadIdx.y * blockDim.x + threadIdx.x;
ChanMergePartition(mean_buf[idx], sigma2_buf[idx], count_buf[idx], mean, sigma2, count);
}
__syncthreads();
}
// Broadcast the result to all threads
if (threadIdx.y == 0) {
mean_buf[threadIdx.x] = mean;
sigma2_buf[threadIdx.x] = sigma2;
}
__syncthreads();
mean = mean_buf[threadIdx.x];
sigma2 = sigma2_buf[threadIdx.x] / nchannel;
} else {
sigma2 /= nchannel;
}
// Calculate the out_data: gamma * (x - mean) / sqrt(var + eps) + beta
AType std_eps = sqrt(sigma2 + eps);
AType invstd_eps = DType(1.0) / std_eps;
DType* out_col_val = out_data + bid * nchannel;
if (gamma != nullptr && beta != nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps *
(static_cast<AType>(col_vals[i]) - mean))
+ beta[i];
}
} else if (gamma == nullptr && beta != nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean))
+ beta[i];
}
} else if (gamma != nullptr && beta == nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps *
(static_cast<AType>(col_vals[i]) - mean));
}
} else {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean));
}
}
// Write the out_data and var_data
if (threadIdx.x == 0 && threadIdx.y == 0) {
mean_data[bid] = static_cast<DType>(mean);
std_data[bid] = static_cast<DType>(std_eps);
}
}
}
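/* Reference semantics of the kernel above (added sketch, not in the original
 * source), written per row b of in_data:
 *   mu = mean(x[b, :])
 *   s  = sqrt(mean((x[b, :] - mu)^2) + eps)
 *   out_data[b, c] = gamma[c] * (x[b, c] - mu) / s + beta[c]
 *   mean_data[b]   = mu,  std_data[b] = s
 * with the gamma/beta terms dropped when the corresponding pointer is null.
 */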
template<bool safe_acc = false>
void LayerNormGPUContig(const LayerNormParam param,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 3U);
mxnet::TShape data_shape(2, 0);
mxnet::TShape mean_shape(1, 0);
size_t in_ndim = inputs[layernorm::kData].ndim();
data_shape[0] = mean_shape[0] = inputs[layernorm::kData].shape_.ProdShape(0, in_ndim - 1);
data_shape[1] = inputs[layernorm::kData].shape_[in_ndim - 1];
const TBlob in_data = inputs[layernorm::kData].reshape(data_shape);
const TBlob gamma = inputs[layernorm::kGamma];
const TBlob beta = inputs[layernorm::kBeta];
const TBlob out_data = outputs[layernorm::kOut].reshape(data_shape);
const TBlob mean_data = outputs[layernorm::kMean].reshape(mean_shape);
const TBlob std_data = outputs[layernorm::kStd].reshape(mean_shape);
// Make sure the inputs are contiguous
CHECK_EQ(in_data.CheckContiguous(), true);
CHECK_EQ(gamma.CheckContiguous(), true);
CHECK_EQ(beta.CheckContiguous(), true);
CHECK_EQ(out_data.CheckContiguous(), true);
CHECK_EQ(mean_data.CheckContiguous(), true);
CHECK_EQ(std_data.CheckContiguous(), true);
  // Launch the kernel. The dynamic shared memory size is
  // sizeof(AType) * blockDim.y * blockDim.x + sizeof(int) * blockDim.y / 2 * blockDim.x
int nbatch = data_shape[0];
int nchannel = data_shape[1];
float eps = param.eps;
int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch;
int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1;
int nthread_y;
const dim3 dimGrid(ngrid_x, ngrid_y);
if (nchannel <= 128) {
nthread_y = 1;
} else if (nchannel <= 512) {
nthread_y = 2;
} else {
nthread_y = 4;
}
hipStream_t stream = Stream<gpu>::GetStream(ctx.get_stream<gpu>());
const dim3 dimBlock(32, nthread_y);
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
int nshared = nthread_y > 1 ? nthread_y * 32 * sizeof(AType)
+ (nthread_y / 2) * 32 * sizeof(int) : 0;
CheckLaunchParam(dimGrid, dimBlock);
hipLaunchKernelGGL(( LayerNormFusedForwardKernelContig<AType, DType, int>) , dim3(dimGrid), dim3(dimBlock), nshared, stream,
nbatch, nchannel, static_cast<AType>(eps),
in_data.dptr<DType>(), gamma.dptr<DType>(), beta.dptr<DType>(),
out_data.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>());
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedForwardKernelContig);
}
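// Example launch configuration (added illustration, not in the original
// source): with nbatch = 4096 and nchannel = 1024, the code above selects a
// (4096, 1) grid of 32x4 thread blocks; each block reduces one row with the
// Welford/Chan scheme and writes the normalized row to out_data.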
template<>
void LayerNormCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed);
if (req[0] == kNullOp) return;
CHECK_NE(req[0], kAddTo);
int axis = param.axis;
if (axis < 0) {
axis += static_cast<int>(inputs[0].ndim());
}
CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis;
if (axis == inputs[0].ndim() - 1) {
// Try to use the accelerated CUDA kernels
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for LayerNorm with float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
if (safe_acc) {
return LayerNormGPUContig<true>(param, ctx, inputs, req, outputs);
} else {
return LayerNormGPUContig<false>(param, ctx, inputs, req, outputs);
}
}
return LayerNormComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs);
}
/* Fused CUDA kernel for calculating the gradient w.r.t gamma/beta in LayerNorm when axis=-1
* (Contiguous case).
* The gradient of gamma and beta are:
* d_gamma = sum(out_grad * (x - mean) / std, axis=0)
* d_beta = sum(out_grad, axis=0)
*
* We compute the gradient (mainly reduction over a non-contiguous axis) using two steps to
* improve the parallelism.
*
* In the first step, we divide the rows uniformly into K parts. K independent threadblocks are used
* to calculate the partial reduction result of each part. Illustrated below:
*
* 1st Block 2nd Block 3rd Block k-th Block
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* part_gamma[0] part_gamma[1] part_gamma[2] part_gamma[k-1]
* part_beta[0] part_beta[1] part_beta[2] part_beta[k-1]
*
*
* In the second step, we sum up the row-values in part_gamma and part_beta.
*
* This `LayerNormFusedBackwardKernel_PartGammaBeta` function implements the first step and
* `LayerNormFusedBackwardKernel_GammaBeta` implements the second step.
*/
template<typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_PartGammaBeta(const int nbatch,
const int nchannel,
const DType* __restrict__ in_data,
const DType* __restrict__ out_grad,
const DType* __restrict__ mean_data,
const DType* __restrict__ std_data,
AType* __restrict__ part_gamma_grad,
AType* __restrict__ part_beta_grad) {
extern __shared__ char buf[];
AType* d_buf = reinterpret_cast<AType*>(buf);
const int npart = gridDim.y;
const int block_row_num = (nbatch + npart - 1) / npart;
// The rows are divided into `npart` parts. Each threadblock calculates the reduction result
// within the corresponding row ranges.
int row_stride = blockDim.x + 1;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
int r_begin = blockIdx.y * block_row_num;
int r_end = min((blockIdx.y + 1) * block_row_num, nbatch);
AType* buf_gamma_grad = d_buf;
AType* buf_beta_grad = d_buf + blockDim.y * row_stride;
AType local_gamma_grad = 0;
AType local_beta_grad = 0;
if (c < nchannel) {
for (int r_b = r_begin; r_b < r_end; r_b += blockDim.y) {
int r = r_b + threadIdx.y;
if (r < r_end) {
AType local_mean = static_cast<AType>(mean_data[r]);
AType local_std = static_cast<AType>(std_data[r]);
int read_idx = r * nchannel + c;
AType local_in_data = static_cast<AType>(in_data[read_idx]);
AType local_out_grad = static_cast<AType>(out_grad[read_idx]);
local_gamma_grad += (local_in_data - local_mean) / local_std * local_out_grad;
local_beta_grad += local_out_grad;
}
}
}
buf_gamma_grad[threadIdx.y * row_stride + threadIdx.x] = local_gamma_grad;
buf_beta_grad[threadIdx.y * row_stride + threadIdx.x] = local_beta_grad;
__syncthreads();
for (int offset = blockDim.y/2; offset > 1; offset >>= 1) {
if (threadIdx.y < offset) {
int idx1 = threadIdx.y * row_stride + threadIdx.x;
int idx2 = (threadIdx.y + offset) * row_stride + threadIdx.x;
buf_gamma_grad[idx1] += buf_gamma_grad[idx2];
buf_beta_grad[idx1] += buf_beta_grad[idx2];
}
__syncthreads();
}
if (threadIdx.y == 0 && c < nchannel) {
part_gamma_grad[blockIdx.y * nchannel + c] = buf_gamma_grad[threadIdx.x]
+ buf_gamma_grad[threadIdx.x + row_stride];
part_beta_grad[blockIdx.y * nchannel + c] = buf_beta_grad[threadIdx.x]
+ buf_beta_grad[threadIdx.x + row_stride];
}
}
template<bool gamma_addto, bool beta_addto, typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_GammaBeta(const int nbatch,
const int nchannel,
const int npart,
const AType* __restrict__ part_gamma_grad,
const AType* __restrict__ part_beta_grad,
DType* gamma_grad,
DType* beta_grad) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (c < nchannel) {
extern __shared__ char buf[];
AType* buf_gamma_grad = reinterpret_cast<AType*>(buf);
AType* buf_beta_grad = reinterpret_cast<AType*>(buf) + blockDim.x * blockDim.y;
buf_gamma_grad[tid] = 0;
buf_beta_grad[tid] = 0;
for (int r = threadIdx.y; r < npart; r += blockDim.y) {
buf_gamma_grad[tid] += part_gamma_grad[r * nchannel + c];
buf_beta_grad[tid] += part_beta_grad[r * nchannel + c];
}
__syncthreads();
// Begin for inter-warp reduce
if (npart > 1) {
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
if (threadIdx.y < offset) {
int idx1 = tid;
int idx2 = tid + offset * blockDim.x;
buf_gamma_grad[idx1] += buf_gamma_grad[idx2];
buf_beta_grad[idx1] += buf_beta_grad[idx2];
}
__syncthreads();
}
}
if (threadIdx.y == 0) {
if (gamma_grad) {
if (gamma_addto) {
gamma_grad[c] += static_cast<DType>(buf_gamma_grad[threadIdx.x]);
} else {
gamma_grad[c] = static_cast<DType>(buf_gamma_grad[threadIdx.x]);
}
}
if (beta_grad) {
if (beta_addto) {
beta_grad[c] += static_cast<DType>(buf_beta_grad[threadIdx.x]);
} else {
beta_grad[c] = static_cast<DType>(buf_beta_grad[threadIdx.x]);
}
}
}
}
}
/* Fused CUDA kernel for calculating the gradient w.r.t the data in LayerNorm
 * when axis=-1 (Contiguous case). Each threadblock handles one row and computes
 *   data_grad = out_grad * gamma / std - mean(out_grad * gamma / std, axis=-1)
 *               - (x - mean) / std * mean(out_grad * gamma * (x - mean) / std^2, axis=-1)
 */
template<int LOAD_UNROLL, bool data_addto, typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_Data(const int nbatch,
const int nchannel,
const DType* __restrict__ in_data,
const DType* __restrict__ out_grad,
const DType* __restrict__ mean_data,
const DType* __restrict__ std_data,
const DType* __restrict__ gamma,
DType* data_grad) {
int bid = blockIdx.x + blockIdx.y * gridDim.x;
const int nthread = blockDim.x * blockDim.y;
if (bid < nbatch) {
// Shared memory with size blockDim.y * blockDim.x * sizeof(DType)
extern __shared__ char buf[];
int tid = threadIdx.x + threadIdx.y * blockDim.x;
// 1. Calculate: mean(out_grad * gamma / std, axis=-1)
// mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
AType sum_val0 = 0; // Stores mean(out_grad * gamma / std, axis=-1)
AType sum_val1 = 0; // Stores mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
AType mean = static_cast<AType>(mean_data[bid]);
AType invstd_eps = AType(1) / static_cast<AType>(std_data[bid]);
int l = LOAD_UNROLL * tid;
for (; l + LOAD_UNROLL - 1 < nchannel; l += nthread * LOAD_UNROLL) {
#pragma unroll
for (int i = 0; i < LOAD_UNROLL; ++i) {
AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l + i]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l + i]);
AType ele_gamma = static_cast<AType>(gamma[l + i]);
sum_val0 += ele_og * ele_gamma * invstd_eps;
sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps;
}
}
for (; l < nchannel; ++l) {
AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]);
AType ele_gamma = static_cast<AType>(gamma[l]);
sum_val0 += ele_og * ele_gamma * invstd_eps;
sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps;
}
// Intra-warp reduction (all-reduce)
for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) {
sum_val0 += warp_shfl_xor(sum_val0, mask);
sum_val1 += warp_shfl_xor(sum_val1, mask);
}
// Inter-warp reduction (all-reduce)
if (blockDim.y > 1) {
AType* sum_val0_buf = reinterpret_cast<AType*>(buf);
AType* sum_val1_buf =
reinterpret_cast<AType*>(buf + blockDim.y / 2 * blockDim.x * sizeof(AType));
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
sum_val0_buf[idx] = sum_val0;
sum_val1_buf[idx] = sum_val1;
}
__syncthreads();
if (threadIdx.y < offset) {
const int idx = threadIdx.y * blockDim.x + threadIdx.x;
sum_val0 += sum_val0_buf[idx];
sum_val1 += sum_val1_buf[idx];
}
__syncthreads();
}
if (threadIdx.y == 0) {
sum_val0_buf[threadIdx.x] = sum_val0;
sum_val1_buf[threadIdx.x] = sum_val1;
}
__syncthreads();
sum_val0 = sum_val0_buf[threadIdx.x];
sum_val1 = sum_val1_buf[threadIdx.x];
}
sum_val0 /= nchannel;
sum_val1 /= nchannel;
// 2. Calculate the gradient as
// out_grad * gamma / std - sum_val0 - (x - mean) / std * sum_val1
for (int l = tid; l < nchannel; l += nthread) {
AType ele_out_grad = static_cast<AType>(out_grad[bid * nchannel + l]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]);
AType ele_gamma = static_cast<AType>(gamma[l]);
if (data_addto) {
data_grad[bid * nchannel + l] +=
static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps
- sum_val0 - (ele_x - mean) * invstd_eps * sum_val1);
} else {
data_grad[bid * nchannel + l] =
static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps - sum_val0
- (ele_x - mean) * invstd_eps * sum_val1);
}
}
}
}
void GetGammaBetaGradKernelParams(const int nbatch, const int nchannel,
dim3* part_grad_block_dim, dim3* part_grad_grid_dim,
dim3* gb_block_dim, dim3* gb_grid_dim,
int* npart) {
*npart = 16;
*part_grad_block_dim = dim3(32, 16);
*part_grad_grid_dim = dim3((nchannel + 32 - 1) / 32, *npart);
*gb_block_dim = dim3(32, *npart);
*gb_grid_dim = dim3((nchannel + 32 - 1) / 32);
CheckLaunchParam(*part_grad_grid_dim, *part_grad_block_dim);
CheckLaunchParam(*gb_grid_dim, *gb_block_dim);
}
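// Example (added illustration, not in the original source): for nchannel = 1000
// this yields part_grad_grid_dim = (32, 16) blocks of 32x16 threads for the
// first (partial) pass, and gb_grid_dim = (32,) blocks of 32x16 threads that
// reduce the 16 partial rows in the second pass.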
template<bool safe_acc = false>
void LayerNormGradGPUContig(const LayerNormParam param,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 5U);
const TBlob out_grad = inputs[0];
const TBlob in_data = inputs[1];
const TBlob gamma = inputs[2];
const TBlob mean_data = inputs[3];
const TBlob std_data = inputs[4];
const TBlob data_grad = outputs[0];
const TBlob gamma_grad = outputs[1];
const TBlob beta_grad = outputs[2];
// Make sure the inputs are contiguous
CHECK_EQ(out_grad.CheckContiguous(), true);
CHECK_EQ(in_data.CheckContiguous(), true);
CHECK_EQ(gamma.CheckContiguous(), true);
CHECK_EQ(mean_data.CheckContiguous(), true);
CHECK_EQ(std_data.CheckContiguous(), true);
int nbatch = in_data.shape_.ProdShape(0, in_data.ndim() - 1);
int nchannel = in_data.shape_[in_data.ndim() - 1];
int data_grad_req = req[0];
int gamma_grad_req = req[1];
int beta_grad_req = req[2];
CHECK_NE(data_grad_req, kWriteInplace);
CHECK_NE(gamma_grad_req, kWriteInplace);
CHECK_NE(beta_grad_req, kWriteInplace);
Stream<gpu> *s = ctx.get_stream<gpu>();
hipStream_t stream = Stream<gpu>::GetStream(s);
// Calculate the gradient for gamma/beta
CHECK_EQ(gamma_grad.CheckContiguous(), true);
CHECK_EQ(beta_grad.CheckContiguous(), true);
dim3 part_grad_block_dim, part_grad_grid_dim, gb_block_dim, gb_grid_dim;
int npart;
GetGammaBetaGradKernelParams(nbatch, nchannel, &part_grad_block_dim, &part_grad_grid_dim,
&gb_block_dim, &gb_grid_dim, &npart);
if (gamma_grad_req != kNullOp || beta_grad_req != kNullOp) {
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
Tensor<gpu, 1, AType> workspace =
ctx.requested[0].get_space_typed<gpu, 1, AType>(Shape1(2 * npart * nchannel), s);
AType* part_gamma_grad_ptr = workspace.dptr_;
AType* part_beta_grad_ptr = workspace.dptr_ + npart * nchannel;
const int nshared_K1 = 2 * (part_grad_block_dim.x + 1)
* part_grad_block_dim.y * sizeof(AType);
const int nshared_K2 = 2 * gb_block_dim.x * gb_block_dim.y * sizeof(AType);
DType* gamma_grad_ptr = (gamma_grad_req != kNullOp) ? gamma_grad.dptr<DType>() : nullptr;
DType* beta_grad_ptr = (beta_grad_req != kNullOp) ? beta_grad.dptr<DType>() : nullptr;
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_PartGammaBeta)
, dim3(part_grad_grid_dim), dim3(part_grad_block_dim), nshared_K1, stream,
nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(),
mean_data.dptr<DType>(), std_data.dptr<DType>(), part_gamma_grad_ptr, part_beta_grad_ptr);
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_PartGammaBeta);
if (gamma_grad_req == kAddTo && beta_grad_req != kAddTo) {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<true, false>)
, dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream,
nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else if (gamma_grad_req != kAddTo && beta_grad_req == kAddTo) {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<false, true>)
, dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream,
nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else if (gamma_grad_req == kAddTo && beta_grad_req == kAddTo) {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<true, true>)
, dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream,
nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_GammaBeta<false, false>)
, dim3(gb_grid_dim), dim3(gb_block_dim), nshared_K2, stream,
nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
}
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_GammaBeta);
}
// Calculate the gradient for data
CHECK_EQ(data_grad.CheckContiguous(), true);
int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch;
int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1;
const dim3 data_grid_dim(ngrid_x, ngrid_y);
int nthread_y;
if (nchannel <= 32) {
nthread_y = 1;
} else if (nchannel <= 128) {
nthread_y = 2;
} else if (nchannel <= 512) {
nthread_y = 4;
} else {
nthread_y = 8;
}
const dim3 data_block_dim(32, nthread_y);
const int LOAD_UNROLL = 4;
if (data_grad_req != kNullOp) {
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
int nshared = data_block_dim.y > 1 ? data_block_dim.y * data_block_dim.x * sizeof(AType) : 0;
CheckLaunchParam(data_grid_dim, data_block_dim);
if (data_grad_req == kAddTo) {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, true, AType>)
, dim3(data_grid_dim), dim3(data_block_dim), nshared, stream,
nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(),
std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>());
} else {
hipLaunchKernelGGL(( LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, false, AType>)
, dim3(data_grid_dim), dim3(data_block_dim), nshared, stream,
nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(),
std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>());
}
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_Data);
}
}
template<>
void LayerNormGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed);
int axis = param.axis;
if (axis < 0) {
axis += static_cast<int>(inputs[0].ndim());
}
CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis;
if (axis == inputs[0].ndim() - 1) {
// Use the accelerated CUDA kernels
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
if (safe_acc) {
return LayerNormGradGPUContig<true>(param, ctx, inputs, req, outputs);
} else {
return LayerNormGradGPUContig<false>(param, ctx, inputs, req, outputs);
}
}
return LayerNormGradComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs);
}
NNVM_REGISTER_OP(LayerNorm)
.set_attr<FCompute>("FCompute<gpu>", LayerNormCompute<gpu>);
NNVM_REGISTER_OP(_backward_LayerNorm)
.set_attr<FCompute>("FCompute<gpu>", LayerNormGradCompute<gpu>);
} // namespace op
} // namespace mxnet
| 9f8398cc513828f370f3fde9fd2752576e555eef.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file layer_norm.cu
 * \brief Implements Ba et al., Layer Normalization (https://arxiv.org/abs/1607.06450).
*/
#include "./layer_norm-inl.h"
using namespace mshadow::cuda;
namespace mxnet {
namespace op {
template <typename DType>
__device__ __forceinline__ DType warp_shfl(DType value, int src_lane,
int width = 32, unsigned int mask = 0xffffffff) {
#if CUDA_VERSION >= 9000
return __shfl_sync(mask, value, src_lane, width);
#else
return __shfl(value, src_lane, width);
#endif
}
template <typename DType>
__device__ __forceinline__ DType warp_shfl_xor(DType value, int laneMask,
int width = 32, unsigned int mask = 0xffffffff) {
#if CUDA_VERSION >= 9000
return __shfl_xor_sync(mask, value, laneMask, width);
#else
return __shfl_xor(value, laneMask, width);
#endif
}
/* A single updating step of Welford's online algorithm to calculate the mean and variance.
* The value 'curr' will be accumulated to the (mean, sigma2, count) triplet.
*
*/
template<typename DType, typename IType>
__device__ __forceinline__ void StepWelfordOnlineSum(const DType curr,
DType& mean, //NOLINT
DType& sigma2, //NOLINT
IType& count) { //NOLINT
count += IType(1);
DType delta = curr - mean;
mean += delta / count;
sigma2 += delta * (curr - mean);
}
/* Merge the mean/variance of two partitions. It's the key step of the Chan's parallel algorithm.
* The (lhs_mean, lhs_sigma2, lhs_count) will be merged into (rhs_mean, rhs_sigma2, rhs_count)
*
* See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance for more details.
*
* TODO(sxjscience) Explore the possibility of int lhs_count and rhs_count
*/
template<typename DType, typename IType>
__device__ __inline__ void ChanMergePartition(const DType lhs_mean,
const DType lhs_sigma2,
const IType lhs_count,
DType& rhs_mean, //NOLINT
DType& rhs_sigma2, //NOLINT
IType& rhs_count) { //NOLINT
DType delta = rhs_mean - lhs_mean;
DType nA = static_cast<DType>(lhs_count);
DType nB = static_cast<DType>(rhs_count);
rhs_count = nA + nB;
if (rhs_count > DType(0)) {
nA = nA / rhs_count;
nB = nB / rhs_count;
rhs_mean = nA * lhs_mean + nB * rhs_mean;
rhs_sigma2 = rhs_sigma2 + lhs_sigma2 + delta * delta * nA * nB * rhs_count;
} else {
rhs_mean = DType(0);
rhs_sigma2 = DType(0);
}
}
/* Split the input column into multiple partitions and compute the mean/sigma of each partition.
* Each thread will keep a mean/sigma2. The mean/sigma2 can be further merged to get the mean and
* sigma2 of the column.
*/
template<typename AType, typename DType, typename IType>
__device__ __forceinline__ void BlockWelfordOnlineSum(const DType* __restrict__ col_vals,
const int nchannel,
AType& mean, //NOLINT
AType& sigma2, //NOLINT
IType& count) { //NOLINT
int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int nthread = blockDim.x * blockDim.y;
// Each thread takes charge of 4 consecutive numbers. This should optimize the loading speed using
// vectorized types like float4.
// Also, to minimize branch divergence, we split the for-loop into two parts.
int l = 4 * tid;
for (; l + 3 < nchannel; l += 4 * nthread) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
StepWelfordOnlineSum(static_cast<AType>(col_vals[l + i]), mean, sigma2, count);
}
}
for (; l < nchannel; ++l) {
StepWelfordOnlineSum(static_cast<AType>(col_vals[l]), mean, sigma2, count);
}
}
template<>
__device__ __forceinline__
void BlockWelfordOnlineSum<float, mshadow::half::half_t, int>
(const mshadow::half::half_t* __restrict__ col_vals,
const int nchannel,
float& mean, //NOLINT
float& sigma2, //NOLINT
int& count) { //NOLINT
int tid = threadIdx.x + threadIdx.y * blockDim.x;
const int nthread = blockDim.x * blockDim.y;
// We cast the input half pointer to half2 to optimize the loading speed.
// Here, we need to notice that CUDA forces memory alignment, i.e.,
// ASSERT static_cast<size_t>(ptr) % sizeof(dtype) == 0.
// Thus, we need to shift the address of the half pointer to be aligned by half2.
int align_shift = (reinterpret_cast<size_t>(col_vals) % 4) != 0;
int padding = (nchannel - align_shift) % 2;
int half2_size = (nchannel - align_shift) / 2;
const __half2* half2_col_vals = reinterpret_cast<const __half2*>(col_vals + align_shift);
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (align_shift) {
StepWelfordOnlineSum(__half2float(col_vals[0].cuhalf_), mean, sigma2, count);
}
if (padding) {
StepWelfordOnlineSum(__half2float(col_vals[nchannel - 1].cuhalf_), mean, sigma2, count);
}
}
for (int l = tid; l < half2_size; l += nthread) {
float2 ele_val = __half22float2(half2_col_vals[l]);
StepWelfordOnlineSum(ele_val.x, mean, sigma2, count);
StepWelfordOnlineSum(ele_val.y, mean, sigma2, count);
}
}
/* Fused CUDA kernel for the forward pass of layer normalization.
* It computes the LayerNorm when axis=-1, i.e., contiguous reduction scenario.
* Shape of the input tensors:
* in_data = (nbatch, nchannel)
* gamma = (nchannel,)
* beta = (nchannel,)
 * out_data = (nbatch, nchannel)
 * mean_data = (nbatch,)
 * std_data = (nbatch,)
 * It's always launched with blockDim.x = WARP_SIZE and a tunable blockDim.y
 * Also, when blockDim.y > 1, it requires shared memory that has size:
 * sizeof(AType) * blockDim.x * blockDim.y + sizeof(int) * blockDim.x * blockDim.y / 2
*/
template<typename AType, typename DType, typename IType>
__global__ void LayerNormFusedForwardKernelContig(const int nbatch,
const int nchannel,
const AType eps,
const DType* __restrict__ in_data,
const DType* __restrict__ gamma,
const DType* __restrict__ beta,
DType* __restrict__ out_data,
DType* __restrict__ mean_data,
DType* __restrict__ std_data) {
int bid = blockIdx.x + blockIdx.y * gridDim.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int nthread = blockDim.x * blockDim.y;
IType count = 0;
AType mean = 0;
AType sigma2 = 0;
if (bid < nbatch) {
extern __shared__ char buf[]; // Shared memory
const DType* col_vals = in_data + bid * nchannel;
BlockWelfordOnlineSum(col_vals, nchannel, mean, sigma2, count);
// Merge the mean/sigma2 within a warp
// Use the Chan's Parallel Algorithm to merge all (mean, sigma2, counts)
// within a warp of threads.
// After calling the function, threadIdx.x == 0 will store the result of
// the aggregated (mean, sigma2, counts).
for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) {
AType meanB = warp_shfl_xor(mean, mask);
AType sigma2B = warp_shfl_xor(sigma2, mask);
IType countB = warp_shfl_xor(count, mask);
ChanMergePartition(meanB, sigma2B, countB, mean, sigma2, count);
}
if (blockDim.y > 1) {
// Inter-warp reduction. Copy the upper-half of the warps to shared memory
// and merge with the lower-half warp
AType* mean_buf = reinterpret_cast<AType*>(buf);
AType* sigma2_buf =
reinterpret_cast<AType*>(buf + sizeof(AType) * blockDim.y / 2 * blockDim.x);
IType* count_buf = reinterpret_cast<IType*>(buf + sizeof(AType) * blockDim.y * blockDim.x);
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
mean_buf[idx] = mean;
sigma2_buf[idx] = sigma2;
count_buf[idx] = count;
}
__syncthreads();
if (threadIdx.y < offset) {
const int idx = threadIdx.y * blockDim.x + threadIdx.x;
ChanMergePartition(mean_buf[idx], sigma2_buf[idx], count_buf[idx], mean, sigma2, count);
}
__syncthreads();
}
// Broadcast the result to all threads
if (threadIdx.y == 0) {
mean_buf[threadIdx.x] = mean;
sigma2_buf[threadIdx.x] = sigma2;
}
__syncthreads();
mean = mean_buf[threadIdx.x];
sigma2 = sigma2_buf[threadIdx.x] / nchannel;
} else {
sigma2 /= nchannel;
}
// Calculate the out_data: gamma * (x - mean) / sqrt(var + eps) + beta
AType std_eps = sqrt(sigma2 + eps);
AType invstd_eps = DType(1.0) / std_eps;
DType* out_col_val = out_data + bid * nchannel;
if (gamma != nullptr && beta != nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps *
(static_cast<AType>(col_vals[i]) - mean))
+ beta[i];
}
} else if (gamma == nullptr && beta != nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean))
+ beta[i];
}
} else if (gamma != nullptr && beta == nullptr) {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = gamma[i] * static_cast<DType>(invstd_eps *
(static_cast<AType>(col_vals[i]) - mean));
}
} else {
for (int i = tid; i < nchannel; i += nthread) {
out_col_val[i] = static_cast<DType>(invstd_eps * (static_cast<AType>(col_vals[i]) - mean));
}
}
// Write the out_data and var_data
if (threadIdx.x == 0 && threadIdx.y == 0) {
mean_data[bid] = static_cast<DType>(mean);
std_data[bid] = static_cast<DType>(std_eps);
}
}
}
template<bool safe_acc = false>
void LayerNormGPUContig(const LayerNormParam param,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 3U);
mxnet::TShape data_shape(2, 0);
mxnet::TShape mean_shape(1, 0);
size_t in_ndim = inputs[layernorm::kData].ndim();
data_shape[0] = mean_shape[0] = inputs[layernorm::kData].shape_.ProdShape(0, in_ndim - 1);
data_shape[1] = inputs[layernorm::kData].shape_[in_ndim - 1];
const TBlob in_data = inputs[layernorm::kData].reshape(data_shape);
const TBlob gamma = inputs[layernorm::kGamma];
const TBlob beta = inputs[layernorm::kBeta];
const TBlob out_data = outputs[layernorm::kOut].reshape(data_shape);
const TBlob mean_data = outputs[layernorm::kMean].reshape(mean_shape);
const TBlob std_data = outputs[layernorm::kStd].reshape(mean_shape);
// Make sure the inputs are contiguous
CHECK_EQ(in_data.CheckContiguous(), true);
CHECK_EQ(gamma.CheckContiguous(), true);
CHECK_EQ(beta.CheckContiguous(), true);
CHECK_EQ(out_data.CheckContiguous(), true);
CHECK_EQ(mean_data.CheckContiguous(), true);
CHECK_EQ(std_data.CheckContiguous(), true);
  // Launch the kernel. The dynamic shared memory size is
  // sizeof(AType) * blockDim.y * blockDim.x + sizeof(int) * blockDim.y / 2 * blockDim.x
int nbatch = data_shape[0];
int nchannel = data_shape[1];
float eps = param.eps;
int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch;
int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1;
int nthread_y;
const dim3 dimGrid(ngrid_x, ngrid_y);
if (nchannel <= 128) {
nthread_y = 1;
} else if (nchannel <= 512) {
nthread_y = 2;
} else {
nthread_y = 4;
}
cudaStream_t stream = Stream<gpu>::GetStream(ctx.get_stream<gpu>());
const dim3 dimBlock(32, nthread_y);
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
int nshared = nthread_y > 1 ? nthread_y * 32 * sizeof(AType)
+ (nthread_y / 2) * 32 * sizeof(int) : 0;
CheckLaunchParam(dimGrid, dimBlock);
LayerNormFusedForwardKernelContig<AType, DType, int> <<<dimGrid, dimBlock, nshared, stream>>>
(nbatch, nchannel, static_cast<AType>(eps),
in_data.dptr<DType>(), gamma.dptr<DType>(), beta.dptr<DType>(),
out_data.dptr<DType>(), mean_data.dptr<DType>(), std_data.dptr<DType>());
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedForwardKernelContig);
}
template<>
void LayerNormCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed);
if (req[0] == kNullOp) return;
CHECK_NE(req[0], kAddTo);
int axis = param.axis;
if (axis < 0) {
axis += static_cast<int>(inputs[0].ndim());
}
CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis;
if (axis == inputs[0].ndim() - 1) {
// Try to use the accelerated CUDA kernels
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for LayerNorm with float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
if (safe_acc) {
return LayerNormGPUContig<true>(param, ctx, inputs, req, outputs);
} else {
return LayerNormGPUContig<false>(param, ctx, inputs, req, outputs);
}
}
return LayerNormComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs);
}
/* Fused CUDA kernel for calculating the gradient w.r.t gamma/beta in LayerNorm when axis=-1
* (Contiguous case).
* The gradient of gamma and beta are:
* d_gamma = sum(out_grad * (x - mean) / std, axis=0)
* d_beta = sum(out_grad, axis=0)
*
* We compute the gradient (mainly reduction over a non-contiguous axis) using two steps to
* improve the parallelism.
*
* In the first step, we divide the rows uniformly into K parts. K independent threadblocks are used
* to calculate the partial reduction result of each part. Illustrated below:
*
* 1st Block 2nd Block 3rd Block k-th Block
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* | --------------- | ---------------- | --------------- | ... | ---------------- |
* part_gamma[0] part_gamma[1] part_gamma[2] part_gamma[k-1]
* part_beta[0] part_beta[1] part_beta[2] part_beta[k-1]
*
*
* In the second step, we sum up the row-values in part_gamma and part_beta.
*
* This `LayerNormFusedBackwardKernel_PartGammaBeta` function implements the first step and
* `LayerNormFusedBackwardKernel_GammaBeta` implements the second step.
*/
template<typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_PartGammaBeta(const int nbatch,
const int nchannel,
const DType* __restrict__ in_data,
const DType* __restrict__ out_grad,
const DType* __restrict__ mean_data,
const DType* __restrict__ std_data,
AType* __restrict__ part_gamma_grad,
AType* __restrict__ part_beta_grad) {
extern __shared__ char buf[];
AType* d_buf = reinterpret_cast<AType*>(buf);
const int npart = gridDim.y;
const int block_row_num = (nbatch + npart - 1) / npart;
// The rows are divided into `npart` parts. Each threadblock calculates the reduction result
// within the corresponding row ranges.
int row_stride = blockDim.x + 1;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
int r_begin = blockIdx.y * block_row_num;
int r_end = min((blockIdx.y + 1) * block_row_num, nbatch);
AType* buf_gamma_grad = d_buf;
AType* buf_beta_grad = d_buf + blockDim.y * row_stride;
AType local_gamma_grad = 0;
AType local_beta_grad = 0;
if (c < nchannel) {
for (int r_b = r_begin; r_b < r_end; r_b += blockDim.y) {
int r = r_b + threadIdx.y;
if (r < r_end) {
AType local_mean = static_cast<AType>(mean_data[r]);
AType local_std = static_cast<AType>(std_data[r]);
int read_idx = r * nchannel + c;
AType local_in_data = static_cast<AType>(in_data[read_idx]);
AType local_out_grad = static_cast<AType>(out_grad[read_idx]);
local_gamma_grad += (local_in_data - local_mean) / local_std * local_out_grad;
local_beta_grad += local_out_grad;
}
}
}
buf_gamma_grad[threadIdx.y * row_stride + threadIdx.x] = local_gamma_grad;
buf_beta_grad[threadIdx.y * row_stride + threadIdx.x] = local_beta_grad;
__syncthreads();
for (int offset = blockDim.y/2; offset > 1; offset >>= 1) {
if (threadIdx.y < offset) {
int idx1 = threadIdx.y * row_stride + threadIdx.x;
int idx2 = (threadIdx.y + offset) * row_stride + threadIdx.x;
buf_gamma_grad[idx1] += buf_gamma_grad[idx2];
buf_beta_grad[idx1] += buf_beta_grad[idx2];
}
__syncthreads();
}
if (threadIdx.y == 0 && c < nchannel) {
part_gamma_grad[blockIdx.y * nchannel + c] = buf_gamma_grad[threadIdx.x]
+ buf_gamma_grad[threadIdx.x + row_stride];
part_beta_grad[blockIdx.y * nchannel + c] = buf_beta_grad[threadIdx.x]
+ buf_beta_grad[threadIdx.x + row_stride];
}
}
template<bool gamma_addto, bool beta_addto, typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_GammaBeta(const int nbatch,
const int nchannel,
const int npart,
const AType* __restrict__ part_gamma_grad,
const AType* __restrict__ part_beta_grad,
DType* gamma_grad,
DType* beta_grad) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (c < nchannel) {
extern __shared__ char buf[];
AType* buf_gamma_grad = reinterpret_cast<AType*>(buf);
AType* buf_beta_grad = reinterpret_cast<AType*>(buf) + blockDim.x * blockDim.y;
buf_gamma_grad[tid] = 0;
buf_beta_grad[tid] = 0;
for (int r = threadIdx.y; r < npart; r += blockDim.y) {
buf_gamma_grad[tid] += part_gamma_grad[r * nchannel + c];
buf_beta_grad[tid] += part_beta_grad[r * nchannel + c];
}
__syncthreads();
// Begin for inter-warp reduce
if (npart > 1) {
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
if (threadIdx.y < offset) {
int idx1 = tid;
int idx2 = tid + offset * blockDim.x;
buf_gamma_grad[idx1] += buf_gamma_grad[idx2];
buf_beta_grad[idx1] += buf_beta_grad[idx2];
}
__syncthreads();
}
}
if (threadIdx.y == 0) {
if (gamma_grad) {
if (gamma_addto) {
gamma_grad[c] += static_cast<DType>(buf_gamma_grad[threadIdx.x]);
} else {
gamma_grad[c] = static_cast<DType>(buf_gamma_grad[threadIdx.x]);
}
}
if (beta_grad) {
if (beta_addto) {
beta_grad[c] += static_cast<DType>(buf_beta_grad[threadIdx.x]);
} else {
beta_grad[c] = static_cast<DType>(buf_beta_grad[threadIdx.x]);
}
}
}
}
}
/* Fused CUDA kernel for calculating the gradient w.r.t the data in LayerNorm
 * when axis=-1 (Contiguous case). Each threadblock handles one row and computes
 *   data_grad = out_grad * gamma / std - mean(out_grad * gamma / std, axis=-1)
 *               - (x - mean) / std * mean(out_grad * gamma * (x - mean) / std^2, axis=-1)
 */
template<int LOAD_UNROLL, bool data_addto, typename AType, typename DType>
__global__ void LayerNormFusedBackwardKernel_Data(const int nbatch,
const int nchannel,
const DType* __restrict__ in_data,
const DType* __restrict__ out_grad,
const DType* __restrict__ mean_data,
const DType* __restrict__ std_data,
const DType* __restrict__ gamma,
DType* data_grad) {
int bid = blockIdx.x + blockIdx.y * gridDim.x;
const int nthread = blockDim.x * blockDim.y;
if (bid < nbatch) {
// Shared memory with size blockDim.y * blockDim.x * sizeof(DType)
extern __shared__ char buf[];
int tid = threadIdx.x + threadIdx.y * blockDim.x;
// 1. Calculate: mean(out_grad * gamma / std, axis=-1)
// mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
AType sum_val0 = 0; // Stores mean(out_grad * gamma / std, axis=-1)
AType sum_val1 = 0; // Stores mean(out_grad * gamma / std * (x - mean) / std, axis=-1)
AType mean = static_cast<AType>(mean_data[bid]);
AType invstd_eps = AType(1) / static_cast<AType>(std_data[bid]);
int l = LOAD_UNROLL * tid;
for (; l + LOAD_UNROLL - 1 < nchannel; l += nthread * LOAD_UNROLL) {
#pragma unroll
for (int i = 0; i < LOAD_UNROLL; ++i) {
AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l + i]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l + i]);
AType ele_gamma = static_cast<AType>(gamma[l + i]);
sum_val0 += ele_og * ele_gamma * invstd_eps;
sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps;
}
}
for (; l < nchannel; ++l) {
AType ele_og = static_cast<AType>(out_grad[bid * nchannel + l]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]);
AType ele_gamma = static_cast<AType>(gamma[l]);
sum_val0 += ele_og * ele_gamma * invstd_eps;
sum_val1 += ele_og * ele_gamma * (ele_x - mean) * invstd_eps * invstd_eps;
}
// Intra-warp reduction (all-reduce)
for (int mask = blockDim.x / 2; mask > 0; mask >>= 1) {
sum_val0 += warp_shfl_xor(sum_val0, mask);
sum_val1 += warp_shfl_xor(sum_val1, mask);
}
// Inter-warp reduction (all-reduce)
if (blockDim.y > 1) {
AType* sum_val0_buf = reinterpret_cast<AType*>(buf);
AType* sum_val1_buf =
reinterpret_cast<AType*>(buf + blockDim.y / 2 * blockDim.x * sizeof(AType));
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
if (threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x;
sum_val0_buf[idx] = sum_val0;
sum_val1_buf[idx] = sum_val1;
}
__syncthreads();
if (threadIdx.y < offset) {
const int idx = threadIdx.y * blockDim.x + threadIdx.x;
sum_val0 += sum_val0_buf[idx];
sum_val1 += sum_val1_buf[idx];
}
__syncthreads();
}
if (threadIdx.y == 0) {
sum_val0_buf[threadIdx.x] = sum_val0;
sum_val1_buf[threadIdx.x] = sum_val1;
}
__syncthreads();
sum_val0 = sum_val0_buf[threadIdx.x];
sum_val1 = sum_val1_buf[threadIdx.x];
}
sum_val0 /= nchannel;
sum_val1 /= nchannel;
// 2. Calculate the gradient as
// out_grad * gamma / std - sum_val0 - (x - mean) / std * sum_val1
for (int l = tid; l < nchannel; l += nthread) {
AType ele_out_grad = static_cast<AType>(out_grad[bid * nchannel + l]);
AType ele_x = static_cast<AType>(in_data[bid * nchannel + l]);
AType ele_gamma = static_cast<AType>(gamma[l]);
if (data_addto) {
data_grad[bid * nchannel + l] +=
static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps
- sum_val0 - (ele_x - mean) * invstd_eps * sum_val1);
} else {
data_grad[bid * nchannel + l] =
static_cast<DType>(ele_out_grad * ele_gamma * invstd_eps - sum_val0
- (ele_x - mean) * invstd_eps * sum_val1);
}
}
}
}
void GetGammaBetaGradKernelParams(const int nbatch, const int nchannel,
dim3* part_grad_block_dim, dim3* part_grad_grid_dim,
dim3* gb_block_dim, dim3* gb_grid_dim,
int* npart) {
*npart = 16;
*part_grad_block_dim = dim3(32, 16);
*part_grad_grid_dim = dim3((nchannel + 32 - 1) / 32, *npart);
*gb_block_dim = dim3(32, *npart);
*gb_grid_dim = dim3((nchannel + 32 - 1) / 32);
CheckLaunchParam(*part_grad_grid_dim, *part_grad_block_dim);
CheckLaunchParam(*gb_grid_dim, *gb_block_dim);
}
template<bool safe_acc = false>
void LayerNormGradGPUContig(const LayerNormParam param,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 5U);
const TBlob out_grad = inputs[0];
const TBlob in_data = inputs[1];
const TBlob gamma = inputs[2];
const TBlob mean_data = inputs[3];
const TBlob std_data = inputs[4];
const TBlob data_grad = outputs[0];
const TBlob gamma_grad = outputs[1];
const TBlob beta_grad = outputs[2];
// Make sure the inputs are contiguous
CHECK_EQ(out_grad.CheckContiguous(), true);
CHECK_EQ(in_data.CheckContiguous(), true);
CHECK_EQ(gamma.CheckContiguous(), true);
CHECK_EQ(mean_data.CheckContiguous(), true);
CHECK_EQ(std_data.CheckContiguous(), true);
int nbatch = in_data.shape_.ProdShape(0, in_data.ndim() - 1);
int nchannel = in_data.shape_[in_data.ndim() - 1];
int data_grad_req = req[0];
int gamma_grad_req = req[1];
int beta_grad_req = req[2];
CHECK_NE(data_grad_req, kWriteInplace);
CHECK_NE(gamma_grad_req, kWriteInplace);
CHECK_NE(beta_grad_req, kWriteInplace);
Stream<gpu> *s = ctx.get_stream<gpu>();
cudaStream_t stream = Stream<gpu>::GetStream(s);
// Calculate the gradient for gamma/beta
CHECK_EQ(gamma_grad.CheckContiguous(), true);
CHECK_EQ(beta_grad.CheckContiguous(), true);
dim3 part_grad_block_dim, part_grad_grid_dim, gb_block_dim, gb_grid_dim;
int npart;
GetGammaBetaGradKernelParams(nbatch, nchannel, &part_grad_block_dim, &part_grad_grid_dim,
&gb_block_dim, &gb_grid_dim, &npart);
if (gamma_grad_req != kNullOp || beta_grad_req != kNullOp) {
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
Tensor<gpu, 1, AType> workspace =
ctx.requested[0].get_space_typed<gpu, 1, AType>(Shape1(2 * npart * nchannel), s);
AType* part_gamma_grad_ptr = workspace.dptr_;
AType* part_beta_grad_ptr = workspace.dptr_ + npart * nchannel;
const int nshared_K1 = 2 * (part_grad_block_dim.x + 1)
* part_grad_block_dim.y * sizeof(AType);
const int nshared_K2 = 2 * gb_block_dim.x * gb_block_dim.y * sizeof(AType);
DType* gamma_grad_ptr = (gamma_grad_req != kNullOp) ? gamma_grad.dptr<DType>() : nullptr;
DType* beta_grad_ptr = (beta_grad_req != kNullOp) ? beta_grad.dptr<DType>() : nullptr;
LayerNormFusedBackwardKernel_PartGammaBeta
<<<part_grad_grid_dim, part_grad_block_dim, nshared_K1, stream>>>
(nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(),
mean_data.dptr<DType>(), std_data.dptr<DType>(), part_gamma_grad_ptr, part_beta_grad_ptr);
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_PartGammaBeta);
if (gamma_grad_req == kAddTo && beta_grad_req != kAddTo) {
LayerNormFusedBackwardKernel_GammaBeta<true, false>
<<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>>
(nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else if (gamma_grad_req != kAddTo && beta_grad_req == kAddTo) {
LayerNormFusedBackwardKernel_GammaBeta<false, true>
<<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>>
(nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else if (gamma_grad_req == kAddTo && beta_grad_req == kAddTo) {
LayerNormFusedBackwardKernel_GammaBeta<true, true>
<<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>>
(nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
} else {
LayerNormFusedBackwardKernel_GammaBeta<false, false>
<<<gb_grid_dim, gb_block_dim, nshared_K2, stream>>>
(nbatch, nchannel, npart, part_gamma_grad_ptr, part_beta_grad_ptr,
gamma_grad_ptr, beta_grad_ptr);
}
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_GammaBeta);
}
// Calculate the gradient for data
CHECK_EQ(data_grad.CheckContiguous(), true);
int ngrid_x = (nbatch > kMaxGridDim) ? (nbatch + kBaseGridNum - 1) / kBaseGridNum : nbatch;
int ngrid_y = (nbatch > kMaxGridDim) ? kBaseGridNum : 1;
const dim3 data_grid_dim(ngrid_x, ngrid_y);
int nthread_y;
if (nchannel <= 32) {
nthread_y = 1;
} else if (nchannel <= 128) {
nthread_y = 2;
} else if (nchannel <= 512) {
nthread_y = 4;
} else {
nthread_y = 8;
}
const dim3 data_block_dim(32, nthread_y);
const int LOAD_UNROLL = 4;
if (data_grad_req != kNullOp) {
MXNET_REAL_ACC_TYPE_SWITCH(in_data.type_flag_, DType, AccType, {
typedef typename std::conditional<safe_acc, AccType, DType>::type AType;
int nshared = data_block_dim.y > 1 ? data_block_dim.y * data_block_dim.x * sizeof(AType) : 0;
CheckLaunchParam(data_grid_dim, data_block_dim);
if (data_grad_req == kAddTo) {
LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, true, AType>
<<<data_grid_dim, data_block_dim, nshared, stream>>>
(nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(),
std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>());
} else {
LayerNormFusedBackwardKernel_Data<LOAD_UNROLL, false, AType>
<<<data_grid_dim, data_block_dim, nshared, stream>>>
(nbatch, nchannel, in_data.dptr<DType>(), out_grad.dptr<DType>(), mean_data.dptr<DType>(),
std_data.dptr<DType>(), gamma.dptr<DType>(), data_grad.dptr<DType>());
}
});
MSHADOW_CUDA_POST_KERNEL_CHECK(LayerNormFusedBackwardKernel_Data);
}
}
template<>
void LayerNormGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const LayerNormParam& param = nnvm::get<LayerNormParam>(attrs.parsed);
int axis = param.axis;
if (axis < 0) {
axis += static_cast<int>(inputs[0].ndim());
}
CHECK(axis >= 0 && axis < inputs[0].ndim()) << "Channel axis out of range: " << param.axis;
if (axis == inputs[0].ndim() - 1) {
// Use the accelerated CUDA kernels
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
if (safe_acc) {
return LayerNormGradGPUContig<true>(param, ctx, inputs, req, outputs);
} else {
return LayerNormGradGPUContig<false>(param, ctx, inputs, req, outputs);
}
}
return LayerNormGradComputeGeneral<gpu>(attrs, ctx, inputs, req, outputs);
}
NNVM_REGISTER_OP(LayerNorm)
.set_attr<FCompute>("FCompute<gpu>", LayerNormCompute<gpu>);
NNVM_REGISTER_OP(_backward_LayerNorm)
.set_attr<FCompute>("FCompute<gpu>", LayerNormGradCompute<gpu>);
} // namespace op
} // namespace mxnet
|
a243106378409cc8baa2bc5239fa7c0b87f5b4a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************
*
* cuda-ltl.cu - Larger than Life with CUDA using shared memory
*
* Written by Martina Cavallucci <martina.cavallucci(at)studio.unibo.it>
* --------------------------------------------------------------------------
*
* This version of the Larger than Life uses shared memory and should
* work correctly with any domain size n.
* Compile with:
*
* nvcc cuda-ltl.cu -o cuda-ltl
*
* Run with:
* (Test it with the Game of Life parameters)
* ./cuda-ltl (R=)1 (B1=B2=)3 (D1=)3 (D2=)4 nsteps input_file output_file
*
****************************************************************************/
#include "hpc.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <ctype.h> /* for isdigit */
#define BLKSIZE 32
/* We assume the presence of 8 rows/columns of ghost cells per side,
using only those that are actually needed. */
#define HALO 8
/* We use 1D blocks to copy ghost cells; in this case we can use up to
1024 threads per block (for the GPUs on the lab machine) */
#define BLKSIZE_GHOST 1024
typedef unsigned char cell_t;
/* The struct of the bmap_t with a size and a point of unsigned char */
typedef struct {
int n;
cell_t *bmap;
} bmap_t;
/* The following function makes indexing of the two-dimensional CA
grid easier. Instead of writing, e.g., grid[i][j] (which you can
not do anyway, since the CA grids are passed around as pointers to
linear blocks of data), you write IDX(grid, n, i, j) to get a
pointer to grid[i][j]. This function assumes that the size of the
CA grid is (n + 2*HALO) * (n + 2*HALO), where the outermost HALO
rows/columns on each side are ghost cells.
Note the use of both the __device__ and __host__ qualifiers: this
function can be called both from host and device code. */
__device__ __host__ cell_t *IDX(cell_t *grid, int n, int i, int j)
{
return (grid + i*(n + 2 * HALO)+j);
}
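/* Example (illustrative): with HALO == 8 the allocated grid is
(n + 16) x (n + 16), so for n == 4 each row is 20 cells wide and
IDX(grid, 4, 8, 8) == grid + 8*20 + 8, the first interior cell. */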
/* Fill the ghost cells of |grid| in order to have cyclic boundary conditions*/
__global__ void copy_top_bottom(cell_t *grid, int n)
{
const int end = HALO + n - 1;
const int j = HALO + threadIdx.x + blockIdx.x * blockDim.x;
const int k = threadIdx.y + blockIdx.y * blockDim.y + 1;
/* Copy top and bottom */
if( k < HALO + 1){
if ( j < end + 1) {
*IDX(grid, n, end + k, j) = *IDX(grid, n, HALO + k - 1, j);
*IDX(grid, n, HALO - k, j) = *IDX(grid, n, end - k + 1, j);
}
}
}
__global__ void copy_left_right(cell_t *grid, int n )
{
const int end = HALO + n - 1;
const int i = HALO + threadIdx.x + blockIdx.x * blockDim.x;
const int k = threadIdx.y + blockIdx.y * blockDim.y + 1;
/* Copy left and right */
if( k < HALO + 1){
if ( i < end + 1 ) {
*IDX(grid, n, i, end + k) = *IDX(grid, n, i, HALO + k - 1);
*IDX(grid, n, i, HALO - k) = *IDX(grid, n, i, end - k + 1);
}
}
}
__global__ void copy_corners(cell_t *grid, int n)
{
const int i = threadIdx.y + blockIdx.y * blockDim.y;
const int j = threadIdx.x + blockIdx.x * blockDim.x;
/* Copy corners*/
if( i < HALO){
if (j < HALO){
*IDX(grid, n, i, j) = *IDX(grid, n, i + n, j + n);
*IDX(grid, n, i + HALO + n, j + HALO + n) = *IDX(grid, n, i + HALO, j + HALO);
*IDX(grid, n, i, j + HALO + n) = *IDX(grid, n, i + n, j + HALO);
*IDX(grid, n, i + HALO + n, j) = *IDX(grid, n, i + HALO, j + n);
}
}
}
/**
* Write the content of the bmap_t structure pointed to by ltl to the
* file f in PBM format and allocates space
* for the ghost cell that will be assigned.
* The caller is responsible for passing a
* pointer f to a file opened for writing
*/
void write_ltl( bmap_t* grid, FILE *f , int r )
{
const int n = grid->n;
fprintf(f, "P1\n");
fprintf(f, "# produced by ltl\n");
fprintf(f, "%d %d\n", n, n);
for (int i = r ; i < n + r; i++) {
for (int j = r ; j < n + r; j++) {
fprintf(f, "%d ", *IDX(grid->bmap, n, i, j));
}
fprintf(f, "\n");
}
}
/*Compute of the Larger than life*/
__global__ void compute_ltl( cell_t *cur, cell_t *next, int n, int r, int b1, int b2, int d1, int d2)
{
/* We assume the presence of 8 rows/columns of ghost cells per side,
using only those that are actually needed. */
__shared__ cell_t buf[BLKSIZE+2*HALO][BLKSIZE+2*HALO];
/* "global" indexes */
const int gi = HALO + threadIdx.y + blockIdx.y * blockDim.y;
const int gj = HALO + threadIdx.x + blockIdx.x * blockDim.x;
/* "local" indexes */
const int li = HALO + threadIdx.y;
const int lj = HALO + threadIdx.x;
int nbors = 0;
/*Copy elements from global memory to local memory of block*/
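/* Staging layout sketch: buf is a (BLKSIZE + 2*HALO)^2 tile. Every thread
loads its own cell at (li, lj); threads with li < 2*HALO also load the
strips HALO rows above and BLKSIZE rows below the tile, threads with
lj < 2*HALO load the left/right strips, and threads satisfying both load
the four corner patches. Out-of-range global reads fall back to 0. */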
if ( gi < n + 2 * HALO && gj < n + 2 * HALO ) {
buf[li][lj] = *IDX(cur, n, gi, gj);
if (li < 2 * HALO) { /* left-right */
buf[li - HALO ][lj] = *IDX(cur, n, gi - HALO, gj);
buf[li + BLKSIZE][lj] = (gi + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi + BLKSIZE, gj) : 0);
}
if (lj < 2 * HALO) { /* top-bottom */
buf[li][lj - HALO ] = *IDX(cur, n, gi, gj - HALO);
buf[li][lj + BLKSIZE] = (gj + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi, gj + BLKSIZE) : 0);
}
if (li < 2 * HALO && lj < 2 * HALO) { /* corners */
buf[li - HALO][lj - HALO ] = *IDX(cur, n, gi - HALO, gj - HALO);
buf[li - HALO][lj + BLKSIZE] = (gj + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi - HALO, gj + BLKSIZE) : 0);
buf[li + BLKSIZE][lj - HALO] = (gi + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi + BLKSIZE, gj - HALO) : 0);
buf[li + BLKSIZE][lj + BLKSIZE] = (gi + BLKSIZE < n + 2 * HALO && gj + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi + BLKSIZE, gj + BLKSIZE) : 0);
}
}
__syncthreads();
const int globali = r + threadIdx.y + blockIdx.y * blockDim.y;
const int globalj = r + threadIdx.x + blockIdx.x * blockDim.x;
const int localy = r + threadIdx.y;
const int localx = r + threadIdx.x;
int i,j;
for(i = localy - r ; i < localy + r ; i++){
for(j = localx - r; j < localx + r; j++){
nbors = nbors +
buf[i][j] ;
}
}
if( !buf[localy][localx] && nbors >= b1 && nbors <= b2){ // a dead cell is born
*IDX(next, n, globali, globalj) = 1; // set it as live
}else if(buf[localy][localx] && nbors + 1 >= d1 && nbors + 1 <= d2) // a live cell survives
{
*IDX(next, n, globali, globalj) = 1; // set it as live
}else{
*IDX(next, n, globali, globalj) = 0; // set it as dead
}
}
/**
* Read a PBM file from file f. The caller is responsible for passing
* a pointer f to a file opened for reading. This function is not very
* robust; it may fail on perfectly legal PBM images, but should work
* for the images produced by gen-input.c. Also, it should work with
* PBM images produced by Gimp (you must save them in "ASCII format"
* when prompted).
*/
void read_ltl( bmap_t *ltl, FILE* f, int r)
{
char buf[2048];
char *s;
int n, i, j;
int width, height;
/* Get the file type (must be "P1") */
s = fgets(buf, sizeof(buf), f);
if (0 != strcmp(s, "P1\n")) {
fprintf(stderr, "FATAL: Unsupported file type \"%s\"\n", buf);
exit(-1);
}
/* Get any comment and ignore it; does not work if there are
leading spaces in the comment line */
do {
s = fgets(buf, sizeof(buf), f);
} while (s[0] == '#');
/* Get width, height; since we are assuming square images, we
reject the input if width != height. */
sscanf(s, "%d %d", &width, &height);
if ( width != height ) {
fprintf(stderr, "FATAL: image width (%d) and height (%d) must be equal\n", width, height);
exit(-1);
}
ltl->n = n = width;
int ng = n + (2*HALO);
ltl->bmap = (cell_t*)malloc( ng * ng * sizeof(cell_t));
/* scan bitmap; each pixel is represented by a single numeric
character ('0' or '1'); spaces and other separators are ignored
(Gimp produces PBM files with no spaces between digits) */
for (i = HALO; i < n + HALO ; i++) {
for (j = HALO; j < n + HALO ; j++) {
int val;
do {
val = fgetc(f);
if ( EOF == val ) {
fprintf(stderr, "FATAL: error reading input\n");
exit(-1);
}
} while ( !isdigit(val) );
*IDX(ltl->bmap, n, i, j) = (val - '0');
}
}
}
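/* Example of a minimal PBM input accepted by read_ltl (illustrative):
P1
# produced by gen-input.c
2 2
1 0
0 1
The n x n payload is stored at rows/columns [HALO, HALO + n) of bmap. */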
int main( int argc, char* argv[] )
{
int R, B1, B2, D1, D2, nsteps,s;
const char *infile, *outfile;
FILE *in, *out;
bmap_t cur;
cell_t *d_cur, *d_next,*d_tmp;
double tstart, tend;
if ( argc != 9 ) {
fprintf(stderr, "Usage: %s R B1 B2 D1 D2 nsteps infile outfile\n", argv[0]);
return -1;
}
R = atoi(argv[1]);
B1 = atoi(argv[2]);
B2 = atoi(argv[3]);
D1 = atoi(argv[4]);
D2 = atoi(argv[5]);
nsteps = atoi(argv[6]);
infile = argv[7];
outfile = argv[8];
assert( R <= 8 );
assert( 0 <= B1 );
assert( B1 <= B2 );
assert( 1 <= D1 );
assert( D1 <= D2 );
in = fopen(infile, "r");
if (in == NULL) {
fprintf(stderr, "FATAL: can not open \"%s\" for reading\n", infile);
exit(-1);
}
read_ltl(&cur, in, R);
fclose(in);
fprintf(stderr, "Size of input image: %d x %d\n", cur.n, cur.n);
fprintf(stderr, "Model parameters: R=%d B1=%d B2=%d D1=%d D2=%d nsteps=%d\n",
R, B1, B2, D1, D2, nsteps);
out = fopen(outfile, "w");
if ( out == NULL ) {
fprintf(stderr, "FATAL: can not open \"%s\" for writing", outfile);
exit(-1);
}
const int n = cur.n;
dim3 cpyBlock(BLKSIZE_GHOST,HALO);
dim3 cpyGrid((n + BLKSIZE-1)/BLKSIZE, (n + BLKSIZE-1)/BLKSIZE,1);
dim3 stepBlock(BLKSIZE,BLKSIZE);
dim3 stepGrid((n + BLKSIZE-1)/BLKSIZE, (n + BLKSIZE-1)/BLKSIZE);
const size_t size = (n+2*HALO)*(n+2*HALO)*sizeof(*(cur.bmap));
/* Allocate space for device copy of cur and next grids */
hipMalloc((void**)&d_cur, size);
hipMalloc((void**)&d_next, size);
/* Copy initial grid to d_cur */
hipMemcpy(d_cur,cur.bmap, size, hipMemcpyHostToDevice);
tstart = hpc_gettime();
for (s = 0; s < nsteps; s++) {
hipLaunchKernelGGL(( copy_top_bottom), dim3(cpyGrid),dim3(cpyBlock), 0, 0, d_cur, n);
hipLaunchKernelGGL(( copy_left_right), dim3(cpyGrid),dim3(cpyBlock), 0, 0, d_cur, n);
hipLaunchKernelGGL(( copy_corners), dim3(cpyGrid),dim3(cpyBlock), 0, 0, d_cur, n);
hipLaunchKernelGGL(( compute_ltl), dim3(stepGrid),dim3(stepBlock), 0, 0, d_cur, d_next, n, R, B1, B2, D1, D2);
d_tmp = d_cur;
d_cur = d_next;
d_next = d_tmp;
}
hipDeviceSynchronize();
tend = hpc_gettime();
fprintf(stderr, "Execution time %f\n", tend - tstart);
hipMemcpy(cur.bmap, d_cur, size, hipMemcpyDeviceToHost);
write_ltl(&cur, out, R);
fclose(out);
free(cur.bmap);
hipFree(d_cur);
hipFree(d_next);
return 0;
}
| a243106378409cc8baa2bc5239fa7c0b87f5b4a5.cu | /****************************************************************************
*
* cuda-ltl.cu - Larger than Life with CUDA using shared memory
*
* Written by Martina Cavallucci <martina.cavallucci(at)studio.unibo.it>
* --------------------------------------------------------------------------
*
* This version of the Larger than Life uses shared memory and should
* work correctly with any domain size n.
* Compile with:
*
* nvcc cuda-ltl.cu -o cuda-ltl
*
* Run with:
* (Test it with the Game of Life parameters)
* ./cuda-ltl (R =) 1 (B1 = B2 =) 3 (D1 =) 3 (D2 =) 4 nsteps input_file output_file
*
****************************************************************************/
#include "hpc.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <ctype.h> /* for isdigit */
#define BLKSIZE 32
/* We assume the presence of 8 rows/columns of ghost cells per side,
using only those that are actually needed. */
#define HALO 8
/* We use 1D blocks to copy ghost cells; in this case we can use up to
1024 threads per block (for the GPUs on the lab machine) */
#define BLKSIZE_GHOST 1024
typedef unsigned char cell_t;
/* The struct of the bmap_t with a size and a point of unsigned char */
typedef struct {
int n;
cell_t *bmap;
} bmap_t;
/* The following function makes indexing of the two-dimensional CA
grid easier. Instead of writing, e.g., grid[i][j] (which you can
not do anyway, since the CA grids are passed around as pointers to
linear blocks of data), you write IDX(grid, n, i, j) to get a
pointer to grid[i][j]. This function assumes that the size of the
CA grid is (n + 2*HALO) * (n + 2*HALO), where the outermost HALO
rows/columns on each side are ghost cells.
Note the use of both the __device__ and __host__ qualifiers: this
function can be called both from host and device code. */
__device__ __host__ cell_t *IDX(cell_t *grid, int n, int i, int j)
{
return (grid + i*(n + 2 * HALO)+j);
}
/* Fill the ghost cells of |grid| in order to have cyclic boundary conditions*/
__global__ void copy_top_bottom(cell_t *grid, int n)
{
const int end = HALO + n - 1;
const int j = HALO + threadIdx.x + blockIdx.x * blockDim.x;
const int k = threadIdx.y + blockIdx.y * blockDim.y + 1;
/* Copy top and bottom */
if( k < HALO + 1){
if ( j < end + 1) {
*IDX(grid, n, end + k, j) = *IDX(grid, n, HALO + k - 1, j);
*IDX(grid, n, HALO - k, j) = *IDX(grid, n, end - k + 1, j);
}
}
}
__global__ void copy_left_right(cell_t *grid, int n )
{
const int end = HALO + n - 1;
const int i = HALO + threadIdx.x + blockIdx.x * blockDim.x;
const int k = threadIdx.y + blockIdx.y * blockDim.y + 1;
/* Copy left and right */
if( k < HALO + 1){
if ( i < end + 1 ) {
*IDX(grid, n, i, end + k) = *IDX(grid, n, i, HALO + k - 1);
*IDX(grid, n, i, HALO - k) = *IDX(grid, n, i, end - k + 1);
}
}
}
__global__ void copy_corners(cell_t *grid, int n)
{
const int i = threadIdx.y + blockIdx.y * blockDim.y;
const int j = threadIdx.x + blockIdx.x * blockDim.x;
/* Copy corners*/
if( i < HALO){
if (j < HALO){
*IDX(grid, n, i, j) = *IDX(grid, n, i + n, j + n);
*IDX(grid, n, i + HALO + n, j + HALO + n) = *IDX(grid, n, i + HALO, j + HALO);
*IDX(grid, n, i, j + HALO + n) = *IDX(grid, n, i + n, j + HALO);
*IDX(grid, n, i + HALO + n, j) = *IDX(grid, n, i + HALO, j + n);
}
}
}
/**
* Write the content of the bmap_t structure pointed to by ltl to the
* file f in PBM format and allocates space
* for the ghost cell that will be assigned.
* The caller is responsible for passing a
* pointer f to a file opened for writing
*/
void write_ltl( bmap_t* grid, FILE *f , int r )
{
const int n = grid->n;
fprintf(f, "P1\n");
fprintf(f, "# produced by ltl\n");
fprintf(f, "%d %d\n", n, n);
for (int i = r ; i < n + r; i++) {
for (int j = r ; j < n + r; j++) {
fprintf(f, "%d ", *IDX(grid->bmap, n, i, j));
}
fprintf(f, "\n");
}
}
/*Compute of the Larger than life*/
__global__ void compute_ltl( cell_t *cur, cell_t *next, int n, int r, int b1, int b2, int d1, int d2)
{
/* We assume the presence of 8 rows/columns of ghost cells per side,
using only those that are actually needed. */
__shared__ cell_t buf[BLKSIZE+2*HALO][BLKSIZE+2*HALO];
/* "global" indexes */
const int gi = HALO + threadIdx.y + blockIdx.y * blockDim.y;
const int gj = HALO + threadIdx.x + blockIdx.x * blockDim.x;
/* "local" indexes */
const int li = HALO + threadIdx.y;
const int lj = HALO + threadIdx.x;
int nbors = 0;
/*Copy elements from global memory to local memory of block*/
if ( gi < n + 2 * HALO && gj < n + 2 * HALO ) {
buf[li][lj] = *IDX(cur, n, gi, gj);
if (li < 2 * HALO) { /* left-right */
buf[li - HALO ][lj] = *IDX(cur, n, gi - HALO, gj);
buf[li + BLKSIZE][lj] = (gi + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi + BLKSIZE, gj) : 0);
}
if (lj < 2 * HALO) { /* top-bottom */
buf[li][lj - HALO ] = *IDX(cur, n, gi, gj - HALO);
buf[li][lj + BLKSIZE] = (gj + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi, gj + BLKSIZE) : 0);
}
if (li < 2 * HALO && lj < 2 * HALO) { /* corners */
buf[li - HALO][lj - HALO ] = *IDX(cur, n, gi - HALO, gj - HALO);
buf[li - HALO][lj + BLKSIZE] = (gj + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi - HALO, gj + BLKSIZE) : 0);
buf[li + BLKSIZE][lj - HALO] = (gi + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi + BLKSIZE, gj - HALO) : 0);
buf[li + BLKSIZE][lj + BLKSIZE] = (gi + BLKSIZE < n + 2 * HALO && gj + BLKSIZE < n + 2 * HALO ? *IDX(cur, n, gi + BLKSIZE, gj + BLKSIZE) : 0);
}
}
__syncthreads();
const int globali = r + threadIdx.y + blockIdx.y * blockDim.y;
const int globalj = r + threadIdx.x + blockIdx.x * blockDim.x;
const int localy = r + threadIdx.y;
const int localx = r + threadIdx.x;
int i,j;
for(i = localy - r ; i < localy + r ; i++){
for(j = localx - r; j < localx + r; j++){
nbors = nbors +
buf[i][j] ;
}
}
if( !buf[localy][localx] && nbors >= b1 && nbors <= b2){ // a dead cell is born
*IDX(next, n, globali, globalj) = 1; // set it as live
}else if(buf[localy][localx] && nbors + 1 >= d1 && nbors + 1 <= d2) // a live cell survives
{
*IDX(next, n, globali, globalj) = 1; // set it as live
}else{
*IDX(next, n, globali, globalj) = 0; // set it as dead
}
}
/**
* Read a PBM file from file f. The caller is responsible for passing
* a pointer f to a file opened for reading. This function is not very
* robust; it may fail on perfectly legal PBM images, but should work
* for the images produced by gen-input.c. Also, it should work with
* PBM images produced by Gimp (you must save them in "ASCII format"
* when prompted).
*/
void read_ltl( bmap_t *ltl, FILE* f, int r)
{
char buf[2048];
char *s;
int n, i, j;
int width, height;
/* Get the file type (must be "P1") */
s = fgets(buf, sizeof(buf), f);
if (0 != strcmp(s, "P1\n")) {
fprintf(stderr, "FATAL: Unsupported file type \"%s\"\n", buf);
exit(-1);
}
/* Get any comment and ignore it; does not work if there are
leading spaces in the comment line */
do {
s = fgets(buf, sizeof(buf), f);
} while (s[0] == '#');
/* Get width, height; since we are assuming square images, we
reject the input if width != height. */
sscanf(s, "%d %d", &width, &height);
if ( width != height ) {
fprintf(stderr, "FATAL: image width (%d) and height (%d) must be equal\n", width, height);
exit(-1);
}
ltl->n = n = width;
int ng = n + (2*HALO);
ltl->bmap = (cell_t*)malloc( ng * ng * sizeof(cell_t));
/* scan bitmap; each pixel is represented by a single numeric
character ('0' or '1'); spaces and other separators are ignored
(Gimp produces PBM files with no spaces between digits) */
for (i = HALO; i < n + HALO ; i++) {
for (j = HALO; j < n + HALO ; j++) {
int val;
do {
val = fgetc(f);
if ( EOF == val ) {
fprintf(stderr, "FATAL: error reading input\n");
exit(-1);
}
} while ( !isdigit(val) );
*IDX(ltl->bmap, n, i, j) = (val - '0');
}
}
}
int main( int argc, char* argv[] )
{
int R, B1, B2, D1, D2, nsteps,s;
const char *infile, *outfile;
FILE *in, *out;
bmap_t cur;
cell_t *d_cur, *d_next,*d_tmp;
double tstart, tend;
if ( argc != 9 ) {
fprintf(stderr, "Usage: %s R B1 B2 D1 D2 nsteps infile outfile\n", argv[0]);
return -1;
}
R = atoi(argv[1]);
B1 = atoi(argv[2]);
B2 = atoi(argv[3]);
D1 = atoi(argv[4]);
D2 = atoi(argv[5]);
nsteps = atoi(argv[6]);
infile = argv[7];
outfile = argv[8];
assert( R <= 8 );
assert( 0 <= B1 );
assert( B1 <= B2 );
assert( 1 <= D1 );
assert( D1 <= D2 );
in = fopen(infile, "r");
if (in == NULL) {
fprintf(stderr, "FATAL: can not open \"%s\" for reading\n", infile);
exit(-1);
}
read_ltl(&cur, in, R);
fclose(in);
fprintf(stderr, "Size of input image: %d x %d\n", cur.n, cur.n);
fprintf(stderr, "Model parameters: R=%d B1=%d B2=%d D1=%d D2=%d nsteps=%d\n",
R, B1, B2, D1, D2, nsteps);
out = fopen(outfile, "w");
if ( out == NULL ) {
fprintf(stderr, "FATAL: can not open \"%s\" for writing", outfile);
exit(-1);
}
const int n = cur.n;
dim3 cpyBlock(BLKSIZE_GHOST,HALO);
dim3 cpyGrid((n + BLKSIZE-1)/BLKSIZE, (n + BLKSIZE-1)/BLKSIZE,1);
dim3 stepBlock(BLKSIZE,BLKSIZE);
dim3 stepGrid((n + BLKSIZE-1)/BLKSIZE, (n + BLKSIZE-1)/BLKSIZE);
const size_t size = (n+2*HALO)*(n+2*HALO)*sizeof(*(cur.bmap));
/* Allocate space for device copy of cur and next grids */
cudaMalloc((void**)&d_cur, size);
cudaMalloc((void**)&d_next, size);
/* Copy initial grid to d_cur */
cudaMemcpy(d_cur,cur.bmap, size, cudaMemcpyHostToDevice);
tstart = hpc_gettime();
for (s = 0; s < nsteps; s++) {
copy_top_bottom<<<cpyGrid,cpyBlock>>>(d_cur, n);
copy_left_right<<<cpyGrid,cpyBlock>>>(d_cur, n);
copy_corners<<<cpyGrid,cpyBlock>>>(d_cur, n);
compute_ltl<<<stepGrid,stepBlock>>>(d_cur, d_next, n, R, B1, B2, D1, D2);
d_tmp = d_cur;
d_cur = d_next;
d_next = d_tmp;
}
cudaDeviceSynchronize();
tend = hpc_gettime();
fprintf(stderr, "Execution time %f\n", tend - tstart);
cudaMemcpy(cur.bmap, d_cur, size, cudaMemcpyDeviceToHost);
write_ltl(&cur, out, R);
fclose(out);
free(cur.bmap);
cudaFree(d_cur);
cudaFree(d_next);
return 0;
}
|
b9b79fa67051a55cefca790b32812db3cfdd3378.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include "TestBlas.h"
#include "blas.h"
#include "Matrix.h"
#include <cmath>
#include <rocblas.h>
#include <thrust\host_vector.h>
#include <thrust\device_vector.h>
#define Threshold 0.01f
bool TestBlas::run_tests()
{
printf("\nTesting Host Matrix.\n");
// Verify we can get a zero matrix, should be hand checked
printf("\nCreating a matrix of zeros of size 2x2:\n");
Matrix<host_vector<float>> hmat0(2);
thrust::fill(hmat0.vector.begin(), hmat0.vector.end(), 0.0);
hmat0.print();
//// Verify we can set values; should be hand checked
printf("\nSet(0,0) = 1, (0, 1) = 2, (1, 0) = 3, (1, 1) = 4:\n");
hmat0.set(0, 0, 1.0);
hmat0.set(0, 1, 2.0);
hmat0.set(1, 0, 3.0);
hmat0.set(1, 1, 4.0);
hmat0.print();
printf("\nGet(0,0) (0, 1) (1, 0) (1, 1):\n");
printf("%.1f ", hmat0.get(0,0));
printf("%.1f", hmat0.get(0,1));
printf("\n%.1f ", hmat0.get(1, 0));
printf("%.1f\n", hmat0.get(1,1));
// Set up alpha and beta
float alpha = 2.0;
float beta = 0.5;
printf("\nTesting Host Matrix-Vector operations.\n");
printf("\ngemv(A, x, y, alpha, beta, false):\n");
host_vector<float> hvec0(2);
hvec0[0] = -1.0;
hvec0[1] = 0.0;
host_vector<float> hvec1(2);
hvec1[0] = 1.0;
hvec1[1] = 1.5;
printf("%.1f\n*", alpha);
hmat0.print();
printf("*\n%.1f %.1f\n+\n", (float)hvec0[0], (float)hvec0[1]);
printf("%.1f\n*\n%.1f %.1f\n=\n", beta, (float)hvec1[0], (float)hvec1[1]);
if(blas::gemv(hmat0, hvec0, hvec1, alpha, beta, false) != 0)
{
printf("Failure to execute host gemv.");
return false;
}
printf("%.1f %.1f\n", (float)hvec1[0], (float)hvec1[1]);
if(!(abs(hvec1[0] + 1.5) < Threshold &&
abs(hvec1[1] + 5.25) < Threshold)
)
{
printf("Executed gemv but received an incorrect result.");
return false;
}
printf("\ngemv(A, x, y, true):\n");
printf("%.1f %.1f\n*", (float)hvec0[0], (float)hvec0[1]);
hmat0.print();
printf("=\n");
if(blas::gemv(hmat0, hvec0, hvec1, true) != 0)
{
printf("Failed to execute host gemv.");
return false;
}
printf("%.1f %.1f\n", (float)hvec1[0], (float)hvec1[1]);
if(!(abs(hvec1[0] + 1.0) < Threshold &&
abs(hvec1[1] + 2.0) < Threshold)
)
{
printf("Executed gemv but received an incorrect result.");
return false;
}
printf("\nger(x, y, A, alpha):\n");
printf("%.1f %.1f\nX\n%.1f %.1f\n+\n", (float)hvec0[0], (float)hvec0[1], (float)hvec1[0], (float)hvec1[1]);
hmat0.print();
printf("=");
if(blas::ger(hvec0, hvec1, hmat0, alpha) != 0)
{
printf("Failed to execute ger.");
return false;
}
hmat0.print();
if(!(abs(hmat0.get(0, 0) - 3.0) < Threshold &&
abs(hmat0.get(0, 1) - 6.0) < Threshold &&
abs(hmat0.get(1, 0) - 3.0) < Threshold &&
abs(hmat0.get(1, 1) - 4.0) < Threshold)
)
{
printf("Executed ger but received an incorrect result.");
return false;
}
printf("\nTesting Host Vector-Vector operations.\n");
printf("\nscal(x, alpha):\n");
printf("%.1f\n*\n%.1f %.1f\n=\n", alpha, (float)hvec0[0], (float)hvec0[1]);
if(blas::scal(hvec0, alpha) != 0)
{
printf("Failed to execute scal.");
return false;
}
printf("%.1f %.1f\n", (float)hvec0[0], (float)hvec0[1]);
if(!(abs(hvec0[0] + 2.0) < Threshold &&
abs(hvec0[1] - 0.0) < Threshold)
)
{
printf("Executed scal but received an incorrect result.");
return false;
}
printf("\ndot(x, y, result):\n");
printf("%.1f %.1f\ndot\n%.1f %.1f\n=\n", (float)hvec0[0], (float)hvec0[1], (float)hvec1[0], (float)hvec1[1]);
alpha = 0.0;
if(blas::dot(hvec0, hvec1, alpha) != 0)
{
printf("Failed to execute host dot.");
return false;
}
printf("%.1f\n", alpha);
if(!(abs(alpha - 2.0) < Threshold))
{
printf("Executed dot but received an incorrect result.");
return false;
}
printf("\naxpy(x, y, alpha):\n");
printf("%.1f\n*\n%.1f %.1f\n+\n%.1f %.1f\n=\n", alpha, (float)hvec0[0], (float)hvec0[1], (float)hvec1[0], (float)hvec1[1]);
if(blas::axpy(hvec0, hvec1, alpha) != 0)
{
printf("Failed to execute axpy.");
return false;
}
printf("%.1f %.1f\n", (float)hvec1[0], (float)hvec1[1]);
if(!(abs(hvec1[0] + 5.0) < Threshold &&
abs(hvec1[1] + 2.0) < Threshold)
)
{
printf("Executed axpy but received an incorrect result.");
return false;
}
printf("\nHost Passed!\n");
printf("\nInitializing CUDA & Cublas\n");
hipblasStatus_t stat = hipblasCreate(&blas::handle);
if(stat != HIPBLAS_STATUS_SUCCESS)
{
printf("CUBLAS Init Failure.");
return false;
}
printf("\nTesting Device Matrix.\n");
// Verify we can get a zero matrix, should be hand checked
printf("\nCreating a matrix of zeros of size 2x2:\n");
Matrix<device_vector<float>> dmat0(2);
thrust::fill(dmat0.vector.begin(), dmat0.vector.end(), 0.0);
dmat0.print();
//// Verify we can set values; should be hand checked
printf("\nSet(0,0) = 1, (0, 1) = 2, (1, 0) = 3, (1, 1) = 4:\n");
dmat0.set(0, 0, 1.0);
dmat0.set(0, 1, 2.0);
dmat0.set(1, 0, 3.0);
dmat0.set(1, 1, 4.0);
dmat0.print();
printf("\nGet(0,0) (0, 1) (1, 0) (1, 1):\n");
printf("%.1f ", dmat0.get(0,0));
printf("%.1f", dmat0.get(0,1));
printf("\n%.1f ", dmat0.get(1, 0));
printf("%.1f\n", dmat0.get(1,1));
// Reset alpha and beta
alpha = 2.0;
beta = 0.5;
printf("\nTesting Device Matrix-Vector operations.\n");
printf("\ngemv(A, x, y, alpha, beta, false):\n");
device_vector<float> dvec0(2);
dvec0[0] = -1.0;
dvec0[1] = 0.0;
device_vector<float> dvec1(2);
dvec1[0] = 1.0;
dvec1[1] = 1.5;
printf("%.1f\n*\n", alpha);
dmat0.print();
printf("*\n%.1f %.1f\n+\n", (float)dvec0[0], (float)dvec0[1]);
printf("%.1f\n*\n%.1f %.1f\n=\n", beta, (float)dvec1[0], (float)dvec1[1]);
if(blas::gemv(dmat0, dvec0, dvec1, alpha, beta, false) != 0)
{
printf("Failure to execute device gemv.");
return false;
}
printf("%.1f %.1f\n", (float)dvec1[0], (float)dvec1[1]);
if(!(abs(dvec1[0] + 1.5) < Threshold &&
abs(dvec1[1] + 5.25) < Threshold)
)
{
printf("Executed gemv but received an incorrect result.");
return false;
}
printf("\ngemv(A, x, true):\n");
printf("%.1f %.1f\n*", (float)dvec0[0], (float)dvec0[1]);
dmat0.print();
printf("=\n");
if(blas::gemv(dmat0, dvec0, dvec1, true) != 0)
{
printf("Failed to execute gemv.");
return false;
}
printf("%.1f %.1f\n", (float)dvec1[0], (float)dvec1[1]);
if(!(abs(dvec1[0] + 1.0) < Threshold &&
abs(dvec1[1] + 2.0) < Threshold)
)
{
printf("Executed gemv but received an incorrect result.");
return false;
}
printf("\nger(x, y, A, alpha):\n");
printf("%.1f %.1f\nX\n%.1f %.1f\n+", (float)dvec0[0], (float)dvec0[1], (float)dvec1[0], (float)dvec1[1]);
dmat0.print();
printf("=");
if(blas::ger(dvec0, dvec1, dmat0, alpha) != 0)
{
printf("Failed to execute device ger.");
return false;
}
dmat0.print();
if(!(abs(dmat0.get(0, 0) - 3.0) < Threshold &&
abs(dmat0.get(0, 1) - 6.0) < Threshold &&
abs(dmat0.get(1, 0) - 3.0) < Threshold &&
abs(dmat0.get(1, 1) - 4.0) < Threshold)
)
{
printf("Executed ger but received an incorrect result.");
return false;
}
printf("\nTesting Device Vector-Vector operations.\n");
printf("\nscal(x, alpha):\n");
printf("%.1f\n*\n%.1f %.1f\n=\n", alpha, (float)dvec0[0], (float)dvec0[1]);
if(blas::scal(dvec0, alpha) != 0)
{
printf("Failed to execute scal.");
return false;
}
printf("%.1f %.1f\n", (float)dvec0[0], (float)dvec0[1]);
if(!(abs(dvec0[0] + 2.0) < Threshold &&
abs(dvec0[1] - 0.0) < Threshold)
)
{
printf("Executed scal but received an incorrect result.");
return false;
}
printf("\ndot(x, y, result):\n");
printf("%.1f %.1f\ndot\n%.1f %.1f\n=\n", (float)dvec0[0], (float)dvec0[1], (float)dvec1[0], (float)dvec1[1]);
alpha = 0.0;
if(blas::dot(dvec0, dvec1, alpha) != 0)
{
printf("Failed to execute dot.");
return false;
}
printf("%.1f\n", alpha);
if(!(abs(alpha - 2.0) < Threshold))
{
printf("Executed dot but received an incorrect result.");
return false;
}
printf("\naxpy(x, y, alpha):\n");
printf("%.1f\n*\n%.1f %.1f\n+\n%.1f %.1f\n=\n", alpha, (float)dvec0[0], (float)dvec0[1], (float)dvec1[0], (float)dvec1[1]);
if(blas::axpy(dvec0, dvec1, alpha) != 0)
{
printf("Failed to execute axpy.");
return false;
}
printf("%.1f %.1f\n", (float)dvec1[0], (float)dvec1[1]);
if(!(abs(dvec1[0] + 5.0) < Threshold &&
abs(dvec1[1] + 2.0) < Threshold)
)
{
printf("Executed axpy but received an incorrect result.");
return false;
}
printf("\nDevice Passed!\n");
printf("\nAll Tests Passed!\n");
return true;
} | b9b79fa67051a55cefca790b32812db3cfdd3378.cu | #include "stdafx.h"
#include "TestBlas.h"
#include "blas.h"
#include "Matrix.h"
#include <cmath>
#include <cublas_v2.h>
#include <thrust\host_vector.h>
#include <thrust\device_vector.h>
#define Threshold 0.01f
bool TestBlas::run_tests()
{
printf("\nTesting Host Matrix.\n");
// Verify we can get a zero matrix, should be hand checked
printf("\nCreating a matrix of zeros of size 2x2:\n");
Matrix<host_vector<float>> hmat0(2);
thrust::fill(hmat0.vector.begin(), hmat0.vector.end(), 0.0);
hmat0.print();
//// Verify we can set values, should be hand
printf("\nSet(0,0) = 1, (0, 1) = 2, (1, 0) = 3, (1, 1) = 4:\n");
hmat0.set(0, 0, 1.0);
hmat0.set(0, 1, 2.0);
hmat0.set(1, 0, 3.0);
hmat0.set(1, 1, 4.0);
hmat0.print();
printf("\nGet(0,0) (0, 1) (1, 0) (1, 1):\n");
printf("%.1f ", hmat0.get(0,0));
printf("%.1f", hmat0.get(0,1));
printf("\n%.1f ", hmat0.get(1, 0));
printf("%.1f\n", hmat0.get(1,1));
// Set up alpha and beta
float alpha = 2.0;
float beta = 0.5;
printf("\nTesting Host Matrix-Vector operations.\n");
printf("\ngemv(A, x, y, alpha, beta, false):\n");
host_vector<float> hvec0(2);
hvec0[0] = -1.0;
hvec0[1] = 0.0;
host_vector<float> hvec1(2);
hvec1[0] = 1.0;
hvec1[1] = 1.5;
printf("%.1f\n*", alpha);
hmat0.print();
printf("*\n%.1f %.1f\n+\n", (float)hvec0[0], (float)hvec0[1]);
printf("%.1f\n*\n%.1f %.1f\n=\n", beta, (float)hvec1[0], (float)hvec1[1]);
if(blas::gemv(hmat0, hvec0, hvec1, alpha, beta, false) != 0)
{
printf("Failure to execute host gemv.");
return false;
}
printf("%.1f %.1f\n", (float)hvec1[0], (float)hvec1[1]);
if(!(abs(hvec1[0] + 1.5) < Threshold &&
abs(hvec1[1] + 5.25) < Threshold)
)
{
printf("Executed gemv but received an incorrect result.");
return false;
}
printf("\ngemv(A, x, y, true):\n");
printf("%.1f %.1f\n*", (float)hvec0[0], (float)hvec0[1]);
hmat0.print();
printf("=\n");
if(blas::gemv(hmat0, hvec0, hvec1, true) != 0)
{
printf("Failed to execute host gemv.");
return false;
}
printf("%.1f %.1f\n", (float)hvec1[0], (float)hvec1[1]);
if(!(abs(hvec1[0] + 1.0) < Threshold &&
abs(hvec1[1] + 2.0) < Threshold)
)
{
printf("Executed gemv but received an incorrect result.");
return false;
}
printf("\nger(x, y, A, alpha):\n");
printf("%.1f %.1f\nX\n%.1f %.1f\n+\n", (float)hvec0[0], (float)hvec0[1], (float)hvec1[0], (float)hvec1[1]);
hmat0.print();
printf("=");
if(blas::ger(hvec0, hvec1, hmat0, alpha) != 0)
{
printf("Failed to execute ger.");
return false;
}
hmat0.print();
if(!(abs(hmat0.get(0, 0) - 3.0) < Threshold &&
abs(hmat0.get(0, 1) - 6.0) < Threshold &&
abs(hmat0.get(1, 0) - 3.0) < Threshold &&
abs(hmat0.get(1, 1) - 4.0) < Threshold)
)
{
printf("Executed ger but received an incorrect result.");
return false;
}
printf("\nTesting Host Vector-Vector operations.\n");
printf("\nscal(x, alpha):\n");
printf("%.1f\n*\n%.1f %.1f\n=\n", alpha, (float)hvec0[0], (float)hvec0[1]);
if(blas::scal(hvec0, alpha) != 0)
{
printf("Failed to execute scal.");
return false;
}
printf("%.1f %.1f\n", (float)hvec0[0], (float)hvec0[1]);
if(!(abs(hvec0[0] + 2.0) < Threshold &&
abs(hvec0[1] - 0.0) < Threshold)
)
{
printf("Executed scal but received an incorrect result.");
return false;
}
printf("\ndot(x, y, result):\n");
printf("%.1f %.1f\ndot\n%.1f %.1f\n=\n", (float)hvec0[0], (float)hvec0[1], (float)hvec1[0], (float)hvec1[1]);
alpha = 0.0;
if(blas::dot(hvec0, hvec1, alpha) != 0)
{
printf("Failed to execute host dot.");
return false;
}
printf("%.1f\n", alpha);
if(!(abs(alpha - 2.0) < Threshold))
{
printf("Executed dot but received an incorrect result.");
return false;
}
printf("\naxpy(x, y, alpha):\n");
printf("%.1f\n*\n%.1f %.1f\n+\n%.1f %.1f\n=\n", alpha, (float)hvec0[0], (float)hvec0[1], (float)hvec1[0], (float)hvec1[1]);
if(blas::axpy(hvec0, hvec1, alpha) != 0)
{
printf("Failed to execute axpy.");
return false;
}
printf("%.1f %.1f\n", (float)hvec1[0], (float)hvec1[1]);
if(!(abs(hvec1[0] + 5.0) < Threshold &&
abs(hvec1[1] + 2.0) < Threshold)
)
{
printf("Executed axpy but received an incorrect result.");
return false;
}
printf("\nHost Passed!\n");
printf("\nInitializing CUDA & Cublas\n");
cublasStatus_t stat = cublasCreate(&blas::handle);
if(stat != CUBLAS_STATUS_SUCCESS)
{
printf("CUBLAS Init Failure.");
return false;
}
printf("\nTesting Device Matrix.\n");
// Verify we can get a zero matrix, should be hand checked
printf("\nCreating a matrix of zeros of size 2x2:\n");
Matrix<device_vector<float>> dmat0(2);
thrust::fill(dmat0.vector.begin(), dmat0.vector.end(), 0.0);
dmat0.print();
//// Verify we can set values, should be hand
printf("\nSet(0,0) = 1, (0, 1) = 2, (1, 0) = 3, (1, 1) = 4:\n");
dmat0.set(0, 0, 1.0);
dmat0.set(0, 1, 2.0);
dmat0.set(1, 0, 3.0);
dmat0.set(1, 1, 4.0);
dmat0.print();
printf("\nGet(0,0) (0, 1) (1, 0) (1, 1):\n");
printf("%.1f ", dmat0.get(0,0));
printf("%.1f", dmat0.get(0,1));
printf("\n%.1f ", dmat0.get(1, 0));
printf("%.1f\n", dmat0.get(1,1));
// Reset alpha and beta
alpha = 2.0;
beta = 0.5;
printf("\nTesting Device Matrix-Vector operations.\n");
printf("\ngemv(A, x, y, alpha, beta, false):\n");
device_vector<float> dvec0(2);
dvec0[0] = -1.0;
dvec0[1] = 0.0;
device_vector<float> dvec1(2);
dvec1[0] = 1.0;
dvec1[1] = 1.5;
printf("%.1f\n*\n", alpha);
dmat0.print();
printf("*\n%.1f %.1f\n+\n", (float)dvec0[0], (float)dvec0[1]);
printf("%.1f\n*\n%.1f %.1f\n=\n", beta, (float)dvec1[0], (float)dvec1[1]);
if(blas::gemv(dmat0, dvec0, dvec1, alpha, beta, false) != 0)
{
printf("Failure to execute device gemv.");
return false;
}
printf("%.1f %.1f\n", (float)dvec1[0], (float)dvec1[1]);
if(!(abs(dvec1[0] + 1.5) < Threshold &&
abs(dvec1[1] + 5.25) < Threshold)
)
{
printf("Executed gemv but received an incorrect result.");
return false;
}
printf("\ngemv(A, x, true):\n");
printf("%.1f %.1f\n*", (float)dvec0[0], (float)dvec0[1]);
dmat0.print();
printf("=\n");
if(blas::gemv(dmat0, dvec0, dvec1, true) != 0)
{
printf("Failed to execute gemv.");
return false;
}
printf("%.1f %.1f\n", (float)dvec1[0], (float)dvec1[1]);
if(!(abs(dvec1[0] + 1.0) < Threshold &&
abs(dvec1[1] + 2.0) < Threshold)
)
{
printf("Executed gemv but received an incorrect result.");
return false;
}
printf("\nger(x, y, A, alpha):\n");
printf("%.1f %.1f\nX\n%.1f %.1f\n+", (float)dvec0[0], (float)dvec0[1], (float)dvec1[0], (float)dvec1[1]);
dmat0.print();
printf("=");
if(blas::ger(dvec0, dvec1, dmat0, alpha) != 0)
{
printf("Failed to execute device ger.");
return false;
}
dmat0.print();
if(!(abs(dmat0.get(0, 0) - 3.0) < Threshold &&
abs(dmat0.get(0, 1) - 6.0) < Threshold &&
abs(dmat0.get(1, 0) - 3.0) < Threshold &&
abs(dmat0.get(1, 1) - 4.0) < Threshold)
)
{
printf("Executed ger but received an incorrect result.");
return false;
}
printf("\nTesting Device Vector-Vector operations.\n");
printf("\nscal(x, alpha):\n");
printf("%.1f\n*\n%.1f %.1f\n=\n", alpha, (float)dvec0[0], (float)dvec0[1]);
if(blas::scal(dvec0, alpha) != 0)
{
printf("Failed to execute scal.");
return false;
}
printf("%.1f %.1f\n", (float)dvec0[0], (float)dvec0[1]);
if(!(abs(dvec0[0] + 2.0) < Threshold &&
abs(dvec0[1] - 0.0) < Threshold)
)
{
printf("Executed scal but received an incorrect result.");
return false;
}
printf("\ndot(x, y, result):\n");
printf("%.1f %.1f\ndot\n%.1f %.1f\n=\n", (float)dvec0[0], (float)dvec0[1], (float)dvec1[0], (float)dvec1[1]);
alpha = 0.0;
if(blas::dot(dvec0, dvec1, alpha) != 0)
{
printf("Failed to execute dot.");
return false;
}
printf("%.1f\n", alpha);
if(!(abs(alpha - 2.0) < Threshold))
{
printf("Executed dot but received an incorrect result.");
return false;
}
printf("\naxpy(x, y, alpha):\n");
printf("%.1f\n*\n%.1f %.1f\n+\n%.1f %.1f\n=\n", alpha, (float)dvec0[0], (float)dvec0[1], (float)dvec1[0], (float)dvec1[1]);
if(blas::axpy(dvec0, dvec1, alpha) != 0)
{
printf("Failed to execute axpy.");
return false;
}
printf("%.1f %.1f\n", (float)dvec1[0], (float)dvec1[1]);
if(!(abs(dvec1[0] + 5.0) < Threshold &&
abs(dvec1[1] + 2.0) < Threshold)
)
{
printf("Executed axpy but received an incorrect result.");
return false;
}
printf("\nDevice Passed!\n");
printf("\nAll Tests Passed!\n");
return true;
} |
0926cb241d0a412e1e56b879df545dd6ff9335c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void
vectorAdd1(const float *A, const float *B, float *C, int numElements) {
/*@ requires 0 <= numElements;
requires numElements < gridDim.x * blockDim.x;
ensures \forall i; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements) {
C[i] = A[i] + B[i];
}
}
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements) {
/*@ ghost int m;
requires numElements == m * blockDim.x * gridDim.x;
ensures \forall i [C[i]]; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/*@ loop invariant
(\exists t : thread; active(t)) -> \forall t : thread; active(t);
loop invariant
i == loop_count * blockDim.x * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
loop invariant \forall k [C[k]];
0 <= k && k < blockDim.x * gridDim.x * loop_count -> C[k] == A[k] + B[k]; */
while (i < numElements) {
C[i] = A[i] + B[i];
i += gridDim.x * blockDim.x;
}
}
__global__ void
vectorAdd_non_unif(const float *A, const float *B, float *C, int numElements) {
/*@ requires numElements > 0;
ensures \forall i; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/*@ loop invariant
(loop_count - 1) * blockDim.x * gridDim.x +
blockDim.x * blockIdx.x + threadIdx.x < numElements ->
i == loop_count * blockDim.x * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
loop invariant
(loop_count - 1) * blockDim.x * gridDim.x +
blockDim.x * blockIdx.x + threadIdx.x >= numElements ->
i == (loop_count - 1) * blockDim.x * gridDim.x +
blockDim.x * blockIdx.x + threadIdx.x;
loop invariant \forall k;
0 <= k && k < blockDim.x * gridDim.x * loop_count && k < numElements ->
C[k] == A[k] + B[k]; */
while (i < numElements) {
C[i] = A[i] + B[i];
i += gridDim.x * blockDim.x;
}
}
__global__ void
vectorAddDownward(const float *A, const float *B, float *C, int numElements) {
/*@ ghost int m;
requires numElements == m * blockDim.x * gridDim.x;
ensures \forall i [C[i]]; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */
int i = numElements - 1 - blockDim.x * blockIdx.x - threadIdx.x;
/*@ loop invariant
(\exists t : thread; active(t)) -> \forall t : thread; active(t);
loop invariant
i == numElements - 1 - loop_count * blockDim.x * gridDim.x -
blockDim.x * blockIdx.x - threadIdx.x;
loop invariant \forall k [C[k]];
numElements - blockDim.x * gridDim.x * loop_count <= k && k < numElements ->
C[k] == A[k] + B[k]; */
while (0 <= i) {
C[i] = A[i] + B[i];
i -= gridDim.x * blockDim.x;
}
}
| 0926cb241d0a412e1e56b879df545dd6ff9335c1.cu | __global__ void
vectorAdd1(const float *A, const float *B, float *C, int numElements) {
/*@ requires 0 <= numElements;
requires numElements < gridDim.x * blockDim.x;
ensures \forall i; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements) {
C[i] = A[i] + B[i];
}
}
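// Host-side launch sketch satisfying the contract above (illustrative; dA,
// dB, dC are assumed device buffers, not names from this file). For
// numElements = 50000 and 256-thread blocks:
//   vectorAdd1<<<(50000 + 255) / 256, 256>>>(dA, dB, dC, 50000);
// gridDim.x * blockDim.x = 196 * 256 = 50176 > numElements, as required.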
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements) {
/*@ ghost int m;
requires numElements == m * blockDim.x * gridDim.x;
ensures \forall i [C[i]]; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/*@ loop invariant
(\exists t : thread; active(t)) -> \forall t : thread; active(t);
loop invariant
i == loop_count * blockDim.x * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
loop invariant \forall k [C[k]];
0 <= k && k < blockDim.x * gridDim.x * loop_count -> C[k] == A[k] + B[k]; */
while (i < numElements) {
C[i] = A[i] + B[i];
i += gridDim.x * blockDim.x;
}
}
__global__ void
vectorAdd_non_unif(const float *A, const float *B, float *C, int numElements) {
/*@ requires numElements > 0;
ensures \forall i; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/*@ loop invariant
(loop_count - 1) * blockDim.x * gridDim.x +
blockDim.x * blockIdx.x + threadIdx.x < numElements ->
i == loop_count * blockDim.x * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
loop invariant
(loop_count - 1) * blockDim.x * gridDim.x +
blockDim.x * blockIdx.x + threadIdx.x >= numElements ->
i == (loop_count - 1) * blockDim.x * gridDim.x +
blockDim.x * blockIdx.x + threadIdx.x;
loop invariant \forall k;
0 <= k && k < blockDim.x * gridDim.x * loop_count && k < numElements ->
C[k] == A[k] + B[k]; */
while (i < numElements) {
C[i] = A[i] + B[i];
i += gridDim.x * blockDim.x;
}
}
__global__ void
vectorAddDownward(const float *A, const float *B, float *C, int numElements) {
/*@ ghost int m
requires numElements == m * blockDim.x * gridDim.x;
ensures \forall i [C[i]]; 0 <= i -> i < numElements -> C[i] == A[i] + B[i]; */
int i = numElements - 1 - blockDim.x * blockIdx.x - threadIdx.x;
/*@ loop invariant
(\exists t : thread; active(t)) -> \forall t : thread; active(t);
loop invariant
i == numElements - 1 - loop_count * blockDim.x * gridDim.x -
blockDim.x * blockIdx.x - threadIdx.x;
loop invariant \forall k [C[k]];
numElements - blockDim.x * gridDim.x * loop_count <= k && k < numElements ->
C[k] == A[k] + B[k]; */
while (0 <= i) {
C[i] = A[i] + B[i];
i -= gridDim.x * blockDim.x;
}
}
|
3f5b8a1d85f5e6ac1a3b34969bd3576edaa6f226.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include "reference.cpp"
const short xlength = 1024;
const short xMax = 14*6*256;
__device__
void swap(unsigned int x, unsigned int y, unsigned int * input) {
unsigned int temp = 0;
if ( input[y] < input[x] ) {
temp = input[x];
input[x] = input[y];
input[y] = temp;
}
}
__global__
void bubbleSort(const unsigned int* const data, //INPUT
unsigned int * const output,
int N) {
short idx = blockDim.x * blockIdx.x + threadIdx.x;
short idy = blockDim.y * blockIdx.y;
extern __shared__ unsigned int cids[];
if ( idx + idy * xMax >= N) return;
cids[threadIdx.x] = data[idx + idy * xMax];
__syncthreads();
// block sort -- odd-even transposition sort (a parallel bubble sort); presorts each block of blockDim.x keys
short p = 0;
for( short k = 0; k < blockDim.x; k++) {
if ( threadIdx.x % 2 == p % 2 && threadIdx.x != blockDim.x - 1 )
swap(threadIdx.x, threadIdx.x + 1, cids);
p += 1;
__syncthreads();
}
output[idx + idy * xMax] = cids[threadIdx.x];
}
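/* The loop above is odd-even transposition sort. Illustrative trace for a
4-element block {3,1,4,2}:
p = 0 (even pairs): {1,3, 2,4}
p = 1 (odd pair):   {1,2,3,4}
blockDim.x passes guarantee a fully sorted block of blockDim.x keys. */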
__device__
short binarySearchRightSubArray(const short key,
short start,
const short sz,
unsigned short * const input) {
short mid = 0;
short end = sz + sz - 1;
while ( start <= end ) {
mid = start + (end - start)/2;
if ( key > input[mid] )
start = mid + 1;
else if ( mid == end )
break;
else if ( key < input[mid] )
end = mid;
else if ( start < mid )
end = mid;
else
break;
}
return ( start >= sz + sz ) ? sz : mid - sz;
}
__device__
short binarySearchLeftSubArray(const short key,
short start,
const short sz,
unsigned short * const input) {
short mid = 0;
short end = sz - 1;
if ( input[sz-1] == key )
return sz;
while ( start <= end ) {
mid = start + (end - start)/2;
if ( key > input[mid] ) {
start = mid + 1;
mid++;
}
else if ( mid == end )
break;
else if ( key < input[mid] )
end = mid;
else if ( mid <= end )
start = mid + 1;
}
return ( start >= sz ) ? sz : mid;
}
__global__
void mergeSort(const unsigned int* const input, //INPUT
unsigned int* const output, //OUPUT
int N) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if ( idx >= N ) return;
extern __shared__ unsigned short in[];
unsigned short outIdx = threadIdx.x + blockDim.x;
__shared__ unsigned short sz;
sz = blockDim.x / 2;
in[threadIdx.x] = input[idx];
__syncthreads();
if ( threadIdx.x < sz )
in[outIdx] = binarySearchRightSubArray(in[threadIdx.x], sz, sz, in);
else
in[outIdx] = binarySearchLeftSubArray(in[threadIdx.x], 0, sz, in);
__syncthreads();
output[blockDim.x * blockIdx.x + in[outIdx] + (threadIdx.x % sz)] = in[threadIdx.x];
}
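/* Merge step sketch (illustrative, not from the original source): each
block's two sorted halves are merged by ranking every key against the
opposite half. With sz = 2 and in = {1,3 | 2,4}: left keys rank 0 and 1
into the right half, right keys rank 1 and 2 into the left half, and
output slot = rank + (tid % sz) yields {1,2,3,4}. Note that staging keys
in unsigned short silently truncates values above 65535. */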
void computeHistogram(const unsigned int* const d_vals, //INPUT
unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems) {
const unsigned int xDim = numElems / xlength;
const unsigned int xSumsDim = ceil(xDim/ (float) 128);
unsigned int *h_vals, *h_temp, * d_in, *d_out;//, *your_histo;
checkCudaErrors(hipMalloc( &d_in , sizeof(unsigned int) * numElems));
checkCudaErrors(hipMemset( d_in, 0, sizeof(unsigned int) * numElems));
checkCudaErrors(hipMalloc( &d_out, sizeof(unsigned int) * numElems));
checkCudaErrors(hipMemset( d_out, 0, sizeof(unsigned int) * numElems));
dim3 grid(14*6, 477);
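// The hard-coded grid assumes a fixed problem size (an assumption of this
// file, not checked at runtime): 14*6 = 84 blocks of 256 threads span
// xMax = 21504 elements per grid row, and 477 rows cover
// 84 * 477 * 256 = 10,257,408 elements.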
hipLaunchKernelGGL(( bubbleSort), dim3(grid), dim3(256), sizeof(unsigned int) * 256 , 0, d_vals, d_out, numElems);
for ( short N = 512; N < xlength; N*=2 ) {
hipLaunchKernelGGL(( mergeSort), dim3(numElems/N), dim3(N), sizeof(unsigned short) * 2 * N , 0, d_out, d_in, numElems);
std::swap(d_out, d_in);
}
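// Each pass doubles the sorted run length, ping-ponging between d_out and
// d_in. With xlength = 1024 the loop runs once (N = 512), so d_out ends up
// holding 512-element sorted runs; the final 1024-wide merge is skipped by
// the strict `N < xlength` bound (possibly intended to be `<=`).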
h_temp = new unsigned int[numElems];
h_vals = new unsigned int[numElems];
checkCudaErrors(hipMemcpy(h_vals, d_vals, sizeof(unsigned int) * numElems, hipMemcpyDeviceToHost ));
checkCudaErrors(hipMemcpy(h_temp, d_out, sizeof(unsigned int) * numElems, hipMemcpyDeviceToHost ));
int count = 0; // number of descending transitions (run boundaries) in the output
for (int k = 1; k < numElems; k++) {
if (h_temp[k] < h_temp[k-1]) {
count++;
}
}
std::cout<< "# of transistions is: " << count << "\n";
delete[] h_temp;
delete[] h_vals;
checkCudaErrors(hipFree(d_in));
checkCudaErrors(hipFree(d_out));
} | 3f5b8a1d85f5e6ac1a3b34969bd3576edaa6f226.cu | #include "utils.h"
#include "reference.cpp"
const short xlength = 1024;
const short xMax = 14*6*256;
__device__
void swap(unsigned int x, unsigned int y, unsigned int * input) {
unsigned int temp = 0;
if ( input[y] < input[x] ) {
temp = input[x];
input[x] = input[y];
input[y] = temp;
}
}
__global__
void bubbleSort(const unsigned int* const data, //INPUT
unsigned int * const output,
int N) {
short idx = blockDim.x * blockIdx.x + threadIdx.x;
short idy = blockDim.y * blockIdx.y;
extern __shared__ unsigned int cids[];
if ( idx + idy * xMax >= N) return;
cids[threadIdx.x] = data[idx + idy * xMax];
__syncthreads();
// block sort -- odd-even transposition sort (a parallel bubble sort); presorts each block of blockDim.x keys
short p = 0;
for( short k = 0; k < blockDim.x; k++) {
if ( threadIdx.x % 2 == p % 2 && threadIdx.x != blockDim.x - 1 )
swap(threadIdx.x, threadIdx.x + 1, cids);
p += 1;
__syncthreads();
}
output[idx + idy * xMax] = cids[threadIdx.x];
}
__device__
short binarySearchRightSubArray(const short key,
short start,
const short sz,
unsigned short * const input) {
short mid = 0;
short end = sz + sz - 1;
while ( start <= end ) {
mid = start + (end - start)/2;
if ( key > input[mid] )
start = mid + 1;
else if ( mid == end )
break;
else if ( key < input[mid] )
end = mid;
else if ( start < mid )
end = mid;
else
break;
}
return ( start >= sz + sz ) ? sz : mid - sz;
}
__device__
short binarySearchLeftSubArray(const short key,
short start,
const short sz,
unsigned short * const input) {
short mid = 0;
short end = sz - 1;
if ( input[sz-1] == key )
return sz;
while ( start <= end ) {
mid = start + (end - start)/2;
if ( key > input[mid] ) {
start = mid + 1;
mid++;
}
else if ( mid == end )
break;
else if ( key < input[mid] )
end = mid;
else if ( mid <= end )
start = mid + 1;
}
return ( start >= sz ) ? sz : mid;
}
__global__
void mergeSort(const unsigned int* const input, //INPUT
unsigned int* const output, //OUPUT
int N) {
unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if ( idx >= N ) return;
extern __shared__ unsigned short in[];
unsigned short outIdx = threadIdx.x + blockDim.x;
__shared__ unsigned short sz;
sz = blockDim.x / 2;
in[threadIdx.x] = input[idx];
__syncthreads();
if ( threadIdx.x < sz )
in[outIdx] = binarySearchRightSubArray(in[threadIdx.x], sz, sz, in);
else
in[outIdx] = binarySearchLeftSubArray(in[threadIdx.x], 0, sz, in);
__syncthreads();
output[blockDim.x * blockIdx.x + in[outIdx] + (threadIdx.x % sz)] = in[threadIdx.x];
}
void computeHistogram(const unsigned int* const d_vals, //INPUT
unsigned int* const d_histo, //OUTPUT
const unsigned int numBins,
const unsigned int numElems) {
const unsigned int xDim = numElems / xlength;
const unsigned int xSumsDim = ceil(xDim/ (float) 128);
unsigned int *h_vals, *h_temp, * d_in, *d_out;//, *your_histo;
checkCudaErrors(cudaMalloc( &d_in , sizeof(unsigned int) * numElems));
checkCudaErrors(cudaMemset( d_in, 0, sizeof(unsigned int) * numElems));
checkCudaErrors(cudaMalloc( &d_out, sizeof(unsigned int) * numElems));
checkCudaErrors(cudaMemset( d_out, 0, sizeof(unsigned int) * numElems));
dim3 grid(14*6, 477);
bubbleSort<<< grid, 256, sizeof(unsigned int) * 256 >>>(d_vals, d_out, numElems);
for ( short N = 512; N < xlength; N*=2 ) {
mergeSort<<< numElems/N, N, sizeof(unsigned short) * 2 * N >>>(d_out, d_in, numElems);
std::swap(d_out, d_in);
}
h_temp = new unsigned int[numElems];
h_vals = new unsigned int[numElems];
checkCudaErrors(cudaMemcpy(h_vals, d_vals, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToHost ));
checkCudaErrors(cudaMemcpy(h_temp, d_out, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToHost ));
int count = 0; // number of descending transitions (run boundaries) in the output
for (int k = 1; k < numElems; k++) {
if (h_temp[k] < h_temp[k-1]) {
count++;
}
}
std::cout<< "# of transistions is: " << count << "\n";
delete[] h_temp;
delete[] h_vals;
checkCudaErrors(cudaFree(d_in));
checkCudaErrors(cudaFree(d_out));
} |
0c8c4ba55b6d7e97d3914347f7f27a5bcccee37a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
extern "C" {
#include <lofs-read.h>
#include <lofs-dirstruct.h>
#include <lofs-hdf2nc.h>
#include <lofs-limits.h>
#include <lofs-macros.h>
}
#include "../include/datastructs.h"
#include "../calc/calcdiff6.cu"
#ifndef DIFF6_CU
#define DIFF6_CU
/*
* Copyright (C) 2017-2020 Kelton Halbert, Space Science and Engineering Center (SSEC), University of Wisconsin - Madison
* Written by Kelton Halbert at the University of Wisconsin - Madison,
* Cooperative Institute for Meteorological Satellite Studies (CIMSS),
* Space Science and Engineering Center (SSEC). Provided under the Apache 2.0 License.
* Email: [email protected]
*/
__global__ void cuCalcDiffUXYZ(grid *gd, mesh *msh, sounding *snd, float *ustag, float *tem1, float *tem2, float *tem3) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int nx = gd->NX;
int ny = gd->NY;
int nz = gd->NZ;
// this is a pretty large stencil so we have to be careful
if ((i < nx-3) && (j < ny-3) && (k >= 3) && (k < nz-4) && (i >= 3) && (j >= 3)) {
calc_diffx_u(ustag, tem1, i, j, k, nx, ny);
calc_diffy_u(ustag, tem2, i, j, k, nx, ny);
calc_diffz_u(ustag, snd->u0, tem3, i, j, k, nx, ny);
// handle lower boundary condition for diffz_u
// we've kind of ignored the top boundary...
if ( (k == 3) && (zf(0) == 0.0) ) {
tem3[P3(i, j, 2, nx+2, ny+2)] = -1.0*tem3[P3(i, j, 4, nx+2, ny+2)];
tem3[P3(i, j, 1, nx+2, ny+2)] = tem3[P3(i, j, 3, nx+2, ny+2)];
tem3[P3(i, j, 0, nx+2, ny+2)] = 0.0;
}
}
}
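// calc_diffx_u / calc_diffy_u / calc_diffz_u are defined in calcdiff6.cu
// (not shown here). Assuming a standard 6th-order artificial-diffusion
// stencil, each call reads up to 3 points on either side of (i,j,k), which
// is why the interior is restricted to indices 3 .. n-4 above.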
__global__ void cuCalcDiffVXYZ(grid *gd, mesh *msh, sounding *snd, float *vstag, float *tem1, float *tem2, float *tem3) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int nx = gd->NX;
int ny = gd->NY;
int nz = gd->NZ;
// this is a pretty large stencil so we have to be careful
if ((i < nx-3) && (j < ny-3) && (k >= 3) && (k < nz-4) && (i >= 3) && (j >= 3)) {
calc_diffx_v(vstag, tem1, i, j, k, nx, ny);
calc_diffy_v(vstag, tem2, i, j, k, nx, ny);
calc_diffz_v(vstag, snd->v0, tem3, i, j, k, nx, ny);
// handle lower boundary condition for diffz_v
// we've kind of ignored the top boundary...
if ( (k == 3) && (zf(0) == 0.0) ) {
tem3[P3(i, j, 2, nx+2, ny+2)] = -1.0*tem3[P3(i, j, 4, nx+2, ny+2)];
tem3[P3(i, j, 1, nx+2, ny+2)] = tem3[P3(i, j, 3, nx+2, ny+2)];
tem3[P3(i, j, 0, nx+2, ny+2)] = 0.0;
}
}
}
__global__ void cuCalcDiffWXYZ(grid *gd, mesh *msh, sounding *snd, float *wstag, float *tem1, float *tem2, float *tem3) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int nx = gd->NX;
int ny = gd->NY;
int nz = gd->NZ;
// this is a pretty large stencil so we have to be careful
if ((i < nx-3) && (j < ny-3) && (k >= 3) && (k < nz-4) && (i >= 3) && (j >= 3)) {
calc_diffx_w(wstag, tem1, i, j, k, nx, ny);
calc_diffy_w(wstag, tem2, i, j, k, nx, ny);
calc_diffz_w(wstag, tem3, i, j, k, nx, ny);
        // handle lower boundary condition for diffz_w
// we've kind of ignored the top boundary...
if ( (k == 3) && (zf(0) == 0.0) ) {
tem3[P3(i, j, 2, nx+2, ny+2)] = -1.0*tem3[P3(i, j, 4, nx+2, ny+2)];
tem3[P3(i, j, 1, nx+2, ny+2)] = tem3[P3(i, j, 3, nx+2, ny+2)];
tem3[P3(i, j, 0, nx+2, ny+2)] = 0.0;
}
}
}
__global__ void cuCalcDiff(grid *gd, mesh *msh, sounding *snd, float *diffx, float *diffy, float *diffz, float *difften) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int nx = gd->NX;
int ny = gd->NY;
int nz = gd->NZ;
if ((i < nx+1) && (j < ny+1) && (k < nz)) {
calc_diff(diffx, diffy, diffz, difften, msh->dt, i, j, k, nx, ny);
}
}
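/*
 * Editor note -- illustrative launch sketch, not part of the original file.
 * The kernels above map threadIdx/blockIdx (x, y, z) onto grid indices
 * (i, j, k), so a host-side launch could look roughly like the following,
 * assuming gd, msh, snd and the tem* scratch arrays are already
 * device-resident; the block shape here is only an example:
 *
 *   dim3 block(8, 8, 8);
 *   dim3 grid((nx + block.x - 1) / block.x,
 *             (ny + block.y - 1) / block.y,
 *             (nz + block.z - 1) / block.z);
 *   hipLaunchKernelGGL(cuCalcDiffUXYZ, grid, block, 0, 0,
 *                      gd, msh, snd, ustag, tem1, tem2, tem3);
 */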
#endif
| 0c8c4ba55b6d7e97d3914347f7f27a5bcccee37a.cu | #include <iostream>
#include <stdio.h>
extern "C" {
#include <lofs-read.h>
#include <lofs-dirstruct.h>
#include <lofs-hdf2nc.h>
#include <lofs-limits.h>
#include <lofs-macros.h>
}
#include "../include/datastructs.h"
#include "../calc/calcdiff6.cu"
#ifndef DIFF6_CU
#define DIFF6_CU
/*
* Copyright (C) 2017-2020 Kelton Halbert, Space Science and Engineering Center (SSEC), University of Wisconsin - Madison
* Written by Kelton Halbert at the University of Wisconsin - Madison,
* Cooperative Institute for Meteorological Satellite Studies (CIMSS),
* Space Science and Engineering Center (SSEC). Provided under the Apache 2.0 License.
* Email: [email protected]
*/
__global__ void cuCalcDiffUXYZ(grid *gd, mesh *msh, sounding *snd, float *ustag, float *tem1, float *tem2, float *tem3) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int nx = gd->NX;
int ny = gd->NY;
int nz = gd->NZ;
// this is a pretty large stencil so we have to be careful
if ((i < nx-3) && (j < ny-3) && (k >= 3) && (k < nz-4) && (i >= 3) && (j >= 3)) {
calc_diffx_u(ustag, tem1, i, j, k, nx, ny);
calc_diffy_u(ustag, tem2, i, j, k, nx, ny);
calc_diffz_u(ustag, snd->u0, tem3, i, j, k, nx, ny);
// handle lower boundary condition for diffz_u
// we've kind of ignored the top boundary...
if ( (k == 3) && (zf(0) == 0.0) ) {
tem3[P3(i, j, 2, nx+2, ny+2)] = -1.0*tem3[P3(i, j, 4, nx+2, ny+2)];
tem3[P3(i, j, 1, nx+2, ny+2)] = tem3[P3(i, j, 3, nx+2, ny+2)];
tem3[P3(i, j, 0, nx+2, ny+2)] = 0.0;
}
}
}
__global__ void cuCalcDiffVXYZ(grid *gd, mesh *msh, sounding *snd, float *vstag, float *tem1, float *tem2, float *tem3) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int nx = gd->NX;
int ny = gd->NY;
int nz = gd->NZ;
// this is a pretty large stencil so we have to be careful
if ((i < nx-3) && (j < ny-3) && (k >= 3) && (k < nz-4) && (i >= 3) && (j >= 3)) {
calc_diffx_v(vstag, tem1, i, j, k, nx, ny);
calc_diffy_v(vstag, tem2, i, j, k, nx, ny);
calc_diffz_v(vstag, snd->v0, tem3, i, j, k, nx, ny);
        // handle lower boundary condition for diffz_v
// we've kind of ignored the top boundary...
if ( (k == 3) && (zf(0) == 0.0) ) {
tem3[P3(i, j, 2, nx+2, ny+2)] = -1.0*tem3[P3(i, j, 4, nx+2, ny+2)];
tem3[P3(i, j, 1, nx+2, ny+2)] = tem3[P3(i, j, 3, nx+2, ny+2)];
tem3[P3(i, j, 0, nx+2, ny+2)] = 0.0;
}
}
}
__global__ void cuCalcDiffWXYZ(grid *gd, mesh *msh, sounding *snd, float *wstag, float *tem1, float *tem2, float *tem3) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int nx = gd->NX;
int ny = gd->NY;
int nz = gd->NZ;
// this is a pretty large stencil so we have to be careful
if ((i < nx-3) && (j < ny-3) && (k >= 3) && (k < nz-4) && (i >= 3) && (j >= 3)) {
calc_diffx_w(wstag, tem1, i, j, k, nx, ny);
calc_diffy_w(wstag, tem2, i, j, k, nx, ny);
calc_diffz_w(wstag, tem3, i, j, k, nx, ny);
        // handle lower boundary condition for diffz_w
// we've kind of ignored the top boundary...
if ( (k == 3) && (zf(0) == 0.0) ) {
tem3[P3(i, j, 2, nx+2, ny+2)] = -1.0*tem3[P3(i, j, 4, nx+2, ny+2)];
tem3[P3(i, j, 1, nx+2, ny+2)] = tem3[P3(i, j, 3, nx+2, ny+2)];
tem3[P3(i, j, 0, nx+2, ny+2)] = 0.0;
}
}
}
__global__ void cuCalcDiff(grid *gd, mesh *msh, sounding *snd, float *diffx, float *diffy, float *diffz, float *difften) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int nx = gd->NX;
int ny = gd->NY;
int nz = gd->NZ;
if ((i < nx+1) && (j < ny+1) && (k < nz)) {
calc_diff(diffx, diffy, diffz, difften, msh->dt, i, j, k, nx, ny);
}
}
#endif
|
20963eaaf5b0f2bce19f0af29f14affac2b9dec8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layers/twoeltwise_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
if (bottom_data_a[index] > bottom_data_b[index]) {
top_data[index] = bottom_data_a[index];
} else {
top_data[index] = bottom_data_b[index];
}
}
}
template <typename Dtype>
void TwoEltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
const int num = bottom[0]->num();
const int chw = count/num;
Dtype* top_data = preoutput_.mutable_gpu_data();
Dtype* output_data = top[0]->mutable_gpu_data();
switch (op_) {
case TwoEltwiseParameter_TwoEltwiseOp_PROD:
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
break;
case TwoEltwiseParameter_TwoEltwiseOp_SUM:
caffe_gpu_add(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
break;
case TwoEltwiseParameter_TwoEltwiseOp_MAX:
hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data);
break;
case TwoEltwiseParameter_TwoEltwiseOp_SUB:
caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
if (this->layer_param_.twoeltwise_param().absout() == true )
{
caffe_gpu_abs(count, top_data, top_data);
}
if( this->layer_param_.twoeltwise_param().numsqsum() == true )
{
caffe_gpu_powx(count, preoutput_.mutable_gpu_data(), Dtype(2), preoutput_.mutable_gpu_data());
std::cout << "li" << num << " " << chw << std::endl;
caffe_gpu_gemv(CblasNoTrans, num, chw, Dtype(1.0), preoutput_.mutable_gpu_data(),
summer_vec_.mutable_gpu_data(), Dtype(0.0), top[0]->mutable_gpu_data());
}else{
caffe_copy(count, top_data, output_data);
}
}
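/*
 * Editor note: when numsqsum is enabled the layer computes, per sample n,
 * top[n] = sum over (c,h,w) of op(a, b)^2 -- the element-wise result is
 * squared, then reduced row-wise by multiplying the num x chw matrix with a
 * ones vector via caffe_gpu_gemv. This assumes summer_vec_ was pre-filled
 * with 1.0 during layer setup (not shown in this file).
 */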
template <typename Dtype>
void TwoEltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
for (int i = 0; i < propagate_down.size(); ++i) {
if (propagate_down[i]) { NOT_IMPLEMENTED; }
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TwoEltwiseLayer);
} // namespace caffe
| 20963eaaf5b0f2bce19f0af29f14affac2b9dec8.cu | #include <cfloat>
#include <vector>
#include "caffe/layers/twoeltwise_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
if (bottom_data_a[index] > bottom_data_b[index]) {
top_data[index] = bottom_data_a[index];
} else {
top_data[index] = bottom_data_b[index];
}
}
}
template <typename Dtype>
void TwoEltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
const int num = bottom[0]->num();
const int chw = count/num;
Dtype* top_data = preoutput_.mutable_gpu_data();
Dtype* output_data = top[0]->mutable_gpu_data();
switch (op_) {
case TwoEltwiseParameter_TwoEltwiseOp_PROD:
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
break;
case TwoEltwiseParameter_TwoEltwiseOp_SUM:
caffe_gpu_add(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
break;
case TwoEltwiseParameter_TwoEltwiseOp_MAX:
MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data);
break;
case TwoEltwiseParameter_TwoEltwiseOp_SUB:
caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
if (this->layer_param_.twoeltwise_param().absout() == true )
{
caffe_gpu_abs(count, top_data, top_data);
}
if( this->layer_param_.twoeltwise_param().numsqsum() == true )
{
caffe_gpu_powx(count, preoutput_.mutable_gpu_data(), Dtype(2), preoutput_.mutable_gpu_data());
std::cout << "li" << num << " " << chw << std::endl;
caffe_gpu_gemv(CblasNoTrans, num, chw, Dtype(1.0), preoutput_.mutable_gpu_data(),
summer_vec_.mutable_gpu_data(), Dtype(0.0), top[0]->mutable_gpu_data());
}else{
caffe_copy(count, top_data, output_data);
}
}
template <typename Dtype>
void TwoEltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
for (int i = 0; i < propagate_down.size(); ++i) {
if (propagate_down[i]) { NOT_IMPLEMENTED; }
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TwoEltwiseLayer);
} // namespace caffe
|
499af1486cf91a23fdba1363b5fce217a727a397.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zswapdblk.cu, normal z -> s, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
sswapdblk_kernel( int nb,
float *dA, int ldda, int inca,
float *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
float tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/***************************************************************************//**
Purpose
-------
sswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
            The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
*******************************************************************************/
extern "C" void
magmablas_sswapdblk(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
hipLaunchKernelGGL(( sswapdblk_kernel), dim3(nblocks), dim3(nb), 0, queue->cuda_stream() ,
nb, dA, ldda, inca,
dB, lddb, incb );
}
}
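/*
 * Editor note -- hypothetical usage sketch; names and sizes are illustrative,
 * not taken from the original file. With inca = incb = 1 the nb x nb blocks
 * sit on the main diagonals of dA and dB:
 *
 *   magma_queue_t queue;
 *   magma_queue_create( device, &queue );
 *   magmablas_sswapdblk( n, nb,
 *                        dA, ldda, 1,
 *                        dB, lddb, 1, queue );
 *   magma_queue_sync( queue );
 */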
| 499af1486cf91a23fdba1363b5fce217a727a397.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zswapdblk.cu, normal z -> s, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
sswapdblk_kernel( int nb,
float *dA, int ldda, int inca,
float *dB, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
float tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
/***************************************************************************//**
Purpose
-------
sswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB REAL array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
            The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
*******************************************************************************/
extern "C" void
magmablas_sswapdblk(
magma_int_t n, magma_int_t nb,
magmaFloat_ptr dA, magma_int_t ldda, magma_int_t inca,
magmaFloat_ptr dB, magma_int_t lddb, magma_int_t incb,
magma_queue_t queue )
{
magma_int_t nblocks = n / nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( nblocks > 0 ) {
sswapdblk_kernel<<< nblocks, nb, 0, queue->cuda_stream() >>>
( nb, dA, ldda, inca,
dB, lddb, incb );
}
}
|
56598ae49dc6cbe93f2beef1de605b2561779e1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/affine_channel_op.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, StorageOrder kOrder>
__global__ void AffineChannelScaleBiasBackwardCUDAKernel(
const int N,
const int C,
const int HxW,
const T* dY,
const T* X,
T* dscale,
T* dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T ds_sum = 0;
T db_sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = kOrder == StorageOrder::NCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
#if __CUDA_ARCH__ >= 350
ds_sum += __ldg(dY + index) * __ldg(X + index);
db_sum += __ldg(dY + index);
#else
ds_sum += dY[index] * X[index];
db_sum += dY[index];
#endif
}
ds_sum = BlockReduce<T>(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce<T>(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum;
dbias[i] = db_sum;
}
__syncthreads();
}
}
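/*
 * Editor note: each block above reduces one channel i. Its threads stride
 * over the N*HxW elements of that channel accumulating dY*X (for dscale)
 * and dY (for dbias); BlockReduce then collapses the partial sums and
 * thread 0 writes the per-channel result.
 */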
} // namespace
template <>
bool AffineChannelGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& dY = Input(0);
const auto& scale = is_learnable_ ? Input(2) : Input(1);
auto* dX = Output(0, dY.sizes(), at::dtype<float>());
const int N = dY.dim32(0);
const int C = dY.dim32(1);
const int HxW = dY.numel() / (N * C);
const float* dY_data = dY.data<float>();
const float* scale_data = scale.data<float>();
const std::array<int, 3> X_dims = {N, C, HxW};
const std::array<int, 3> scale_dims = {1, C, 1};
math::Mul<float, CUDAContext>(
3,
X_dims.data(),
3,
scale_dims.data(),
dY_data,
scale_data,
dX->template mutable_data<float>(),
&context_);
if (is_learnable_) {
const auto& X = Input(1);
const float* X_data = X.data<float>();
auto* dscale = Output(1, scale.sizes(), at::dtype<float>());
auto* dbias = Output(2, scale.sizes(), at::dtype<float>());
const int outer_size = N * HxW;
hipLaunchKernelGGL(( AffineChannelScaleBiasBackwardCUDAKernel<float, StorageOrder::NCHW>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
HxW,
dY_data,
X_data,
dscale->template mutable_data<float>(),
dbias->template mutable_data<float>());
}
return true;
}
template <>
bool AffineChannelGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& dY = Input(0);
const auto& scale = is_learnable_ ? Input(2) : Input(1);
auto* dX = Output(0, dY.sizes(), at::dtype<float>());
const int ndim = dY.dim();
const int C = dY.dim32(ndim - 1);
const int rows = dY.numel() / C;
const int cols = C;
const float* dY_data = dY.data<float>();
const float* scale_data = scale.data<float>();
math::RowwiseMul<float, CUDAContext>(
rows,
cols,
dY_data,
scale_data,
dX->template mutable_data<float>(),
&context_);
if (is_learnable_) {
const auto& X = Input(1);
const float* X_data = X.data<float>();
const int N = X.dim32(0);
const int HxW = rows / N;
auto* dscale = Output(1, scale.sizes(), at::dtype<float>());
auto* dbias = Output(2, scale.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( AffineChannelScaleBiasBackwardCUDAKernel<float, StorageOrder::NHWC>)
, dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
HxW,
dY_data,
X_data,
dscale->template mutable_data<float>(),
dbias->template mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(AffineChannel, AffineChannelOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
AffineChannelGradient,
AffineChannelGradientOp<float, CUDAContext>);
} // namespace caffe2
| 56598ae49dc6cbe93f2beef1de605b2561779e1b.cu | #include "caffe2/operators/affine_channel_op.h"
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, StorageOrder kOrder>
__global__ void AffineChannelScaleBiasBackwardCUDAKernel(
const int N,
const int C,
const int HxW,
const T* dY,
const T* X,
T* dscale,
T* dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
__shared__ typename BlockReduce<T>::TempStorage ds_storage;
__shared__ typename BlockReduce<T>::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T ds_sum = 0;
T db_sum = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = kOrder == StorageOrder::NCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
#if __CUDA_ARCH__ >= 350
ds_sum += __ldg(dY + index) * __ldg(X + index);
db_sum += __ldg(dY + index);
#else
ds_sum += dY[index] * X[index];
db_sum += dY[index];
#endif
}
ds_sum = BlockReduce<T>(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce<T>(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum;
dbias[i] = db_sum;
}
__syncthreads();
}
}
} // namespace
template <>
bool AffineChannelGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& dY = Input(0);
const auto& scale = is_learnable_ ? Input(2) : Input(1);
auto* dX = Output(0, dY.sizes(), at::dtype<float>());
const int N = dY.dim32(0);
const int C = dY.dim32(1);
const int HxW = dY.numel() / (N * C);
const float* dY_data = dY.data<float>();
const float* scale_data = scale.data<float>();
const std::array<int, 3> X_dims = {N, C, HxW};
const std::array<int, 3> scale_dims = {1, C, 1};
math::Mul<float, CUDAContext>(
3,
X_dims.data(),
3,
scale_dims.data(),
dY_data,
scale_data,
dX->template mutable_data<float>(),
&context_);
if (is_learnable_) {
const auto& X = Input(1);
const float* X_data = X.data<float>();
auto* dscale = Output(1, scale.sizes(), at::dtype<float>());
auto* dbias = Output(2, scale.sizes(), at::dtype<float>());
const int outer_size = N * HxW;
AffineChannelScaleBiasBackwardCUDAKernel<float, StorageOrder::NCHW>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
HxW,
dY_data,
X_data,
dscale->template mutable_data<float>(),
dbias->template mutable_data<float>());
}
return true;
}
template <>
bool AffineChannelGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& dY = Input(0);
const auto& scale = is_learnable_ ? Input(2) : Input(1);
auto* dX = Output(0, dY.sizes(), at::dtype<float>());
const int ndim = dY.dim();
const int C = dY.dim32(ndim - 1);
const int rows = dY.numel() / C;
const int cols = C;
const float* dY_data = dY.data<float>();
const float* scale_data = scale.data<float>();
math::RowwiseMul<float, CUDAContext>(
rows,
cols,
dY_data,
scale_data,
dX->template mutable_data<float>(),
&context_);
if (is_learnable_) {
const auto& X = Input(1);
const float* X_data = X.data<float>();
const int N = X.dim32(0);
const int HxW = rows / N;
auto* dscale = Output(1, scale.sizes(), at::dtype<float>());
auto* dbias = Output(2, scale.sizes(), at::dtype<float>());
AffineChannelScaleBiasBackwardCUDAKernel<float, StorageOrder::NHWC>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
HxW,
dY_data,
X_data,
dscale->template mutable_data<float>(),
dbias->template mutable_data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(AffineChannel, AffineChannelOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
AffineChannelGradient,
AffineChannelGradientOp<float, CUDAContext>);
} // namespace caffe2
|
f302f72dbbf5fe9fe1173be9b7dc8e2ad4c104aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
__device__ void do_math(float*, float*, float*, int );
//---------------------------------------------------------------------------
// Kernel function
//---------------------------------------------------------------------------
__global__ void gpu_init_line( float*, float*, int );
__global__ void gpu_update( float*, float*, float*, int, int );
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
float *gValues,
*gOldVal,
*gNewVal;
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
__global__ void gpu_init_line( float *gOld, float *gVal, int tpoints )
{
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	/* the grid is padded up to a multiple of the block size, so threads
	   past the last point must not touch the arrays */
	if (index > tpoints) return;
	/* Initialize old values array */
	gOld[index] = gVal[index];
}
void init_line(void)
{
//int i, j;
int j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 1; j <= tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
hipMemcpy( gValues, values, (MAXPOINTS+2)*sizeof(float),
hipMemcpyHostToDevice );
dim3 dimBlock( 512 ), dimGrid( (tpoints + dimBlock.x -1) / dimBlock.x );
hipLaunchKernelGGL(( gpu_init_line), dim3(dimGrid), dim3(dimBlock), 0, 0, gOldVal, gValues, tpoints );
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
__device__ void do_math(float *gOld, float *gVal, float *gNew, int i)
{
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
gNew[i] = (2.0 * gVal[i]) - gOld[i] + (sqtau * (-2.0)*gVal[i]);
}
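/*
 * Editor note: this matches the simplified per-point update used in the
 * original assignment,
 *   new[i] = 2*val[i] - old[i] + tau^2 * (-2*val[i]),  tau = c*dt/dx,
 * i.e. the spatial-neighbour terms of the full 1-D wave stencil
 * (val[i-1] - 2*val[i] + val[i+1]) are reduced to the centre term only,
 * which makes each point independent of its neighbours.
 */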
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
__global__ void gpu_update( float *gOld, float *gVal, float *gNew, int nsteps,
int tpoints )
{
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	/* guard threads outside the valid point range [1, tpoints] */
	if (index < 1 || index > tpoints) return;
	/* Update values for each time step */
for( int i=0; i < nsteps; i++ ) {
/* global endpoints */
if( (index == 1) || (index == tpoints) )
gNew[index] = 0.0;
else
do_math( gOld, gVal, gNew, index );
/* Update old values with new values */
gOld[index] = gVal[index];
gVal[index] = gNew[index];
}
}
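/* Editor note: because the update touches only index i, no point ever reads
   a neighbour, so the whole time loop can run inside one kernel launch
   without any inter-block synchronization. */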
void update()
{
dim3 threadsPerBlock( 512 ),
blocksPerGrid( (tpoints + threadsPerBlock.x -1) / threadsPerBlock.x );
hipLaunchKernelGGL(( gpu_update), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, gOldVal, gValues, gNewVal, nsteps, tpoints );
hipMemcpy( values, gValues, (MAXPOINTS+2)*sizeof(float),
hipMemcpyDeviceToHost );
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 1; i <= tpoints; i++) {
printf("%6.4f ", values[i]);
if (i%10 == 0)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
/* Allocate space in device Global Memory */
hipMalloc( (void**)&gValues, (MAXPOINTS+2)*sizeof(float) );
hipMalloc( (void**)&gOldVal, (MAXPOINTS+2)*sizeof(float) );
hipMalloc( (void**)&gNewVal, (MAXPOINTS+2)*sizeof(float) );
check_param();
printf("Initializing points on the line...\n");
init_line();
printf("Updating all points for all time steps...\n");
update();
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
/* Free allocated space in device Global Memory */
hipFree( gValues );
hipFree( gOldVal );
hipFree( gNewVal );
return 0;
}
| f302f72dbbf5fe9fe1173be9b7dc8e2ad4c104aa.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
__device__ void do_math(float*, float*, float*, int );
//---------------------------------------------------------------------------
// Kernel function
//---------------------------------------------------------------------------
__global__ void gpu_init_line( float*, float*, int );
__global__ void gpu_update( float*, float*, float*, int, int );
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
float *gValues,
*gOldVal,
*gNewVal;
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
__global__ void gpu_init_line( float *gOld, float *gVal, int tpoints )
{
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	/* the grid is padded up to a multiple of the block size, so threads
	   past the last point must not touch the arrays */
	if (index > tpoints) return;
	/* Initialize old values array */
	gOld[index] = gVal[index];
}
void init_line(void)
{
//int i, j;
int j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 1; j <= tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
cudaMemcpy( gValues, values, (MAXPOINTS+2)*sizeof(float),
cudaMemcpyHostToDevice );
dim3 dimBlock( 512 ), dimGrid( (tpoints + dimBlock.x -1) / dimBlock.x );
gpu_init_line<<<dimGrid, dimBlock>>>( gOldVal, gValues, tpoints );
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
__device__ void do_math(float *gOld, float *gVal, float *gNew, int i)
{
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
gNew[i] = (2.0 * gVal[i]) - gOld[i] + (sqtau * (-2.0)*gVal[i]);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
__global__ void gpu_update( float *gOld, float *gVal, float *gNew, int nsteps,
int tpoints )
{
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	/* guard threads outside the valid point range [1, tpoints] */
	if (index < 1 || index > tpoints) return;
	/* Update values for each time step */
for( int i=0; i < nsteps; i++ ) {
/* global endpoints */
if( (index == 1) || (index == tpoints) )
gNew[index] = 0.0;
else
do_math( gOld, gVal, gNew, index );
/* Update old values with new values */
gOld[index] = gVal[index];
gVal[index] = gNew[index];
}
}
void update()
{
dim3 threadsPerBlock( 512 ),
blocksPerGrid( (tpoints + threadsPerBlock.x -1) / threadsPerBlock.x );
gpu_update<<<blocksPerGrid, threadsPerBlock>>>( gOldVal, gValues, gNewVal, nsteps, tpoints );
cudaMemcpy( values, gValues, (MAXPOINTS+2)*sizeof(float),
cudaMemcpyDeviceToHost );
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 1; i <= tpoints; i++) {
printf("%6.4f ", values[i]);
if (i%10 == 0)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
/* Allocate space in device Global Memory */
cudaMalloc( (void**)&gValues, (MAXPOINTS+2)*sizeof(float) );
cudaMalloc( (void**)&gOldVal, (MAXPOINTS+2)*sizeof(float) );
cudaMalloc( (void**)&gNewVal, (MAXPOINTS+2)*sizeof(float) );
check_param();
printf("Initializing points on the line...\n");
init_line();
printf("Updating all points for all time steps...\n");
update();
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
/* Free allocated space in device Global Memory */
cudaFree( gValues );
cudaFree( gOldVal );
cudaFree( gNewVal );
return 0;
}
|
f9fbb97255972323993980c067e0323fd51e1250.hip | // !!! This is a file automatically generated by hipify!!!
/**
* correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define POLYBENCH_TIME 1
#include "correlation.cuh"
#include "../../common/polybench.h"
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define GPU_DEVICE 0
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#define FLOAT_N 3214212.01f
#define EPS 0.005f
#define RUN_ON_CPU
void init_arrays(int m, int n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n))
{
int i, j;
for (i=0; i < m; i++)
{
for (j=0; j < n; j++)
{
data[i][j] = ((DATA_TYPE) i*j)/ M;
}
}
}
void correlation(int m, int n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n), DATA_TYPE POLYBENCH_1D(mean, M, m), DATA_TYPE POLYBENCH_1D(stddev, M, m),
DATA_TYPE POLYBENCH_2D(symmat, M, N, m, n))
{
int i, j, j1, j2;
// Determine mean of column vectors of input data matrix
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
{
mean[j] += data[i][j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
// Determine standard deviations of column vectors of data matrix.
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
{
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
}
stddev[j] /= FLOAT_N;
stddev[j] = sqrt_of_array_cell(stddev, j);
stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j];
}
// Center and reduce the column vectors.
for (i = 0; i < _PB_N; i++)
{
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
data[i][j] /= (sqrt(FLOAT_N)*stddev[j]) ;
}
}
// Calculate the m * m correlation matrix.
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
{
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
}
symmat[j2][j1] = symmat[j1][j2];
}
}
symmat[M-1][M-1] = 1.0;
}
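/*
 * Editor note: after centering by mean[j] and scaling by sqrt(FLOAT_N)*stddev[j],
 * the final loop computes the Pearson correlation
 *   symmat[j1][j2] = sum_i (data[i][j1] - mean[j1]) * (data[i][j2] - mean[j2])
 *                    / (FLOAT_N * stddev[j1] * stddev[j2])
 * with 1.0 on the diagonal; only the upper triangle is computed and then
 * mirrored.
 */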
void compareResults(int m, int n, DATA_TYPE POLYBENCH_2D(symmat, M, N, m, n), DATA_TYPE POLYBENCH_2D(symmat_outputFromGpu, M, N, m, n))
{
int i,j,fail;
fail = 0;
for (i=0; i < m; i++)
{
for (j=0; j < n; j++)
{
if (percentDiff(symmat[i][j], symmat_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void mean_kernel(int m, int n, DATA_TYPE *mean, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < _PB_M)
{
mean[j] = 0.0;
int i;
for(i=0; i < _PB_N; i++)
{
mean[j] += data[i*M + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
}
__global__ void std_kernel(int m, int n, DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < _PB_M)
{
std[j] = 0.0;
int i;
for(i = 0; i < _PB_N; i++)
{
std[j] += (data[i*M + j] - mean[j]) * (data[i*M + j] - mean[j]);
}
std[j] /= (FLOAT_N);
std[j] = sqrt(std[j]);
if(std[j] <= EPS)
{
std[j] = 1.0;
}
}
}
__global__ void reduce_kernel(int m, int n, DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < _PB_N) && (j < _PB_M))
{
data[i*M + j] -= mean[j];
data[i*M + j] /= (sqrt(FLOAT_N) * std[j]);
}
}
__global__ void corr_kernel(int m, int n, DATA_TYPE *symmat, DATA_TYPE *data)
{
int j1 = blockIdx.x * blockDim.x + threadIdx.x;
int i, j2;
if (j1 < (_PB_M-1))
{
symmat[j1*M + j1] = 1.0;
for (j2 = (j1 + 1); j2 < _PB_M; j2++)
{
symmat[j1*M + j2] = 0.0;
for(i = 0; i < _PB_N; i++)
{
symmat[j1*M + j2] += data[i*M + j1] * data[i*M + j2];
}
symmat[j2*M + j1] = symmat[j1*M + j2];
}
}
}
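/* Editor note: thread j1 computes the whole strip j2 = j1+1 .. M-1, so the
   work per thread shrinks linearly with j1 -- a known load imbalance of this
   PolyBench kernel (later threads finish early). */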
void correlationCuda(int m, int n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n), DATA_TYPE POLYBENCH_1D(mean, M, m),
DATA_TYPE POLYBENCH_1D(stddev, M, m), DATA_TYPE POLYBENCH_2D(symmat, M, N, m, n),
DATA_TYPE POLYBENCH_2D(symmat_outputFromGpu, M, N, m, n))
{
DATA_TYPE *data_gpu;
DATA_TYPE *stddev_gpu;
DATA_TYPE *mean_gpu;
DATA_TYPE *symmat_gpu;
hipMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * M * N);
hipMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * M * N);
hipMalloc((void **)&stddev_gpu, sizeof(DATA_TYPE) * M);
hipMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * M);
hipMemcpy(data_gpu, data, sizeof(DATA_TYPE) * M * N, hipMemcpyHostToDevice);
hipMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * M * N, hipMemcpyHostToDevice);
hipMemcpy(stddev_gpu, stddev, sizeof(DATA_TYPE) * M, hipMemcpyHostToDevice);
hipMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * M, hipMemcpyHostToDevice);
dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
	dim3 grid1((size_t)ceil(((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1);
dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
	dim3 grid2((size_t)ceil(((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), 1);
dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
	dim3 grid3((size_t)ceil(((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), (size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_KERNEL_3_Y)));
dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
	dim3 grid4((size_t)ceil(((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_4_X)), 1);
/* Start timer. */
polybench_start_instruments;
hipLaunchKernelGGL(( mean_kernel), dim3(grid1), dim3(block1) , 0, 0, m, n, mean_gpu,data_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( std_kernel), dim3(grid2), dim3(block2) , 0, 0, m, n, mean_gpu,stddev_gpu,data_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduce_kernel), dim3(grid3), dim3(block3) , 0, 0, m, n, mean_gpu,stddev_gpu,data_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( corr_kernel), dim3(grid4), dim3(block4) , 0, 0, m, n, symmat_gpu,data_gpu);
hipDeviceSynchronize();
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
DATA_TYPE valueAtSymmatIndexMTimesMPlus1PlusMPoint = 1.0;
hipMemcpy(&(symmat_gpu[(M-1)*M + (M-1)]), &valueAtSymmatIndexMTimesMPlus1PlusMPoint, sizeof(DATA_TYPE), hipMemcpyHostToDevice);
hipMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * M * N, hipMemcpyDeviceToHost);
hipFree(data_gpu);
hipFree(symmat_gpu);
hipFree(stddev_gpu);
hipFree(mean_gpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
int main()
{
int m = M;
int n = N;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat_outputFromGpu,DATA_TYPE,M,N,m,n);
init_arrays(m, n, POLYBENCH_ARRAY(data));
GPU_argv_init();
correlationCuda(m, n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev), POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(symmat_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
correlation(m, n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev), POLYBENCH_ARRAY(symmat));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(m, n, POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(symmat_outputFromGpu));
#else //print output to stderr so no dead code elimination
print_array(m, POLYBENCH_ARRAY(symmat_outputFromGpu));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(symmat_outputFromGpu);
return 0;
}
#include "../../common/polybench.c"
| f9fbb97255972323993980c067e0323fd51e1250.cu | /**
* correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda.h>
#define POLYBENCH_TIME 1
#include "correlation.cuh"
#include "../../common/polybench.h"
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define GPU_DEVICE 0
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#define FLOAT_N 3214212.01f
#define EPS 0.005f
#define RUN_ON_CPU
void init_arrays(int m, int n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n))
{
int i, j;
for (i=0; i < m; i++)
{
for (j=0; j < n; j++)
{
data[i][j] = ((DATA_TYPE) i*j)/ M;
}
}
}
void correlation(int m, int n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n), DATA_TYPE POLYBENCH_1D(mean, M, m), DATA_TYPE POLYBENCH_1D(stddev, M, m),
DATA_TYPE POLYBENCH_2D(symmat, M, N, m, n))
{
int i, j, j1, j2;
// Determine mean of column vectors of input data matrix
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
{
mean[j] += data[i][j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
// Determine standard deviations of column vectors of data matrix.
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
{
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
}
stddev[j] /= FLOAT_N;
stddev[j] = sqrt_of_array_cell(stddev, j);
stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j];
}
// Center and reduce the column vectors.
for (i = 0; i < _PB_N; i++)
{
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
data[i][j] /= (sqrt(FLOAT_N)*stddev[j]) ;
}
}
// Calculate the m * m correlation matrix.
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
{
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
}
symmat[j2][j1] = symmat[j1][j2];
}
}
symmat[M-1][M-1] = 1.0;
}
void compareResults(int m, int n, DATA_TYPE POLYBENCH_2D(symmat, M, N, m, n), DATA_TYPE POLYBENCH_2D(symmat_outputFromGpu, M, N, m, n))
{
int i,j,fail;
fail = 0;
for (i=0; i < m; i++)
{
for (j=0; j < n; j++)
{
if (percentDiff(symmat[i][j], symmat_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void mean_kernel(int m, int n, DATA_TYPE *mean, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < _PB_M)
{
mean[j] = 0.0;
int i;
for(i=0; i < _PB_N; i++)
{
mean[j] += data[i*M + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
}
__global__ void std_kernel(int m, int n, DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < _PB_M)
{
std[j] = 0.0;
int i;
for(i = 0; i < _PB_N; i++)
{
std[j] += (data[i*M + j] - mean[j]) * (data[i*M + j] - mean[j]);
}
std[j] /= (FLOAT_N);
std[j] = sqrt(std[j]);
if(std[j] <= EPS)
{
std[j] = 1.0;
}
}
}
__global__ void reduce_kernel(int m, int n, DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < _PB_N) && (j < _PB_M))
{
data[i*M + j] -= mean[j];
data[i*M + j] /= (sqrt(FLOAT_N) * std[j]);
}
}
__global__ void corr_kernel(int m, int n, DATA_TYPE *symmat, DATA_TYPE *data)
{
int j1 = blockIdx.x * blockDim.x + threadIdx.x;
int i, j2;
if (j1 < (_PB_M-1))
{
symmat[j1*M + j1] = 1.0;
for (j2 = (j1 + 1); j2 < _PB_M; j2++)
{
symmat[j1*M + j2] = 0.0;
for(i = 0; i < _PB_N; i++)
{
symmat[j1*M + j2] += data[i*M + j1] * data[i*M + j2];
}
symmat[j2*M + j1] = symmat[j1*M + j2];
}
}
}
void correlationCuda(int m, int n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n), DATA_TYPE POLYBENCH_1D(mean, M, m),
DATA_TYPE POLYBENCH_1D(stddev, M, m), DATA_TYPE POLYBENCH_2D(symmat, M, N, m, n),
DATA_TYPE POLYBENCH_2D(symmat_outputFromGpu, M, N, m, n))
{
DATA_TYPE *data_gpu;
DATA_TYPE *stddev_gpu;
DATA_TYPE *mean_gpu;
DATA_TYPE *symmat_gpu;
cudaMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * M * N);
cudaMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * M * N);
cudaMalloc((void **)&stddev_gpu, sizeof(DATA_TYPE) * M);
cudaMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * M);
cudaMemcpy(data_gpu, data, sizeof(DATA_TYPE) * M * N, cudaMemcpyHostToDevice);
cudaMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * M * N, cudaMemcpyHostToDevice);
cudaMemcpy(stddev_gpu, stddev, sizeof(DATA_TYPE) * M, cudaMemcpyHostToDevice);
cudaMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * M, cudaMemcpyHostToDevice);
dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
	dim3 grid1((size_t)ceil(((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1);
dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
	dim3 grid2((size_t)ceil(((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), 1);
dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
	dim3 grid3((size_t)ceil(((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), (size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_KERNEL_3_Y)));
dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
	dim3 grid4((size_t)ceil(((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_4_X)), 1);
/* Start timer. */
polybench_start_instruments;
mean_kernel<<< grid1, block1 >>>(m, n, mean_gpu,data_gpu);
cudaThreadSynchronize();
std_kernel<<< grid2, block2 >>>(m, n, mean_gpu,stddev_gpu,data_gpu);
cudaThreadSynchronize();
reduce_kernel<<< grid3, block3 >>>(m, n, mean_gpu,stddev_gpu,data_gpu);
cudaThreadSynchronize();
corr_kernel<<< grid4, block4 >>>(m, n, symmat_gpu,data_gpu);
cudaThreadSynchronize();
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
DATA_TYPE valueAtSymmatIndexMTimesMPlus1PlusMPoint = 1.0;
cudaMemcpy(&(symmat_gpu[(M-1)*M + (M-1)]), &valueAtSymmatIndexMTimesMPlus1PlusMPoint, sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
cudaMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * M * N, cudaMemcpyDeviceToHost);
cudaFree(data_gpu);
cudaFree(symmat_gpu);
cudaFree(stddev_gpu);
cudaFree(mean_gpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
int main()
{
int m = M;
int n = N;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat_outputFromGpu,DATA_TYPE,M,N,m,n);
init_arrays(m, n, POLYBENCH_ARRAY(data));
GPU_argv_init();
correlationCuda(m, n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev), POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(symmat_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
correlation(m, n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev), POLYBENCH_ARRAY(symmat));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(m, n, POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(symmat_outputFromGpu));
#else //print output to stderr so no dead code elimination
print_array(m, POLYBENCH_ARRAY(symmat_outputFromGpu));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(symmat_outputFromGpu);
return 0;
}
#include "../../common/polybench.c"
|
654b41547684ffd75d3eb01cce65c78c6129b588.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "TopBottomBound2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Hs = NULL;
hipMalloc(&Hs, XSIZE*YSIZE*sizeof(double));
double *Ztopo = NULL;
hipMalloc(&Ztopo, XSIZE*YSIZE*sizeof(double));
double *K2n = NULL;
hipMalloc(&K2n, XSIZE*YSIZE*sizeof(double));
double *K2s = NULL;
hipMalloc(&K2s, XSIZE*YSIZE*sizeof(double));
int BC2D = 1;
int M = 2;
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(TopBottomBound2D, dim3(gridBlock), dim3(threadBlock), 0, 0, Hs, Ztopo, K2n, K2s, BC2D, M, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(TopBottomBound2D, dim3(gridBlock), dim3(threadBlock), 0, 0, Hs, Ztopo, K2n, K2s, BC2D, M, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(TopBottomBound2D, dim3(gridBlock), dim3(threadBlock), 0, 0, Hs, Ztopo, K2n, K2s, BC2D, M, N);
}
// launches are asynchronous: synchronize so the timer covers completed kernels
hipDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 654b41547684ffd75d3eb01cce65c78c6129b588.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "TopBottomBound2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Hs = NULL;
cudaMalloc(&Hs, XSIZE*YSIZE*sizeof(double));
double *Ztopo = NULL;
cudaMalloc(&Ztopo, XSIZE*YSIZE*sizeof(double));
double *K2n = NULL;
cudaMalloc(&K2n, XSIZE*YSIZE*sizeof(double));
double *K2s = NULL;
cudaMalloc(&K2s, XSIZE*YSIZE*sizeof(double));
int BC2D = 1;
int M = 2;
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
TopBottomBound2D<<<gridBlock,threadBlock>>>(Hs,Ztopo,K2n,K2s,BC2D,M,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
TopBottomBound2D<<<gridBlock,threadBlock>>>(Hs,Ztopo,K2n,K2s,BC2D,M,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
TopBottomBound2D<<<gridBlock,threadBlock>>>(Hs,Ztopo,K2n,K2s,BC2D,M,N);
}
// launches are asynchronous: synchronize so the timer covers completed kernels
cudaDeviceSynchronize();
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bca8ec5227bc1179bb7ad1120b191b904dafa9c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "j3d27pt-64x16-3-256_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
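  /* __reg_k_{0,1,2} hold this thread's values from three consecutive c1-planes
     for fused time step k; the suffixes rotate as the sweep advances. */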
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
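  /* Double-buffered shared-memory plane: __DB_SWITCH() flips __a_sb between the
     two halves so a new plane can be staged without a second barrier against
     threads still reading the previous one. */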
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
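/* A holds two time copies of the grid selected by c0 parity: __LOAD reads the
   (c0 % 2) copy and __STORE/__DEST writes the ((c0 + 1) % 2) copy. */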
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
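/* Each loaded plane feeds three outputs at once: __CALCEXPR_0/1/2 apply the
   three per-plane 9-point masks, and accumulating them across three
   consecutive c1-planes (via the rotating out-registers) completes the full
   27-point update. */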
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
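  /* Pipeline prologue: prime the registers with the first seven c1-planes; the
     boundary block (c1Id == 0) also emits the first output planes that interior
     blocks would otherwise receive from a neighbour's overlap. */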
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(1, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
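    /* Steady state: each iteration loads one new plane, advances all three
       fused time steps, and stores the finished plane three behind the load;
       the body is unrolled by 3 to rotate the register triples in place. */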
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
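    /* Epilogue: drain the register pipeline for the final 1-3 planes of this
       block's c1 range, substituting boundary registers for missing loads. */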
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__STORE(__h + 1, __reg_3_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
}
}
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
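  /* Two-step variant (__side0Len == 2): same register-streaming scheme as
     kernel0_3 with one fewer fused stage and a smaller halo overlap. */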
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 60;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(1, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__STORE(__h + 1, __reg_2_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
}
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
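  /* Single-step variant (__side0Len == 1): one plain 27-point update per
     sweep, with no temporal fusion. */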
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
}
}
| bca8ec5227bc1179bb7ad1120b191b904dafa9c0.cu | #include "j3d27pt-64x16-3-256_kernel.hu"
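/* CUDA original of the hipified file above; apart from the runtime header,
   the kernel bodies are identical. */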
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 58;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(1, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__STORE(__h + 1, __reg_3_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
}
}
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 60;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(1, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__STORE(__h + 1, __reg_2_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
}
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 14;
const AN5D_TYPE __side3Len = 62;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
}
}
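/* Structure of the AN5D-generated kernels above: each thread keeps a
3-deep register rotation (__reg_1_0/1/2) along the c1 axis, loads one
plane per step with __LOAD, applies the shared-memory halo stencil via
__CALC1, and commits a finished plane behind the load front with __STORE.
kernel0_1 has __side0Len == 1, i.e. it fuses a single time step; the
two-step variant above it additionally pipelines __reg_2_0/1/2. */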
|
02326edfecabb92814e37a9a432acd67d83335c3.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
bool timeInProg = false;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// **SCAN KERNS** Using textbook and slide formulas
__global__ void kernScanUpSweep(int n, int d, int *idata) { // BEFORE Warp Partitioning:
int index = (blockIdx.x * blockDim.x) + threadIdx.x; // int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// if (index < n) {
int exp = (int) powf(2.f, float(d) + 1.f); // int exp = (int)powf(2.f, float(d) + 1.f);
int offset = index * exp + exp - 1; // if (index % exp == 0) {
// idata[index + exp - 1] += idata[index + (exp / 2) - 1];
if (offset < n) { // }
idata[offset] += idata[offset - exp / 2]; // }
}
}
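/* Warp-partitioned up-sweep: thread t owns offset = t*2^(d+1) + 2^(d+1) - 1,
so active threads stay contiguous instead of passing the strided
`index % exp == 0` test shown in the old code at right. Worked example for
n = 8, d = 1 (exp = 4): t = 0 -> idata[3] += idata[1],
t = 1 -> idata[7] += idata[5]. */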
__global__ void kernSetRootZero(int n, int *idata) { // Had to be separate from Down
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index == n - 1) {
idata[index] = 0;
}
}
__global__ void kernScanDownSweep(int n, int d, int *idata) { // BEFORE Warp Partitioning:
int index = (blockIdx.x * blockDim.x) + threadIdx.x; // int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// if (index < n) {
int exp = (int)powf(2.f, float(d) + 1.f); // int exp = (int)powf(2.f, float(d) + 1.f);
int offset = index * exp + exp - 1;
// if (index % exp == 0) {
if (offset < n) { // int t = idata[index + (exp / 2) - 1];
int t = idata[offset - exp / 2]; // idata[index + (exp / 2) - 1] = idata[index + exp - 1];
idata[offset - exp / 2] = idata[offset]; // idata[index + exp - 1] += t;
idata[offset] += t; // }
} // }
}
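/* Down-sweep mirrors the up-sweep indexing: at each level the owning
thread swaps-and-adds the pair (offset - exp/2, offset). For n = 8 the
first level is d = 2 (exp = 8), where only t = 0 runs:
t = idata[3]; idata[3] = idata[7]; idata[7] += t. */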
// **COMPACT KERNS**
__global__ void kernMapTo01(int n, int *data01, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
data01[index] = (idata[index] == 0 ? 0 : 1);
}
}
__global__ void kernScatter(int n, int *odata, const int *scanResult,
const int *data01, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
if (data01[index] == 1) {
odata[scanResult[index]] = idata[index];
}
}
}
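/* Compaction pipeline in miniature: idata = {3, 0, 5, 0, 2}
-> kernMapTo01: data01 = {1, 0, 1, 0, 1}
-> exclusive scan: scanResult = {0, 1, 1, 2, 2}
-> kernScatter: odata = {3, 5, 2}. */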
// For radix sort
void changeTimeBool(bool time) {
timeInProg = time;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int log2 = ilog2ceil(n);
int newLength = (int) pow(2, log2);
int* idataExtend = new int[newLength];
// Copying info from idata
for (int i = 0; i < n; i++) {
idataExtend[i] = idata[i];
}
for (int i = n; i < newLength; i++) {
idataExtend[i] = 0;
}
int* dev_idata;
hipMalloc((void**)&dev_idata, newLength * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_idata failed!", "efficient.cu", 93);
// Copying the input array to the device
hipMemcpy(dev_idata, idataExtend, newLength * sizeof(int), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy to input-copy failed!", "efficient.cu", 97);
// START
if (!timeInProg) { timer().startGpuTimer(); }
// Up Sweep
for (int d = 0; d <= log2 - 1; d++) {
kernScanUpSweep << < newLength, blockSize >> > (newLength, d, dev_idata);
checkCUDAErrorFn("kernScanUpSweep failed!", "efficient.cu", 105);
}
// Setting root to zero
kernSetRootZero << < newLength, blockSize >> > (newLength, dev_idata);
// Down Sweep
for (int d = log2 - 1; d >= 0; d--) {
kernScanDownSweep << < newLength, blockSize >> > (newLength, d, dev_idata);
checkCUDAErrorFn("kernScanDownSweep failed!", "efficient.cu", 112);
}
// END
if (!timeInProg) { timer().endGpuTimer(); }
// Copying the result back to the output array
hipMemcpy(odata, dev_idata, n * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy to output failed!", "efficient.cu", 120);
delete[] idataExtend;
hipFree(dev_idata);
}
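/* Minimal host-side usage sketch (hypothetical values, not from the
original tests):
int in[5] = {3, 1, 7, 0, 4};
int out[5];
StreamCompaction::Efficient::scan(5, out, in);
// out holds the exclusive prefix sum: {0, 3, 4, 11, 11}
Note the launches above use `newLength` blocks of `blockSize` threads;
the bounds check inside each kernel keeps the surplus threads idle. */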
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
// Arrays to Use:
int* dev_idata; // Device copy of idata
int* dev_binaryMap; // 0-1 vers of idata, memcpy'ed into iScanData
int* dev_scanResult; // Device copy of post-scan binary map (oScanData)
int* dev_odata; // Output result
int* iScanData = new int[n]; // 0-1 input to scan()
int* oScanData = new int[n]; // scan result
// Allocate memory
hipMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_idata failed!", "efficient.cu", 148);
hipMalloc((void**)&dev_binaryMap, n * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_binaryMap failed!", "efficient.cu", 151);
hipMalloc((void**)&dev_scanResult, n * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_scanResult failed!", "efficient.cu", 154);
hipMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_odata failed!", "efficient.cu", 157);
// Copying the input array to the device
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy to input-copy failed!", "efficient.cu", 161);
// START
timer().startGpuTimer();
timeInProg = true;
// Compute temporary array of 0's and 1's
kernMapTo01 << < n, blockSize >> > (n, dev_binaryMap, dev_idata);
// Move this data back to host and pass into scan(), then copy data back
hipMemcpy(iScanData, dev_binaryMap, n * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy to 01-scan failed!", "efficient.cu", 173);
scan(n, oScanData, iScanData);
hipMemcpy(dev_scanResult, oScanData, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy to scanResult failed!", "efficient.cu", 178);
// Pass data into scatter
kernScatter << < n, blockSize >> > (n, dev_odata, dev_scanResult,
dev_binaryMap, dev_idata);
// END
timer().endGpuTimer();
timeInProg = false;
// Finding new length
int newLength = (iScanData[n - 1] == 0 ? oScanData[n - 1] : oScanData[n - 1] + 1);
// Copying the result back to the output array
hipMemcpy(odata, dev_odata, newLength * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy to output failed!", "efficient.cu", 194);
hipFree(dev_idata);
hipFree(dev_binaryMap);
hipFree(dev_scanResult);
hipFree(dev_odata);
delete[] iScanData;
delete[] oScanData;
return newLength;
}
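/* Usage sketch (hypothetical values): discarding zeros without a CPU
filter pass:
int in[6] = {1, 0, 2, 0, 0, 3};
int out[6];
int kept = StreamCompaction::Efficient::compact(6, out, in);
// kept == 3, out begins {1, 2, 3} */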
}
}
| 02326edfecabb92814e37a9a432acd67d83335c3.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
bool timeInProg = false;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// **SCAN KERNS** Using textbook and slide formulas
__global__ void kernScanUpSweep(int n, int d, int *idata) { // BEFORE Warp Partitioning:
int index = (blockIdx.x * blockDim.x) + threadIdx.x; // int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// if (index < n) {
int exp = (int) powf(2.f, float(d) + 1.f); // int exp = (int)powf(2.f, float(d) + 1.f);
int offset = index * exp + exp - 1; // if (index % exp == 0) {
// idata[index + exp - 1] += idata[index + (exp / 2) - 1];
if (offset < n) { // }
idata[offset] += idata[offset - exp / 2]; // }
}
}
__global__ void kernSetRootZero(int n, int *idata) { // Had to be separate from Down
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index == n - 1) {
idata[index] = 0;
}
}
__global__ void kernScanDownSweep(int n, int d, int *idata) { // BEFORE Warp Partitioning:
int index = (blockIdx.x * blockDim.x) + threadIdx.x; // int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// if (index < n) {
int exp = (int)powf(2.f, float(d) + 1.f); // int exp = (int)powf(2.f, float(d) + 1.f);
int offset = index * exp + exp - 1;
// if (index % exp == 0) {
if (offset < n) { // int t = idata[index + (exp / 2) - 1];
int t = idata[offset - exp / 2]; // idata[index + (exp / 2) - 1] = idata[index + exp - 1];
idata[offset - exp / 2] = idata[offset]; // idata[index + exp - 1] += t;
idata[offset] += t; // }
} // }
}
// **COMPACT KERNS**
__global__ void kernMapTo01(int n, int *data01, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
data01[index] = (idata[index] == 0 ? 0 : 1);
}
}
__global__ void kernScatter(int n, int *odata, const int *scanResult,
const int *data01, const int *idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n) {
if (data01[index] == 1) {
odata[scanResult[index]] = idata[index];
}
}
}
// For radix sort
void changeTimeBool(bool time) {
timeInProg = time;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int log2 = ilog2ceil(n);
int newLength = (int) pow(2, log2);
int* idataExtend = new int[newLength];
// Copying info from idata
for (int i = 0; i < n; i++) {
idataExtend[i] = idata[i];
}
for (int i = n; i < newLength; i++) {
idataExtend[i] = 0;
}
int* dev_idata;
cudaMalloc((void**)&dev_idata, newLength * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_idata failed!", "efficient.cu", 93);
// Copying the input array to the device
cudaMemcpy(dev_idata, idataExtend, newLength * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy to input-copy failed!", "efficient.cu", 97);
// START
if (!timeInProg) { timer().startGpuTimer(); }
// Up Sweep
for (int d = 0; d <= log2 - 1; d++) {
kernScanUpSweep << < newLength, blockSize >> > (newLength, d, dev_idata);
checkCUDAErrorFn("kernScanUpSweep failed!", "efficient.cu", 105);
}
// Setting root to zero
kernSetRootZero << < newLength, blockSize >> > (newLength, dev_idata);
// Down Sweep
for (int d = log2 - 1; d >= 0; d--) {
kernScanDownSweep << < newLength, blockSize >> > (newLength, d, dev_idata);
checkCUDAErrorFn("kernScanDownSweep failed!", "efficient.cu", 112);
}
// END
if (!timeInProg) { timer().endGpuTimer(); }
// Copying the result back to the output array
cudaMemcpy(odata, dev_idata, n * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy to output failed!", "efficient.cu", 120);
delete[] idataExtend;
cudaFree(dev_idata);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
// Arrays to Use:
int* dev_idata; // Device copy of idata
int* dev_binaryMap; // 0-1 vers of idata, memcpy'ed into iScanData
int* dev_scanResult; // Device copy of post-scan binary map (oScanData)
int* dev_odata; // Output result
int* iScanData = new int[n]; // 0-1 input to scan()
int* oScanData = new int[n]; // scan result
// Allocate memory
cudaMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_idata failed!", "efficient.cu", 148);
cudaMalloc((void**)&dev_binaryMap, n * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_binaryMap failed!", "efficient.cu", 151);
cudaMalloc((void**)&dev_scanResult, n * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_scanResult failed!", "efficient.cu", 154);
cudaMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_odata failed!", "efficient.cu", 157);
// Copying the input array to the device
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy to input-copy failed!", "efficient.cu", 161);
// START
timer().startGpuTimer();
timeInProg = true;
// Compute temporary array of 0's and 1's
kernMapTo01 << < n, blockSize >> > (n, dev_binaryMap, dev_idata);
// Move this data back to host and pass into scan(), then copy data back
cudaMemcpy(iScanData, dev_binaryMap, n * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy to 01-scan failed!", "efficient.cu", 173);
scan(n, oScanData, iScanData);
cudaMemcpy(dev_scanResult, oScanData, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy to scanResult failed!", "efficient.cu", 178);
// Pass data into scatter
kernScatter << < n, blockSize >> > (n, dev_odata, dev_scanResult,
dev_binaryMap, dev_idata);
// END
timer().endGpuTimer();
timeInProg = false;
// Finding new length
int newLength = (iScanData[n - 1] == 0 ? oScanData[n - 1] : oScanData[n - 1] + 1);
// Copying the result back to the output array
cudaMemcpy(odata, dev_odata, newLength * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy to output failed!", "efficient.cu", 194);
cudaFree(dev_idata);
cudaFree(dev_binaryMap);
cudaFree(dev_scanResult);
cudaFree(dev_odata);
delete[] iScanData;
delete[] oScanData;
return newLength;
}
}
}
|
909f285f3a1043bb560236cf7c120dada2fb6125.hip | // !!! This is a file automatically generated by hipify!!!
#include "lock_constraint_kernel.hpp"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math_constants.h>
#include "yuzu/foundation/memory/RelativePointer.hpp"
#include "yuzu/foundation/memory/pointer.hpp"
#include "yuzu/domain/boundary_conditions/BoundaryConditionData.hpp"
#include "yuzu/common/gpu.hpp"
#include "yuzu/utils/kernel_utils.hpp"
#define DOF_STATUS_FREE 0
namespace ay = axis::yuzu;
namespace ayfm = axis::yuzu::foundation::memory;
namespace aydbc = axis::yuzu::domain::boundary_conditions;
struct LockGPUData {
real ReleaseTime;
};
__global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK)
ApplyLockOnGPUKernel(uint64 numThreads, uint64 startIndex,
void *baseMemoryAddressOnGPU, real time,
axis::yuzu::foundation::memory::RelativePointer vectorMaskPtr)
{
uint64 index = ay::GetThreadIndex(gridDim, blockIdx, blockDim,
threadIdx, startIndex);
if (!ay::IsActiveThread(index, numThreads)) return;
aydbc::BoundaryConditionData bcData(baseMemoryAddressOnGPU, index, sizeof(LockGPUData));
real *bucket = bcData.GetOutputBucket();
LockGPUData *data = (LockGPUData *)bcData.GetCustomData();
const real releaseTime = data->ReleaseTime;
uint64 dofId = bcData.GetDofId();
*bucket = 0;
if (releaseTime >= 0 && time > releaseTime)
{ // past the release time: flag the DOF as free so the BC is treated as inactive
char *vectorMask = axis::yabsptr<char>(vectorMaskPtr);
vectorMask[dofId] = DOF_STATUS_FREE;
}
}
void axis::domain::boundary_conditions::ApplyLockOnGPU(
uint64 numThreadsToUse, uint64 startIndex, void *baseMemoryAddressOnGPU,
const axis::Dimension3D& gridDim, const axis::Dimension3D& blockDim,
void * streamPtr, real time,
axis::foundation::memory::RelativePointer vectorMaskPtr )
{
dim3 grid, block;
grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z;
block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z;
hipLaunchKernelGGL(( ApplyLockOnGPUKernel), dim3(grid), dim3(block), 0, (hipStream_t)streamPtr,
numThreadsToUse, startIndex, baseMemoryAddressOnGPU,
time, reinterpret_cast<ayfm::RelativePointer&>(vectorMaskPtr));
}
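// Usage sketch (hedged -- the axis::Dimension3D constructor shown here is
// an assumption; only its X/Y/Z members are read above): a 1-D launch over
// `numDofs` boundary-condition entries might look like
// axis::Dimension3D grid((numDofs + 255) / 256, 1, 1), block(256, 1, 1);
// ApplyLockOnGPU(numDofs, 0, gpuBaseAddr, grid, block, stream, t, maskPtr);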
| 909f285f3a1043bb560236cf7c120dada2fb6125.cu | #include "lock_constraint_kernel.hpp"
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math_constants.h>
#include "yuzu/foundation/memory/RelativePointer.hpp"
#include "yuzu/foundation/memory/pointer.hpp"
#include "yuzu/domain/boundary_conditions/BoundaryConditionData.hpp"
#include "yuzu/common/gpu.hpp"
#include "yuzu/utils/kernel_utils.hpp"
#define DOF_STATUS_FREE 0
namespace ay = axis::yuzu;
namespace ayfm = axis::yuzu::foundation::memory;
namespace aydbc = axis::yuzu::domain::boundary_conditions;
struct LockGPUData {
real ReleaseTime;
};
__global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK)
ApplyLockOnGPUKernel(uint64 numThreads, uint64 startIndex,
void *baseMemoryAddressOnGPU, real time,
axis::yuzu::foundation::memory::RelativePointer vectorMaskPtr)
{
uint64 index = ay::GetThreadIndex(gridDim, blockIdx, blockDim,
threadIdx, startIndex);
if (!ay::IsActiveThread(index, numThreads)) return;
aydbc::BoundaryConditionData bcData(baseMemoryAddressOnGPU, index, sizeof(LockGPUData));
real *bucket = bcData.GetOutputBucket();
LockGPUData *data = (LockGPUData *)bcData.GetCustomData();
const real releaseTime = data->ReleaseTime;
uint64 dofId = bcData.GetDofId();
*bucket = 0;
if (releaseTime >= 0 && time > releaseTime)
{ // past the release time: flag the DOF as free so the BC is treated as inactive
char *vectorMask = axis::yabsptr<char>(vectorMaskPtr);
vectorMask[dofId] = DOF_STATUS_FREE;
}
}
void axis::domain::boundary_conditions::ApplyLockOnGPU(
uint64 numThreadsToUse, uint64 startIndex, void *baseMemoryAddressOnGPU,
const axis::Dimension3D& gridDim, const axis::Dimension3D& blockDim,
void * streamPtr, real time,
axis::foundation::memory::RelativePointer vectorMaskPtr )
{
dim3 grid, block;
grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z;
block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z;
ApplyLockOnGPUKernel<<<grid, block, 0, (cudaStream_t)streamPtr>>>(
numThreadsToUse, startIndex, baseMemoryAddressOnGPU,
time, reinterpret_cast<ayfm::RelativePointer&>(vectorMaskPtr));
}
|
f701f77cedfce7d9368dace4419ab02df793c905.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S3_1.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
// the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6786130423415,0.00126001338762619,0.782408157036293,0.782291847999983,0.000172030269485783,0.486238827509178,0.00291727730193292,0.999998383985951,1.89824548377254e-08,1.86344423509529e-05,0.999775323244343,1.00730731658865,0.999997712256941,4.01726135419286e-05,0.574990955233012,10.0471666878941,139.498531828078};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
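/* Note on the update loop above: RHS_gpu returns the fully advanced state
in rDY (rDY[0] is already svolt + dt*(-sItot), and the gates come back as
closed-form Rush-Larsen updates), so the copy loop over all NEQ rows is
what commits the step -- the separate voltage accumulation line before it
is overwritten when i == 0. */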
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.2009764772355,0.000315352885461227,0.000118695429886935,0.000239737901440067,0.231421905214242,0.144186320197361,0.137295429829565,4.48864047496363,0.0113697727065106,1.62322439191857,1100,0.000603973891374754,0.206433638678485,0.0181267509566151,0.00125480641274188,3.83309725808045e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
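/* The gate updates above are the Rush-Larsen scheme: each gate satisfies
dy/dt = (y_inf - y)/tau_y, whose exact one-step solution is
y(t+dt) = y_inf - (y_inf - y(t)) * exp(-dt/tau_y),
which is unconditionally stable for these stiff gating ODEs. */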
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| f701f77cedfce7d9368dace4419ab02df793c905.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S3_1.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
// the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6786130423415,0.00126001338762619,0.782408157036293,0.782291847999983,0.000172030269485783,0.486238827509178,0.00291727730193292,0.999998383985951,1.89824548377254e-08,1.86344423509529e-05,0.999775323244343,1.00730731658865,0.999997712256941,4.01726135419286e-05,0.574990955233012,10.0471666878941,139.498531828078};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.2009764772355,0.000315352885461227,0.000118695429886935,0.000239737901440067,0.231421905214242,0.144186320197361,0.137295429829565,4.48864047496363,0.0113697727065106,1.62322439191857,1100,0.000603973891374754,0.206433638678485,0.0181267509566151,0.00125480641274188,3.83309725808045e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
acff2c46a3b4dcd77a2d95b3f4e79106020daf3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<fstream>
#include<stdio.h>
#include<iostream>
long long int read_file_to_memmory(FILE *pInfile , int *pPointer)
{
if(pInfile != NULL)
{
int mIndex =0;
int mSize = fread(pPointer+mIndex,1,sizeof(int),pInfile);
long long int mFileSize=0;
while(mSize!= 0)
{
mFileSize = mFileSize +mSize;
++mIndex;
mSize = fread(pPointer+mIndex,1,mSize,pInfile);
}
return mFileSize;
}
return 0;
}
long long int write_file_from_memmory(FILE *pOutFile , int *pPointer,long long int pFileSize)
{
if(pOutFile!=NULL)
{
pFileSize = fwrite(pPointer,1,pFileSize,pOutFile);
return pFileSize;
}
return 0;
}
__global__ void generate_decrypted(int *pDataPointer , int *pRandomData , int *pEncryptedData , long long int pSize)
{
long long int index = blockIdx.x * blockDim.x + threadIdx.x;
if( index <=(pSize /sizeof(int) ))
{
(*(pEncryptedData+index)) = (*(pDataPointer+ index))^(*(pRandomData+index));
}
else
return;
}
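/* XOR keystream property the kernel relies on: decryption is the same
operation as encryption, since (p ^ k) ^ k == p for any bit pattern.
E.g. 0b1010 ^ 0b0110 = 0b1100, and 0b1100 ^ 0b0110 = 0b1010 again. */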
int main(int argc , char *argv[])
{
FILE *inFile;
FILE *outFile;
FILE *keyFile;
inFile = fopen("enc","rb");
keyFile = fopen("key","rb");
outFile = fopen(argv[1],"wb");
int *encryptedDataPointer = new int[268435456];
long long int fileSize = read_file_to_memmory(inFile,encryptedDataPointer);
int *keyDataPointer = new int[fileSize/sizeof(int) +100];
int *decryptedDataPointer = new int[fileSize/sizeof(int) +100];
fileSize = read_file_to_memmory(keyFile,keyDataPointer);
int *d_encryptedDataPointer;
hipMalloc((void**)&d_encryptedDataPointer,fileSize);
int *d_keyPointer;
hipMalloc((void**)&d_keyPointer,fileSize);
int *d_decryptedDataPointer;
hipMalloc((void**)&d_decryptedDataPointer,fileSize);
hipMemcpy(d_encryptedDataPointer,encryptedDataPointer,fileSize,hipMemcpyHostToDevice);
hipMemcpy(d_keyPointer , keyDataPointer,fileSize,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( generate_decrypted), dim3(fileSize/64 +1) ,dim3(64), 0, 0, d_encryptedDataPointer , d_keyPointer , d_decryptedDataPointer ,fileSize);
hipMemcpy(decryptedDataPointer,d_decryptedDataPointer,fileSize,hipMemcpyDeviceToHost);
fileSize = write_file_from_memmory(outFile,decryptedDataPointer,fileSize);
fclose(inFile);
fclose(outFile);
fclose(keyFile);
}
| acff2c46a3b4dcd77a2d95b3f4e79106020daf3a.cu | #include<fstream>
#include<stdio.h>
#include<iostream>
long long int read_file_to_memmory(FILE *pInfile , int *pPointer)
{
if(pInfile != NULL)
{
int mIndex =0;
int mSize = fread(pPointer+mIndex,1,sizeof(int),pInfile);
long long int mFileSize=0;
while(mSize!= 0)
{
mFileSize = mFileSize +mSize;
++mIndex;
mSize = fread(pPointer+mIndex,1,mSize,pInfile);
}
return mFileSize;
}
return 0;
}
long long int write_file_from_memmory(FILE *pOutFile , int *pPointer,long long int pFileSize)
{
if(pOutFile!=NULL)
{
pFileSize = fwrite(pPointer,1,pFileSize,pOutFile);
return pFileSize;
}
return 0;
}
__global__ void generate_decrypted(int *pDataPointer , int *pRandomData , int *pEncryptedData , long long int pSize)
{
long long int index = blockIdx.x * blockDim.x + threadIdx.x;
if( index <=(pSize /sizeof(int) ))
{
(*(pEncryptedData+index)) = (*(pDataPointer+ index))^(*(pRandomData+index));
}
else
return;
}
int main(int argc , char *argv[])
{
FILE *inFile;
FILE *outFile;
FILE *keyFile;
inFile = fopen("enc","rb");
keyFile = fopen("key","rb");
outFile = fopen(argv[1],"wb");
int *encryptedDataPointer = new int[268435456];
long long int fileSize = read_file_to_memmory(inFile,encryptedDataPointer);
int *keyDataPointer = new int[fileSize/sizeof(int) +100];
int *decryptedDataPointer = new int[fileSize/sizeof(int) +100];
fileSize = read_file_to_memmory(keyFile,keyDataPointer);
int *d_encryptedDataPointer;
cudaMalloc((void**)&d_encryptedDataPointer,fileSize);
int *d_keyPointer;
cudaMalloc((void**)&d_keyPointer,fileSize);
int *d_decryptedDataPointer;
cudaMalloc((void**)&d_decryptedDataPointer,fileSize);
cudaMemcpy(d_encryptedDataPointer,encryptedDataPointer,fileSize,cudaMemcpyHostToDevice);
cudaMemcpy(d_keyPointer , keyDataPointer,fileSize,cudaMemcpyHostToDevice);
generate_decrypted<<<fileSize/64 +1 ,64>>>(d_encryptedDataPointer , d_keyPointer , d_decryptedDataPointer ,fileSize);
cudaMemcpy(decryptedDataPointer,d_decryptedDataPointer,fileSize,cudaMemcpyDeviceToHost);
fileSize = write_file_from_memmory(outFile,decryptedDataPointer,fileSize);
fclose(inFile);
fclose(outFile);
fclose(keyFile);
}
|
2ebfcebeb62a1423b9c05ebb88cf66159f60b72d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <float.h>
#include "gpu_node_funcs.h"
#include "gpu_traversal.h"
extern "C" {
#include "voxel.h"
}
void gpu_errchk(const char *file, int line, hipError_t cuErr);
#define test(func) gpu_errchk(__FILE__,__LINE__,func)
//__device__ __constant__ node const_tree[4217];
texture<short, 3, hipReadModeElementType> tex;
hipArray *volume_array;
__device__ int gpu_longRangeContext(short *voxels, char *coords, float threshold, int a, int b, int c, int x, int y, int z) {
//int pos = (c * y * x) + (b * x) + a;
if(voxels[a] < threshold)
return LEFT;
return RIGHT;
}
// TODO
__device__ int gpu_coronal(short voxel, int y_center, int z_center, float threshold) {
if((voxel - y_center) < threshold)
return LEFT;
return RIGHT;
}
#define treenode const_tree[cur]
__global__ /*__launch_bounds__(512, 4)*/ void gpu_traverse(node *tree, leaf *leaves, short *voxels, short *results, int x, int y, int z) {
int cur;
int i, j, k;
int a, b, c;
int pos, alt_pos;
char res;
/* XXX i-j-k = z-y-x */
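	/* NOTE on the branches below (behavior preserved as-is):
	 * - LONGRANGECONTEXT wraps a and b with `& (x-1)`, which assumes the x
	 *   dimension is a power of two and, for the y-coordinate b, that x == y.
	 * - CORONAL compares (x - (y>>1)) -- volume dimensions, not per-thread
	 *   coordinates -- so every thread takes the same branch; this looks
	 *   suspect but is left unchanged. */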
i = blockIdx.z;
/*if(blockIdx.y & 3 == 1) {
j = (blockIdx.y / 4);
k = threadIdx.x + 128;
} else if(blockIdx.y & 3 == 2) {
j = (blockIdx.y / 4);
k = threadIdx.x + 256;
} else if(blockIdx.y & 3 == 3) {
j = (blockIdx.y / 4);
k = threadIdx.x + 384;
} else {
j = (blockIdx.y / 4);
k = threadIdx.x;
}*/
j = blockIdx.y;
k = threadIdx.x;
cur = 0;
pos = (i * x * y) + (j * x) + k;
while(1) {
if(treenode.type == LONGRANGECONTEXT) {
a = (k + treenode.arguments[0]) & (x-1);
b = (j + treenode.arguments[1]) & (x-1);
c = i + treenode.arguments[2];
if(c < 0)
c += z;
else if (c >= z)
c -= z;
alt_pos = (c * y * x) + (b * x) + a;
res = voxels[alt_pos] < treenode.threshold ? LEFT : RIGHT;
cur = treenode.children[res];
}
else if(treenode.type == CORONAL) {
res = (x - (y>>1)) < treenode.threshold ? LEFT : RIGHT;
cur = treenode.children[res];
}
else {
break;
}
}
results[pos] = treenode.arguments[0]; //leaf number/ID
}
hipError_t copy_const(node *tree, hipStream_t stream) {
//printf("Host root type: %d\n", tree[0].type);
return hipMemcpyToSymbolAsync(const_tree, tree, 4217 * sizeof(node), 0,
hipMemcpyHostToDevice, stream);
}
hipError_t allocate_texture(short *volume) {
tex.addressMode[0] = hipAddressModeWrap;
tex.addressMode[1] = hipAddressModeWrap;
tex.addressMode[2] = hipAddressModeWrap;
tex.filterMode = hipFilterModeLinear;
tex.normalized = false;
hipChannelFormatDesc format = {16, 0, 0, 0, hipChannelFormatKindSigned};
hipExtent extent = {512, 512, 525};
test(hipMalloc3DArray(&volume_array, &format, extent, 0));
hipMemcpy3DParms params = {0};
int pitch = sizeof(float);
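	// NOTE: pitch is sizeof(float) here, but for a 512-wide volume of shorts a
	// row pitch would normally be 512 * sizeof(short); kept as in the source.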
params.srcPtr = make_hipPitchedPtr((void*) volume, pitch, 512*512*525, 1);
params.dstArray = volume_array;
params.extent = extent;
params.kind = hipMemcpyHostToDevice;
test(hipMemcpy3D(¶ms));
//test(hipMemcpyToArray(volume_array, 0, 0, volume, 512*512*525, hipMemcpyHostToDevice));
return hipBindTextureToArray(tex, volume_array, format);
}
| 2ebfcebeb62a1423b9c05ebb88cf66159f60b72d.cu | #include <float.h>
#include "gpu_node_funcs.h"
#include "gpu_traversal.h"
extern "C" {
#include "voxel.h"
}
void gpu_errchk(const char *file, int line, cudaError cuErr);
#define test(func) gpu_errchk(__FILE__,__LINE__,func)
//__device__ __constant__ node const_tree[4217];
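// NOTE: the traversal below (via the `treenode` macro) and copy_const() both
// reference const_tree, so with this definition commented out the symbol is
// presumably provided by gpu_traversal.h.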
texture<short, 3, cudaReadModeElementType> tex;
cudaArray *volume_array;
__device__ int gpu_longRangeContext(short *voxels, char *coords, float threshold, int a, int b, int c, int x, int y, int z) {
//int pos = (c * y * x) + (b * x) + a;
if(voxels[a] < threshold)
return LEFT;
return RIGHT;
}
// TODO
__device__ int gpu_coronal(short voxel, int y_center, int z_center, float threshold) {
if((voxel - y_center) < threshold)
return LEFT;
return RIGHT;
}
#define treenode const_tree[cur]
__global__ /*__launch_bounds__(512, 4)*/ void gpu_traverse(node *tree, leaf *leaves, short *voxels, short *results, int x, int y, int z) {
int cur;
int i, j, k;
int a, b, c;
int pos, alt_pos;
char res;
/* XXX i-j-k = z-y-x */
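	/* NOTE on the branches below (behavior preserved as-is):
	 * - LONGRANGECONTEXT wraps a and b with `& (x-1)`, which assumes the x
	 *   dimension is a power of two and, for the y-coordinate b, that x == y.
	 * - CORONAL compares (x - (y>>1)) -- volume dimensions, not per-thread
	 *   coordinates -- so every thread takes the same branch; this looks
	 *   suspect but is left unchanged. */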
i = blockIdx.z;
/*if(blockIdx.y & 3 == 1) {
j = (blockIdx.y / 4);
k = threadIdx.x + 128;
} else if(blockIdx.y & 3 == 2) {
j = (blockIdx.y / 4);
k = threadIdx.x + 256;
} else if(blockIdx.y & 3 == 3) {
j = (blockIdx.y / 4);
k = threadIdx.x + 384;
} else {
j = (blockIdx.y / 4);
k = threadIdx.x;
}*/
j = blockIdx.y;
k = threadIdx.x;
cur = 0;
pos = (i * x * y) + (j * x) + k;
while(1) {
if(treenode.type == LONGRANGECONTEXT) {
a = (k + treenode.arguments[0]) & (x-1);
b = (j + treenode.arguments[1]) & (x-1);
c = i + treenode.arguments[2];
if(c < 0)
c += z;
else if (c >= z)
c -= z;
alt_pos = (c * y * x) + (b * x) + a;
res = voxels[alt_pos] < treenode.threshold ? LEFT : RIGHT;
cur = treenode.children[res];
}
else if(treenode.type == CORONAL) {
res = (x - (y>>1)) < treenode.threshold ? LEFT : RIGHT;
cur = treenode.children[res];
}
else {
break;
}
}
results[pos] = treenode.arguments[0]; //leaf number/ID
}
cudaError_t copy_const(node *tree, cudaStream_t stream) {
//printf("Host root type: %d\n", tree[0].type);
return cudaMemcpyToSymbolAsync(const_tree, tree, 4217 * sizeof(node), 0,
cudaMemcpyHostToDevice, stream);
}
cudaError_t allocate_texture(short *volume) {
tex.addressMode[0] = cudaAddressModeWrap;
tex.addressMode[1] = cudaAddressModeWrap;
tex.addressMode[2] = cudaAddressModeWrap;
tex.filterMode = cudaFilterModeLinear;
tex.normalized = false;
cudaChannelFormatDesc format = {16, 0, 0, 0, cudaChannelFormatKindSigned};
cudaExtent extent = {512, 512, 525};
test(cudaMalloc3DArray(&volume_array, &format, extent, 0));
cudaMemcpy3DParms params = {0};
int pitch = sizeof(float);
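	// NOTE: pitch is sizeof(float) here, but for a 512-wide volume of shorts a
	// row pitch would normally be 512 * sizeof(short); kept as in the source.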
params.srcPtr = make_cudaPitchedPtr((void*) volume, pitch, 512*512*525, 1);
params.dstArray = volume_array;
params.extent = extent;
params.kind = cudaMemcpyHostToDevice;
test(cudaMemcpy3D(¶ms));
//test(cudaMemcpyToArray(volume_array, 0, 0, volume, 512*512*525, cudaMemcpyHostToDevice));
return cudaBindTextureToArray(tex, volume_array, format);
}
|
5db1919a4a2523477d3140cb4a37cd5af6817d81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_set_zero;
int xdim0_set_zero_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x,y) (x+xdim0_set_zero*(y))
//user function
__device__
void set_zero_gpu(double *A) {
A[OPS_ACC0(0,0)] = 0.0;
}
#undef OPS_ACC0
__global__ void ops_set_zero(
double* __restrict arg0,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_set_zero;
if (idx_x < size0 && idx_y < size1) {
set_zero_gpu(arg0);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_set_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
#else
void ops_par_loop_set_zero_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[1] = { arg0};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,1,range,1)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1,"set_zero");
OPS_kernels[1].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_set_zero_h) {
hipMemcpyToSymbol( xdim0_set_zero, &xdim0, sizeof(int) );
xdim0_set_zero_h = xdim0;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[1];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args,1,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_set_zero), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0],x_size, y_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[1].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_set_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 1;
desc->args = (ops_arg*)malloc(1*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->function = ops_par_loop_set_zero_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1,"set_zero");
}
ops_enqueue_kernel(desc);
}
#endif
| 5db1919a4a2523477d3140cb4a37cd5af6817d81.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_set_zero;
int xdim0_set_zero_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x,y) (x+xdim0_set_zero*(y))
//user function
__device__
void set_zero_gpu(double *A) {
A[OPS_ACC0(0,0)] = 0.0;
}
#undef OPS_ACC0
__global__ void ops_set_zero(
double* __restrict arg0,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_set_zero;
if (idx_x < size0 && idx_y < size1) {
set_zero_gpu(arg0);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_set_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
#else
void ops_par_loop_set_zero_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[1] = { arg0};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,1,range,1)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1,"set_zero");
OPS_kernels[1].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_set_zero_h) {
cudaMemcpyToSymbol( xdim0_set_zero, &xdim0, sizeof(int) );
xdim0_set_zero_h = xdim0;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[1];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args,1,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
ops_set_zero<<<grid, tblock >>> ( (double *)p_a[0],x_size, y_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[1].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_set_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 1;
desc->args = (ops_arg*)malloc(1*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->function = ops_par_loop_set_zero_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1,"set_zero");
}
ops_enqueue_kernel(desc);
}
#endif
|
01276b37139147d9cfcd7b61e48f5b806b79cc36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include <hip/hip_runtime_api.h>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <chrono>
#include "HistogramEqualizationGPU.cu"
using namespace cv;
using namespace std;
struct GpuTimer {
hipEvent_t start;
hipEvent_t stop;
GpuTimer() {
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer() {
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start() {
hipEventRecord(start, 0);
}
void Stop() {
hipEventRecord(stop, 0);
}
float Elapsed() {
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
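// Note: Elapsed() blocks on the stop event (hipEventSynchronize) and returns
// the elapsed time in milliseconds.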
int main(void)
{
Mat image = imread("images/image1.jpg");
namedWindow("Original Image", 0);
resizeWindow("Equalized Image", 800, 600);
imshow("Original Image", image);
int height = image.rows;
int width = image.cols;
//Convert the image into a buffer
unsigned char* ptr_image = image.ptr();
//Start GPU timer
GpuTimer timer;
timer.Start();
cout << "Processing with GPU..." << endl;
eq_GPU(ptr_image, width, height);
timer.Stop();
cout << "Time for GPU: " << timer.Elapsed() << " msec" << endl << endl;
cout << "Saving equalized image..." << endl;
string equalized;
equalized = "images/image_equalized_cuda.jpg";
imwrite(equalized, image);
cout << "Image saved" << endl;
namedWindow("Equalized Image", 0);
resizeWindow("Equalized Image", 800, 600);
imshow("Equalized Image", image);
waitKey();
return 0;
} | 01276b37139147d9cfcd7b61e48f5b806b79cc36.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include <cuda_profiler_api.h>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <chrono>
#include "HistogramEqualizationGPU.cu"
using namespace cv;
using namespace std;
struct GpuTimer {
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer() {
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer() {
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start() {
cudaEventRecord(start, 0);
}
void Stop() {
cudaEventRecord(stop, 0);
}
float Elapsed() {
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
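// Note: Elapsed() blocks on the stop event (cudaEventSynchronize) and returns
// the elapsed time in milliseconds.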
int main(void)
{
Mat image = imread("images/image1.jpg");
namedWindow("Original Image", 0);
resizeWindow("Equalized Image", 800, 600);
imshow("Original Image", image);
int height = image.rows;
int width = image.cols;
//Convert the image into a buffer
unsigned char* ptr_image = image.ptr();
//Start GPU timer
GpuTimer timer;
timer.Start();
cout << "Processing with GPU..." << endl;
eq_GPU(ptr_image, width, height);
timer.Stop();
cout << "Time for GPU: " << timer.Elapsed() << " msec" << endl << endl;
cout << "Saving equalized image..." << endl;
string equalized;
equalized = "images/image_equalized_cuda.jpg";
imwrite(equalized, image);
cout << "Image saved" << endl;
namedWindow("Equalized Image", 0);
resizeWindow("Equalized Image", 800, 600);
imshow("Equalized Image", image);
waitKey();
return 0;
} |